Compare commits


12 Commits

Author SHA1 Message Date
Paul Masurel
2267722d01 Added SegmentFruit 2018-12-13 08:58:00 +09:00
Paul Masurel
279a9eb5e3 Closes #449 (#450)
Clippy working on stable.
Clippy warnings addressed
2018-12-10 12:20:59 +09:00
fdb-hiroshima
21a24672d8 Add accessors for Snippet and HighlightSection (#448)
* Add accessors for Snippet and HighlightSection

And add an example of custom highlighter

* Remove inline(always) and unnecessary empty lines
2018-12-02 18:00:16 +09:00
dependabot[bot]
a3f1fbaae6 Update scoped-pool requirement from 0.1 to 1.0 (#447)
Updates the requirements on [scoped-pool](https://github.com/reem/rust-scoped-pool) to permit the latest version.
- [Release notes](https://github.com/reem/rust-scoped-pool/releases)
- [Commits](https://github.com/reem/rust-scoped-pool/commits/1.0.0)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-12-01 13:54:59 +09:00
Paul Masurel
a6e767c877 Cargo fmt 2018-11-30 22:52:45 +09:00
Paul Masurel
6af0488dbe Executor made sorted 2018-11-30 22:52:26 +09:00
Paul Masurel
07d87e154b Collector refactoring and multithreaded search (#437)
* Split Collector into an overall Collector and a per-segment SegmentCollector. Precursor to cross-segment parallelism, and as a side benefit cleans up any per-segment fields from being Option<T> to just T.

* Attempt to add MultiCollector back

* working. Chained collector is broken though

* Fix chained collector

* Fix test

* Make Weight Send+Sync for parallelization purposes

* Expose parameters of RangeQuery for external usage

* Removed &mut self

* fixing tests

* Restored TestCollectors

* blop

* multicollector working

* chained collector working

* test broken

* fixing unit test

* blop

* blop

* Blop

* simplifying APi

* blop

* better syntax

* Simplifying top_collector

* refactoring

* blop

* Sync with master

* Added multithread search

* Collector refactoring

* Schema::builder

* CR and rustdoc

* CR comments

* blop

* Added an executor

* Sorted the segment readers in the searcher

* Update searcher.rs

* Fixed unit testst

* changed the place where we have the sort-segment-by-count heuristic

* using crossbeam::channel

* inlining

* Comments about panics propagating

* Added unit test for executor panicking

* Readded default

* Removed Default impl

* Added unit test for executor
2018-11-30 22:46:59 +09:00
Paul Masurel
8b0b0133dd Importing crossbeam_channel from crossbeam reexport. 2018-11-19 09:19:28 +09:00
dependabot[bot]
7b9752f897 Update crossbeam-channel requirement from 0.2 to 0.3 (#436)
* Update crossbeam-channel requirement from 0.2 to 0.3

Updates the requirements on [crossbeam-channel](https://github.com/crossbeam-rs/crossbeam-channel) to permit the latest version.
- [Release notes](https://github.com/crossbeam-rs/crossbeam-channel/releases)
- [Changelog](https://github.com/crossbeam-rs/crossbeam-channel/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crossbeam-rs/crossbeam-channel/commits/v0.3.0)

Signed-off-by: dependabot[bot] <support@dependabot.com>

* fixing build
2018-11-16 14:26:59 +09:00
dependabot[bot]
c92f41aea8 Update rand requirement from 0.5 to 0.6 (#440)
* Update rand requirement from 0.5 to 0.6

Updates the requirements on [rand](https://github.com/rust-random/rand) to permit the latest version.
- [Release notes](https://github.com/rust-random/rand/releases)
- [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-random/rand/commits)

Signed-off-by: dependabot[bot] <support@dependabot.com>

* Updating rand.
2018-11-16 12:38:01 +09:00
Do Duy
dea16f1d9d Derive Clone for QueryParser (#442) 2018-11-15 18:45:40 +09:00
dependabot[bot]
236cfbec08 Update crossbeam requirement from 0.4 to 0.5 (#438)
Updates the requirements on [crossbeam](https://github.com/crossbeam-rs/crossbeam) to permit the latest version.
- [Release notes](https://github.com/crossbeam-rs/crossbeam/releases)
- [Changelog](https://github.com/crossbeam-rs/crossbeam/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crossbeam-rs/crossbeam/commits/crossbeam-0.5.0)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-11-15 06:16:22 +09:00
89 changed files with 2665 additions and 1945 deletions

View File

@@ -1,5 +1,13 @@
+Tantivy 0.8.1
+=====================
+*No change in the index format*
+- API Breaking change in the collector API. (@jwolfe, @fulmicoton)
+- Multithreaded search (@jwolfe, @fulmicoton)
+
 Tantivy 0.7.1
 =====================
 *No change in the index format*
 - Bugfix: NGramTokenizer panics on non ascii chars
 - Added a space usage API
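The "API Breaking change in the collector API" above is visible in every example diff below: collectors are no longer mutated through `&mut` during `search`; the result is returned instead. Here is a minimal end-to-end sketch of the new call shape, assembled only from calls that appear in those diffs (a sketch, not an excerpt from the repository):

```rust
#[macro_use]
extern crate tantivy;

use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    // Schema::builder() replaces SchemaBuilder::default() in the examples.
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema.clone());
    let mut index_writer = index.writer(50_000_000)?;
    index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
    index_writer.commit()?;
    index.load_searchers()?;

    let searcher = index.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;

    // Old API: `searcher.search(&*query, &mut top_collector)?` followed by
    // `top_collector.docs()`. New API: the collector is passed by shared
    // reference and `search` returns the result directly.
    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
    for (_score, doc_address) in top_docs {
        let doc = searcher.doc(doc_address)?;
        println!("{}", schema.to_json(&doc));
    }
    Ok(())
}
```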

View File

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.7.1"
+version = "0.8.0-dev"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -33,8 +33,7 @@ itertools = "0.7"
 levenshtein_automata = {version="0.1", features=["fst_automaton"]}
 bit-set = "0.5"
 uuid = { version = "0.7", features = ["v4", "serde"] }
-crossbeam = "0.4"
-crossbeam-channel = "0.2"
+crossbeam = "0.5"
 futures = "0.1"
 futures-cpupool = "0.1"
 owning_ref = "0.4"
@@ -49,12 +48,13 @@ owned-read = "0.4"
 failure = "0.1"
 htmlescape = "0.3.1"
 fail = "0.2"
+scoped-pool = "1.0"
 [target.'cfg(windows)'.dependencies]
 winapi = "0.2"
 [dev-dependencies]
-rand = "0.5"
+rand = "0.6"
 maplit = "1"
 [profile.release]
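The dropped `crossbeam-channel` line pairs with commit 8b0b0133dd ("Importing crossbeam_channel from crossbeam reexport"): crossbeam 0.5 exposes the channel API as a `crossbeam::channel` module, so the separate dependency can go. A small hedged sketch of what that import change looks like at a call site (the `unbounded` channel here is illustrative, not a quote from the tantivy sources):

```rust
// Before: `extern crate crossbeam_channel;` plus a dedicated
// `crossbeam-channel = "0.2"` entry in Cargo.toml.
// After: the same API through the crossbeam 0.5 re-export.
extern crate crossbeam;

use crossbeam::channel::unbounded;

fn main() {
    let (sender, receiver) = unbounded();
    sender.send(42u32).expect("receiver is still alive");
    assert_eq!(receiver.recv().unwrap(), 42);
}
```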

View File

@@ -4,9 +4,8 @@
 [Avant Propos](./avant-propos.md)
-- [Schema](./schema.md)
-- [Indexing](./indexing.md)
 - [Segments](./basis.md)
+- [Defining your schema](./schema.md)
 - [Facetting](./facetting.md)
 - [Innerworkings](./innerworkings.md)
 - [Inverted index](./inverted_index.md)

View File

@@ -31,3 +31,4 @@ relevancy, collapsing, highlighting, spatial search.
 index from a different format.
 Tantivy exposes a lot of low level API to do all of these things.

View File

View File

@@ -1,50 +1 @@
-# Schema
+# Defining your schema
When starting a new project using tantivy, your first step will be to define your schema. Be aware that changing it will probably require you to reindex all of your data.
It is strongly recommended that you keep the means to iterate through your original data when this happens.
Unless specified otherwise, tantivy does not keep a raw version of your data,
so good practice is to rely on a distinct storage to store your
raw documents.
The schema defines both the type of the fields you are indexing and the way you want them indexed. The set of search operations you will be able to perform depends on how you set up your schema.
Here is what defining your schema could look like.
```Rust
use tantivy::schema::{SchemaBuilder, TEXT, STORED, INT_INDEXED};

let mut schema_builder = SchemaBuilder::default();
let text_field = schema_builder.add_text_field("name", TEXT | STORED);
let tag_field = schema_builder.add_facet_field("tags");
let timestamp_field = schema_builder.add_u64_field("timestamp", INT_INDEXED);
let schema = schema_builder.build();
```
Notice how adding a new field to your schema builder
follows the pattern:
```verbatim
schema_builder.add_<fieldtype>_field("<fieldname>", <field_configuration>);
```
This method returns a `Field` handle that will be used for all kinds of operations on the field (adding values to documents, building queries, and so on).
# Field types
Tantivy currently supports only 4 types.
- `text` (i.e. `&str`)
- `u64` and `i64`
- `HierarchicalFacet`
Let's go over their specifics.
# Text
Full-text search is the bread and butter of search engines.
The key idea is fairly simple. Your text is broken apart into tokens (that's
what we call tokenization). Tantivy then keeps track of the list of documents containing each token.
In order to increase recall you might want to normalize tokens. For instance,
you most likely want to lowercase your tokens so that documents match the query `cat` regardless of whether they contain the token `cat` or `Cat`.
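To make the tokenization idea concrete, here is a toy illustration in plain Rust (not tantivy's actual implementation) of breaking text into tokens, lowercasing them, and keeping a posting list of the documents containing each token:

```rust
use std::collections::HashMap;

fn main() {
    let docs = ["The Cat sat", "A black cat", "No animals here"];

    // token -> sorted list of document ids containing it
    let mut postings: HashMap<String, Vec<usize>> = HashMap::new();

    for (doc_id, text) in docs.iter().enumerate() {
        for token in text.split_whitespace() {
            // Normalization step: lowercase so that "Cat" and "cat" collapse.
            let normalized = token.to_lowercase();
            let entry = postings.entry(normalized).or_insert_with(Vec::new);
            if entry.last() != Some(&doc_id) {
                entry.push(doc_id);
            }
        }
    }

    // The query "cat" now matches documents 0 and 1,
    // regardless of the original casing.
    assert_eq!(postings.get("cat"), Some(&vec![0usize, 1]));
}
```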

View File

@@ -16,10 +16,11 @@ extern crate tempdir;
// Importing tantivy... // Importing tantivy...
#[macro_use] #[macro_use]
extern crate tantivy; extern crate tantivy;
use tantivy::collector::TopCollector; use tantivy::collector::TopDocs;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::Index; use tantivy::Index;
use tempdir::TempDir;
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the // Let's create a temporary directory for the
@@ -34,7 +35,7 @@ fn main() -> tantivy::Result<()> {
// be indexed". // be indexed".
// first we need to define a schema ... // first we need to define a schema ...
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
// Our first field is title. // Our first field is title.
// We want full-text search for it, and we also want // We want full-text search for it, and we also want
@@ -105,37 +106,37 @@ fn main() -> tantivy::Result<()> {
// For convenience, tantivy also comes with a macro to // For convenience, tantivy also comes with a macro to
// reduce the boilerplate above. // reduce the boilerplate above.
index_writer.add_document(doc!( index_writer.add_document(doc!(
title => "Of Mice and Men", title => "Of Mice and Men",
body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \ body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
bank and runs deep and green. The water is warm too, for it has slipped twinkling \ bank and runs deep and green. The water is warm too, for it has slipped twinkling \
over the yellow sands in the sunlight before reaching the narrow pool. On one \ over the yellow sands in the sunlight before reaching the narrow pool. On one \
side of the river the golden foothill slopes curve up to the strong and rocky \ side of the river the golden foothill slopes curve up to the strong and rocky \
Gabilan Mountains, but on the valley side the water is lined with trees—willows \ Gabilan Mountains, but on the valley side the water is lined with trees—willows \
fresh and green with every spring, carrying in their lower leaf junctures the \ fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \ debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool" limbs and branches that arch over the pool"
)); ));
index_writer.add_document(doc!( index_writer.add_document(doc!(
title => "Of Mice and Men", title => "Of Mice and Men",
body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \ body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
bank and runs deep and green. The water is warm too, for it has slipped twinkling \ bank and runs deep and green. The water is warm too, for it has slipped twinkling \
over the yellow sands in the sunlight before reaching the narrow pool. On one \ over the yellow sands in the sunlight before reaching the narrow pool. On one \
side of the river the golden foothill slopes curve up to the strong and rocky \ side of the river the golden foothill slopes curve up to the strong and rocky \
Gabilan Mountains, but on the valley side the water is lined with trees—willows \ Gabilan Mountains, but on the valley side the water is lined with trees—willows \
fresh and green with every spring, carrying in their lower leaf junctures the \ fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \ debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool" limbs and branches that arch over the pool"
)); ));
// Multivalued field just need to be repeated. // Multivalued field just need to be repeated.
index_writer.add_document(doc!( index_writer.add_document(doc!(
title => "Frankenstein", title => "Frankenstein",
title => "The Modern Prometheus", title => "The Modern Prometheus",
body => "You will rejoice to hear that no disaster has accompanied the commencement of an \ body => "You will rejoice to hear that no disaster has accompanied the commencement of an \
enterprise which you have regarded with such evil forebodings. I arrived here \ enterprise which you have regarded with such evil forebodings. I arrived here \
yesterday, and my first task is to assure my dear sister of my welfare and \ yesterday, and my first task is to assure my dear sister of my welfare and \
increasing confidence in the success of my undertaking." increasing confidence in the success of my undertaking."
)); ));
// This is an example, so we will only index 3 documents // This is an example, so we will only index 3 documents
@@ -212,15 +213,10 @@ fn main() -> tantivy::Result<()> {
// //
// We are not interested in all of the documents but // We are not interested in all of the documents but
// only in the top 10. Keeping track of our top 10 best documents // only in the top 10. Keeping track of our top 10 best documents
// is the role of the TopCollector. // is the role of the TopDocs.
let mut top_collector = TopCollector::with_limit(10);
// We can now perform our query. // We can now perform our query.
searcher.search(&*query, &mut top_collector)?; let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
// Our top collector now contains the 10
// most relevant doc ids...
let doc_addresses = top_collector.docs();
// The actual documents still need to be // The actual documents still need to be
// retrieved from Tantivy's store. // retrieved from Tantivy's store.
@@ -229,12 +225,10 @@ fn main() -> tantivy::Result<()> {
// the document returned will only contain // the document returned will only contain
// a title. // a title.
for doc_address in doc_addresses { for (_score, doc_address) in top_docs {
let retrieved_doc = searcher.doc(doc_address)?; let retrieved_doc = searcher.doc(doc_address)?;
println!("{}", schema.to_json(&retrieved_doc)); println!("{}", schema.to_json(&retrieved_doc));
} }
Ok(()) Ok(())
} }
use tempdir::TempDir;

View File

@@ -0,0 +1,189 @@
// # Custom collector example
//
// This example shows how you can implement your own
// collector. As an example, we will implement a collector
// that computes the standard deviation of a given fast field.
//
// Of course, you can have a look at the tantivy's built-in collectors
// such as the `CountCollector` for more examples.
extern crate tempdir;
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INT_INDEXED, TEXT};
use tantivy::Index;
use tantivy::SegmentReader;
#[derive(Default)]
struct Stats {
count: usize,
sum: f64,
squared_sum: f64,
}
impl Stats {
pub fn count(&self) -> usize {
self.count
}
pub fn mean(&self) -> f64 {
self.sum / (self.count as f64)
}
fn square_mean(&self) -> f64 {
self.squared_sum / (self.count as f64)
}
pub fn standard_deviation(&self) -> f64 {
let mean = self.mean();
(self.square_mean() - mean * mean).sqrt()
}
fn non_zero_count(self) -> Option<Stats> {
if self.count == 0 {
None
} else {
Some(self)
}
}
}
struct StatsCollector {
field: Field,
}
impl StatsCollector {
fn with_field(field: Field) -> StatsCollector {
StatsCollector { field }
}
}
impl Collector for StatsCollector {
// That's the type of our result.
// Our standard deviation will be a float.
type Fruit = Option<Stats>;
type SegmentFruit = Self::Fruit;
type Child = StatsSegmentCollector;
fn for_segment(
&self,
_segment_local_id: u32,
segment: &SegmentReader,
) -> tantivy::Result<StatsSegmentCollector> {
let fast_field_reader = segment.fast_field_reader(self.field)?;
Ok(StatsSegmentCollector {
fast_field_reader,
stats: Stats::default(),
})
}
fn requires_scoring(&self) -> bool {
// this collector does not care about score.
false
}
fn merge_fruits(&self, segment_stats: Vec<Option<Stats>>) -> tantivy::Result<Option<Stats>> {
let mut stats = Stats::default();
for segment_stats_opt in segment_stats {
if let Some(segment_stats) = segment_stats_opt {
stats.count += segment_stats.count;
stats.sum += segment_stats.sum;
stats.squared_sum += segment_stats.squared_sum;
}
}
Ok(stats.non_zero_count())
}
}
struct StatsSegmentCollector {
fast_field_reader: FastFieldReader<u64>,
stats: Stats,
}
impl SegmentCollector for StatsSegmentCollector {
type Fruit = Option<Stats>;
fn collect(&mut self, doc: u32, _score: f32) {
let value = self.fast_field_reader.get(doc) as f64;
self.stats.count += 1;
self.stats.sum += value;
self.stats.squared_sum += value * value;
}
fn harvest(self) -> <Self as SegmentCollector>::Fruit {
self.stats.non_zero_count()
}
}
fn main() -> tantivy::Result<()> {
// # Defining the schema
//
// The Tantivy index requires a very strict schema.
// The schema declares which fields are in the index,
// and for each field, its type and "the way it should
// be indexed".
// first we need to define a schema ...
let mut schema_builder = Schema::builder();
// We'll assume a fictional index containing
// products, and with a name, a description, and a price.
let product_name = schema_builder.add_text_field("name", TEXT);
let product_description = schema_builder.add_text_field("description", TEXT);
let price = schema_builder.add_u64_field("price", INT_INDEXED | FAST);
let schema = schema_builder.build();
// # Indexing documents
//
// Lets index a bunch of fake documents for the sake of
// this example.
let index = Index::create_in_ram(schema.clone());
let mut index_writer = index.writer(50_000_000)?;
index_writer.add_document(doc!(
product_name => "Super Broom 2000",
product_description => "While it is ok for short distance travel, this broom \
was designed for quidditch. It will up your game.",
price => 30_200u64
));
index_writer.add_document(doc!(
product_name => "Turbulobroom",
product_description => "You might have heard of this broom before : it is the sponsor of the Wales team.\
You'll enjoy its sharp turns, and rapid acceleration",
price => 29_240u64
));
index_writer.add_document(doc!(
product_name => "Broomio",
product_description => "Great value for the price. This broom is a market favorite",
price => 21_240u64
));
index_writer.add_document(doc!(
product_name => "Whack a Mole",
product_description => "Prime quality bat.",
price => 5_200u64
));
index_writer.commit()?;
index.load_searchers()?;
let searcher = index.searcher();
let query_parser = QueryParser::for_index(&index, vec![product_name, product_description]);
// here we want to get a hit on the word 'broom'
let query = query_parser.parse_query("broom")?;
if let Some(stats) = searcher.search(&query, &StatsCollector::with_field(price))? {
println!("count: {}", stats.count());
println!("mean: {}", stats.mean());
println!("standard deviation: {}", stats.standard_deviation());
}
Ok(())
}

View File

@@ -5,7 +5,7 @@
#[macro_use] #[macro_use]
extern crate tantivy; extern crate tantivy;
use tantivy::collector::TopCollector; use tantivy::collector::TopDocs;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::tokenizer::NgramTokenizer; use tantivy::tokenizer::NgramTokenizer;
@@ -20,7 +20,7 @@ fn main() -> tantivy::Result<()> {
// be indexed". // be indexed".
// first we need to define a schema ... // first we need to define a schema ...
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
// Our first field is title. // Our first field is title.
// In this example we want to use NGram searching // In this example we want to use NGram searching
@@ -68,12 +68,12 @@ fn main() -> tantivy::Result<()> {
// heap for the indexer can increase its throughput. // heap for the indexer can increase its throughput.
let mut index_writer = index.writer(50_000_000)?; let mut index_writer = index.writer(50_000_000)?;
index_writer.add_document(doc!( index_writer.add_document(doc!(
title => "The Old Man and the Sea", title => "The Old Man and the Sea",
body => "He was an old man who fished alone in a skiff in the Gulf Stream and \ body => "He was an old man who fished alone in a skiff in the Gulf Stream and \
he had gone eighty-four days now without taking a fish." he had gone eighty-four days now without taking a fish."
)); ));
index_writer.add_document(doc!( index_writer.add_document(doc!(
title => "Of Mice and Men", title => "Of Mice and Men",
body => r#"A few miles south of Soledad, the Salinas River drops in close to the hillside body => r#"A few miles south of Soledad, the Salinas River drops in close to the hillside
bank and runs deep and green. The water is warm too, for it has slipped twinkling bank and runs deep and green. The water is warm too, for it has slipped twinkling
over the yellow sands in the sunlight before reaching the narrow pool. On one over the yellow sands in the sunlight before reaching the narrow pool. On one
@@ -84,7 +84,7 @@ fn main() -> tantivy::Result<()> {
limbs and branches that arch over the pool"# limbs and branches that arch over the pool"#
)); ));
index_writer.add_document(doc!( index_writer.add_document(doc!(
title => "Frankenstein", title => "Frankenstein",
body => r#"You will rejoice to hear that no disaster has accompanied the commencement of an body => r#"You will rejoice to hear that no disaster has accompanied the commencement of an
enterprise which you have regarded with such evil forebodings. I arrived here enterprise which you have regarded with such evil forebodings. I arrived here
yesterday, and my first task is to assure my dear sister of my welfare and yesterday, and my first task is to assure my dear sister of my welfare and
@@ -104,11 +104,9 @@ fn main() -> tantivy::Result<()> {
// here we want to get a hit on the 'ken' in Frankenstein // here we want to get a hit on the 'ken' in Frankenstein
let query = query_parser.parse_query("ken")?; let query = query_parser.parse_query("ken")?;
let mut top_collector = TopCollector::with_limit(10); let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
searcher.search(&*query, &mut top_collector)?;
let doc_addresses = top_collector.docs(); for (_, doc_address) in top_docs {
for doc_address in doc_addresses {
let retrieved_doc = searcher.doc(doc_address)?; let retrieved_doc = searcher.doc(doc_address)?;
println!("{}", schema.to_json(&retrieved_doc)); println!("{}", schema.to_json(&retrieved_doc));
} }

View File

@@ -10,7 +10,7 @@
// Importing tantivy... // Importing tantivy...
#[macro_use] #[macro_use]
extern crate tantivy; extern crate tantivy;
use tantivy::collector::TopCollector; use tantivy::collector::TopDocs;
use tantivy::query::TermQuery; use tantivy::query::TermQuery;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::Index; use tantivy::Index;
@@ -27,10 +27,9 @@ fn extract_doc_given_isbn(index: &Index, isbn_term: &Term) -> tantivy::Result<Op
// The second argument is here to tell we don't care about decoding positions, // The second argument is here to tell we don't care about decoding positions,
// or term frequencies. // or term frequencies.
let term_query = TermQuery::new(isbn_term.clone(), IndexRecordOption::Basic); let term_query = TermQuery::new(isbn_term.clone(), IndexRecordOption::Basic);
let mut top_collector = TopCollector::with_limit(1); let top_docs = searcher.search(&term_query, &TopDocs::with_limit(1))?;
searcher.search(&term_query, &mut top_collector)?;
if let Some(doc_address) = top_collector.docs().first() { if let Some((_score, doc_address)) = top_docs.first() {
let doc = searcher.doc(*doc_address)?; let doc = searcher.doc(*doc_address)?;
Ok(Some(doc)) Ok(Some(doc))
} else { } else {
@@ -44,7 +43,7 @@ fn main() -> tantivy::Result<()> {
// //
// Check out the *basic_search* example if this makes // Check out the *basic_search* example if this makes
// small sense to you. // small sense to you.
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
// Tantivy does not really have a notion of primary id. // Tantivy does not really have a notion of primary id.
// This may change in the future. // This may change in the future.

View File

@@ -25,7 +25,7 @@ fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the // Let's create a temporary directory for the
// sake of this example // sake of this example
let index_path = TempDir::new("tantivy_facet_example_dir")?; let index_path = TempDir::new("tantivy_facet_example_dir")?;
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
schema_builder.add_text_field("name", TEXT | STORED); schema_builder.add_text_field("name", TEXT | STORED);
@@ -62,11 +62,10 @@ fn main() -> tantivy::Result<()> {
let mut facet_collector = FacetCollector::for_field(tags); let mut facet_collector = FacetCollector::for_field(tags);
facet_collector.add_facet("/pools"); facet_collector.add_facet("/pools");
searcher.search(&AllQuery, &mut facet_collector).unwrap(); let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
let counts = facet_collector.harvest();
// This lists all of the facet counts // This lists all of the facet counts
let facets: Vec<(&Facet, u64)> = counts.get("/pools").collect(); let facets: Vec<(&Facet, u64)> = facet_counts.get("/pools").collect();
assert_eq!( assert_eq!(
facets, facets,
vec![ vec![

View File

@@ -18,7 +18,7 @@ use tantivy::{DocId, DocSet, Postings};
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
// We first create a schema for the sake of the // We first create a schema for the sake of the
// example. Check the `basic_search` example for more information. // example. Check the `basic_search` example for more information.
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
// For this example, we need to make sure to index positions for our title // For this example, we need to make sure to index positions for our title
// field. `TEXT` precisely does this. // field. `TEXT` precisely does this.

View File

@@ -10,11 +10,11 @@ extern crate tempdir;
// Importing tantivy... // Importing tantivy...
#[macro_use] #[macro_use]
extern crate tantivy; extern crate tantivy;
use tantivy::collector::TopCollector; use tantivy::collector::TopDocs;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::Index; use tantivy::Index;
use tantivy::SnippetGenerator; use tantivy::{Snippet, SnippetGenerator};
use tempdir::TempDir; use tempdir::TempDir;
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
@@ -23,7 +23,7 @@ fn main() -> tantivy::Result<()> {
let index_path = TempDir::new("tantivy_example_dir")?; let index_path = TempDir::new("tantivy_example_dir")?;
// # Defining the schema // # Defining the schema
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT | STORED); let title = schema_builder.add_text_field("title", TEXT | STORED);
let body = schema_builder.add_text_field("body", TEXT | STORED); let body = schema_builder.add_text_field("body", TEXT | STORED);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -35,15 +35,15 @@ fn main() -> tantivy::Result<()> {
// we'll only need one doc for this example. // we'll only need one doc for this example.
index_writer.add_document(doc!( index_writer.add_document(doc!(
title => "Of Mice and Men", title => "Of Mice and Men",
body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \ body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
bank and runs deep and green. The water is warm too, for it has slipped twinkling \ bank and runs deep and green. The water is warm too, for it has slipped twinkling \
over the yellow sands in the sunlight before reaching the narrow pool. On one \ over the yellow sands in the sunlight before reaching the narrow pool. On one \
side of the river the golden foothill slopes curve up to the strong and rocky \ side of the river the golden foothill slopes curve up to the strong and rocky \
Gabilan Mountains, but on the valley side the water is lined with trees—willows \ Gabilan Mountains, but on the valley side the water is lined with trees—willows \
fresh and green with every spring, carrying in their lower leaf junctures the \ fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \ debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool" limbs and branches that arch over the pool"
)); ));
// ... // ...
index_writer.commit()?; index_writer.commit()?;
@@ -54,18 +54,34 @@ fn main() -> tantivy::Result<()> {
let query_parser = QueryParser::for_index(&index, vec![title, body]); let query_parser = QueryParser::for_index(&index, vec![title, body]);
let query = query_parser.parse_query("sycamore spring")?; let query = query_parser.parse_query("sycamore spring")?;
let mut top_collector = TopCollector::with_limit(10); let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
searcher.search(&*query, &mut top_collector)?;
let snippet_generator = SnippetGenerator::new(&searcher, &*query, body)?; let snippet_generator = SnippetGenerator::create(&searcher, &*query, body)?;
let doc_addresses = top_collector.docs(); for (score, doc_address) in top_docs {
for doc_address in doc_addresses {
let doc = searcher.doc(doc_address)?; let doc = searcher.doc(doc_address)?;
let snippet = snippet_generator.snippet_from_doc(&doc); let snippet = snippet_generator.snippet_from_doc(&doc);
println!("Document score {}:", score);
println!("title: {}", doc.get_first(title).unwrap().text().unwrap()); println!("title: {}", doc.get_first(title).unwrap().text().unwrap());
println!("snippet: {}", snippet.to_html()); println!("snippet: {}", snippet.to_html());
println!("custom highlighting: {}", highlight(snippet));
} }
Ok(()) Ok(())
} }
fn highlight(snippet: Snippet) -> String {
let mut result = String::new();
let mut start_from = 0;
for (start, end) in snippet.highlighted().iter().map(|h| h.bounds()) {
result.push_str(&snippet.fragments()[start_from..start]);
result.push_str(" --> ");
result.push_str(&snippet.fragments()[start..end]);
result.push_str(" <-- ");
start_from = end;
}
result.push_str(&snippet.fragments()[start_from..]);
result
}

View File

@@ -15,7 +15,7 @@ extern crate tempdir;
// Importing tantivy... // Importing tantivy...
#[macro_use] #[macro_use]
extern crate tantivy; extern crate tantivy;
use tantivy::collector::TopCollector; use tantivy::collector::TopDocs;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::tokenizer::*; use tantivy::tokenizer::*;
@@ -23,7 +23,7 @@ use tantivy::Index;
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
// this example assumes you understand the content in `basic_search` // this example assumes you understand the content in `basic_search`
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
// This configures your custom options for how tantivy will // This configures your custom options for how tantivy will
// store and process your content in the index; The key // store and process your content in the index; The key
@@ -72,26 +72,26 @@ fn main() -> tantivy::Result<()> {
title => "The Old Man and the Sea", title => "The Old Man and the Sea",
body => "He was an old man who fished alone in a skiff in the Gulf Stream and \ body => "He was an old man who fished alone in a skiff in the Gulf Stream and \
he had gone eighty-four days now without taking a fish." he had gone eighty-four days now without taking a fish."
)); ));
index_writer.add_document(doc!( index_writer.add_document(doc!(
title => "Of Mice and Men", title => "Of Mice and Men",
body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \ body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
bank and runs deep and green. The water is warm too, for it has slipped twinkling \ bank and runs deep and green. The water is warm too, for it has slipped twinkling \
over the yellow sands in the sunlight before reaching the narrow pool. On one \ over the yellow sands in the sunlight before reaching the narrow pool. On one \
side of the river the golden foothill slopes curve up to the strong and rocky \ side of the river the golden foothill slopes curve up to the strong and rocky \
Gabilan Mountains, but on the valley side the water is lined with trees—willows \ Gabilan Mountains, but on the valley side the water is lined with trees—willows \
fresh and green with every spring, carrying in their lower leaf junctures the \ fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \ debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool" limbs and branches that arch over the pool"
)); ));
index_writer.add_document(doc!( index_writer.add_document(doc!(
title => "Frankenstein", title => "Frankenstein",
body => "You will rejoice to hear that no disaster has accompanied the commencement of an \ body => "You will rejoice to hear that no disaster has accompanied the commencement of an \
enterprise which you have regarded with such evil forebodings. I arrived here \ enterprise which you have regarded with such evil forebodings. I arrived here \
yesterday, and my first task is to assure my dear sister of my welfare and \ yesterday, and my first task is to assure my dear sister of my welfare and \
increasing confidence in the success of my undertaking." increasing confidence in the success of my undertaking."
)); ));
index_writer.commit()?; index_writer.commit()?;
@@ -105,15 +105,11 @@ fn main() -> tantivy::Result<()> {
// stop words are applied on the query as well. // stop words are applied on the query as well.
// The following will be equivalent to `title:frankenstein` // The following will be equivalent to `title:frankenstein`
let query = query_parser.parse_query("title:\"the Frankenstein\"")?; let query = query_parser.parse_query("title:\"the Frankenstein\"")?;
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
let mut top_collector = TopCollector::with_limit(10); for (score, doc_address) in top_docs {
searcher.search(&*query, &mut top_collector)?;
let doc_addresses = top_collector.docs();
for doc_address in doc_addresses {
let retrieved_doc = searcher.doc(doc_address)?; let retrieved_doc = searcher.doc(doc_address)?;
println!("\n==\nDocument score {}:", score);
println!("{}", schema.to_json(&retrieved_doc)); println!("{}", schema.to_json(&retrieved_doc));
} }

View File

@@ -9,7 +9,7 @@ fn main() -> tantivy::Result<()> {
// Check out the basic example if this is confusing to you. // Check out the basic example if this is confusing to you.
// //
// first we need to define a schema ... // first we need to define a schema ...
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
schema_builder.add_text_field("title", TEXT | STORED); schema_builder.add_text_field("title", TEXT | STORED);
schema_builder.add_text_field("body", TEXT); schema_builder.add_text_field("body", TEXT);
schema_builder.add_u64_field("year", INT_INDEXED); schema_builder.add_u64_field("year", INT_INDEXED);

View File

@@ -1,142 +0,0 @@
use collector::Collector;
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
/// Collector that does nothing.
/// This is used in the chain Collector and will hopefully
/// be optimized away by the compiler.
pub struct DoNothingCollector;
impl Collector for DoNothingCollector {
#[inline]
fn set_segment(&mut self, _: SegmentLocalId, _: &SegmentReader) -> Result<()> {
Ok(())
}
#[inline]
fn collect(&mut self, _doc: DocId, _score: Score) {}
#[inline]
fn requires_scoring(&self) -> bool {
false
}
}
/// Zero-cost abstraction used to collect on multiple collectors.
/// This contraption is only usable if the type of your collectors
/// are known at compile time.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::{CountCollector, TopCollector, chain};
/// use tantivy::query::QueryParser;
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
///
/// index.load_searchers()?;
/// let searcher = index.searcher();
///
/// {
/// let mut top_collector = TopCollector::with_limit(2);
/// let mut count_collector = CountCollector::default();
/// {
/// let mut collectors = chain().push(&mut top_collector).push(&mut count_collector);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// searcher.search(&*query, &mut collectors).unwrap();
/// }
/// assert_eq!(count_collector.count(), 2);
/// assert!(top_collector.at_capacity());
/// }
///
/// Ok(())
/// }
/// ```
pub struct ChainedCollector<Left: Collector, Right: Collector> {
left: Left,
right: Right,
}
impl<Left: Collector, Right: Collector> ChainedCollector<Left, Right> {
/// Adds a collector
pub fn push<C: Collector>(self, new_collector: &mut C) -> ChainedCollector<Self, &mut C> {
ChainedCollector {
left: self,
right: new_collector,
}
}
}
impl<Left: Collector, Right: Collector> Collector for ChainedCollector<Left, Right> {
fn set_segment(
&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader,
) -> Result<()> {
self.left.set_segment(segment_local_id, segment)?;
self.right.set_segment(segment_local_id, segment)?;
Ok(())
}
fn collect(&mut self, doc: DocId, score: Score) {
self.left.collect(doc, score);
self.right.collect(doc, score);
}
fn requires_scoring(&self) -> bool {
self.left.requires_scoring() || self.right.requires_scoring()
}
}
/// Creates a `ChainedCollector`
pub fn chain() -> ChainedCollector<DoNothingCollector, DoNothingCollector> {
ChainedCollector {
left: DoNothingCollector,
right: DoNothingCollector,
}
}
#[cfg(test)]
mod tests {
use super::*;
use collector::{Collector, CountCollector, TopCollector};
#[test]
fn test_chained_collector() {
let mut top_collector = TopCollector::with_limit(2);
let mut count_collector = CountCollector::default();
{
let mut collectors = chain().push(&mut top_collector).push(&mut count_collector);
collectors.collect(1, 0.2);
collectors.collect(2, 0.1);
collectors.collect(3, 0.5);
}
assert_eq!(count_collector.count(), 3);
assert!(top_collector.at_capacity());
}
}

View File

@@ -1,4 +1,5 @@
use super::Collector; use super::Collector;
use collector::SegmentCollector;
use DocId; use DocId;
use Result; use Result;
use Score; use Score;
@@ -11,14 +12,14 @@ use SegmentReader;
/// ```rust /// ```rust
/// #[macro_use] /// #[macro_use]
/// extern crate tantivy; /// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result}; /// use tantivy::{Index, Result};
/// use tantivy::collector::CountCollector; /// use tantivy::collector::Count;
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// ///
/// # fn main() { example().unwrap(); } /// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new(); /// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT); /// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema); /// let index = Index::create_in_ram(schema);
@@ -43,59 +44,87 @@ use SegmentReader;
/// let searcher = index.searcher(); /// let searcher = index.searcher();
/// ///
/// { /// {
/// let mut count_collector = CountCollector::default();
/// let query_parser = QueryParser::for_index(&index, vec![title]); /// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?; /// let query = query_parser.parse_query("diary")?;
/// searcher.search(&*query, &mut count_collector).unwrap(); /// let count = searcher.search(&query, &Count).unwrap();
/// ///
/// assert_eq!(count_collector.count(), 2); /// assert_eq!(count, 2);
/// } /// }
/// ///
/// Ok(()) /// Ok(())
/// } /// }
/// ``` /// ```
#[derive(Default)] pub struct Count;
pub struct CountCollector {
count: usize,
}
impl CountCollector { impl Collector for Count {
/// Returns the count of documents that were type Fruit = usize;
/// collected. type SegmentFruit = usize;
pub fn count(&self) -> usize {
self.count
}
}
impl Collector for CountCollector { type Child = SegmentCountCollector;
fn set_segment(&mut self, _: SegmentLocalId, _: &SegmentReader) -> Result<()> {
Ok(())
}
fn collect(&mut self, _: DocId, _: Score) { fn for_segment(&self, _: SegmentLocalId, _: &SegmentReader) -> Result<SegmentCountCollector> {
self.count += 1; Ok(SegmentCountCollector::default())
} }
fn requires_scoring(&self) -> bool { fn requires_scoring(&self) -> bool {
false false
} }
fn merge_fruits(&self, segment_counts: Vec<usize>) -> Result<usize> {
Ok(segment_counts.into_iter().sum())
}
}
#[derive(Default)]
pub struct SegmentCountCollector {
count: usize,
}
impl SegmentCollector for SegmentCountCollector {
type Fruit = usize;
fn collect(&mut self, _: DocId, _: Score) {
self.count += 1;
}
fn harvest(self) -> usize {
self.count
}
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{Count, SegmentCountCollector};
use collector::{Collector, CountCollector}; use collector::Collector;
use collector::SegmentCollector;
#[test] #[test]
fn test_count_collector() { fn test_count_collect_does_not_requires_scoring() {
let mut count_collector = CountCollector::default(); assert!(!Count.requires_scoring());
assert_eq!(count_collector.count(), 0); }
count_collector.collect(0u32, 1f32);
assert_eq!(count_collector.count(), 1); #[test]
assert_eq!(count_collector.count(), 1); fn test_segment_count_collector() {
count_collector.collect(1u32, 1f32); {
assert_eq!(count_collector.count(), 2); let count_collector = SegmentCountCollector::default();
assert!(!count_collector.requires_scoring()); assert_eq!(count_collector.harvest(), 0);
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1f32);
assert_eq!(count_collector.harvest(), 1);
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1f32);
assert_eq!(count_collector.harvest(), 1);
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1f32);
count_collector.collect(1u32, 1f32);
assert_eq!(count_collector.harvest(), 2);
}
} }
} }

View File

@@ -1,20 +1,17 @@
use collector::Collector; use collector::Collector;
use collector::SegmentCollector;
use docset::SkipResult; use docset::SkipResult;
use fastfield::FacetReader; use fastfield::FacetReader;
use schema::Facet; use schema::Facet;
use schema::Field; use schema::Field;
use std::cell::UnsafeCell; use std::cmp::Ordering;
use std::collections::btree_map; use std::collections::btree_map;
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::collections::BinaryHeap; use std::collections::BinaryHeap;
use std::collections::Bound; use std::collections::Bound;
use std::iter::Peekable; use std::iter::Peekable;
use std::mem;
use std::{u64, usize}; use std::{u64, usize};
use termdict::TermMerger;
use std::cmp::Ordering;
use DocId; use DocId;
use Result; use Result;
use Score; use Score;
@@ -46,12 +43,6 @@ impl<'a> Ord for Hit<'a> {
} }
} }
struct SegmentFacetCounter {
pub facet_reader: FacetReader,
pub facet_ords: Vec<u64>,
pub facet_counts: Vec<u64>,
}
fn facet_depth(facet_bytes: &[u8]) -> usize { fn facet_depth(facet_bytes: &[u8]) -> usize {
if facet_bytes.is_empty() { if facet_bytes.is_empty() {
0 0
@@ -91,14 +82,14 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// ```rust /// ```rust
/// #[macro_use] /// #[macro_use]
/// extern crate tantivy; /// extern crate tantivy;
/// use tantivy::schema::{Facet, SchemaBuilder, TEXT}; /// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{Index, Result}; /// use tantivy::{Index, Result};
/// use tantivy::collector::FacetCollector; /// use tantivy::collector::FacetCollector;
/// use tantivy::query::AllQuery; /// use tantivy::query::AllQuery;
/// ///
/// # fn main() { example().unwrap(); } /// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new(); /// let mut schema_builder = Schema::builder();
/// ///
/// // Facet have their own specific type. /// // Facet have their own specific type.
/// // It is not a bad practise to put all of your /// // It is not a bad practise to put all of your
@@ -141,13 +132,10 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/lang"); /// facet_collector.add_facet("/lang");
/// facet_collector.add_facet("/category"); /// facet_collector.add_facet("/category");
/// searcher.search(&AllQuery, &mut facet_collector).unwrap(); /// let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
///
/// // this object contains count aggregate for all of the facets.
/// let counts = facet_collector.harvest();
/// ///
/// // This lists all of the facet counts /// // This lists all of the facet counts
/// let facets: Vec<(&Facet, u64)> = counts /// let facets: Vec<(&Facet, u64)> = facet_counts
/// .get("/category") /// .get("/category")
/// .collect(); /// .collect();
/// assert_eq!(facets, vec![ /// assert_eq!(facets, vec![
@@ -159,13 +147,10 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction"); /// facet_collector.add_facet("/category/fiction");
/// searcher.search(&AllQuery, &mut facet_collector).unwrap(); /// let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
///
/// // this object contains count aggregate for all of the facets.
/// let counts = facet_collector.harvest();
/// ///
/// // This lists all of the facet counts /// // This lists all of the facet counts
/// let facets: Vec<(&Facet, u64)> = counts /// let facets: Vec<(&Facet, u64)> = facet_counts
/// .get("/category/fiction") /// .get("/category/fiction")
/// .collect(); /// .collect();
/// assert_eq!(facets, vec![ /// assert_eq!(facets, vec![
@@ -178,13 +163,10 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction"); /// facet_collector.add_facet("/category/fiction");
/// searcher.search(&AllQuery, &mut facet_collector).unwrap(); /// let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
///
/// // this object contains count aggregate for all of the facets.
/// let counts = facet_collector.harvest();
/// ///
/// // This lists all of the facet counts /// // This lists all of the facet counts
/// let facets: Vec<(&Facet, u64)> = counts.top_k("/category/fiction", 1); /// let facets: Vec<(&Facet, u64)> = facet_counts.top_k("/category/fiction", 1);
/// assert_eq!(facets, vec![ /// assert_eq!(facets, vec![
/// (&Facet::from("/category/fiction/fantasy"), 2) /// (&Facet::from("/category/fiction/fantasy"), 2)
/// ]); /// ]);
@@ -194,21 +176,21 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// } /// }
/// ``` /// ```
pub struct FacetCollector { pub struct FacetCollector {
facet_ords: Vec<u64>,
field: Field, field: Field,
ff_reader: Option<UnsafeCell<FacetReader>>,
segment_counters: Vec<SegmentFacetCounter>,
// facet_ord -> collapse facet_id
current_segment_collapse_mapping: Vec<usize>,
// collapse facet_id -> count
current_segment_counts: Vec<u64>,
// collapse facet_id -> facet_ord
current_collapse_facet_ords: Vec<u64>,
facets: BTreeSet<Facet>, facets: BTreeSet<Facet>,
} }
pub struct FacetSegmentCollector {
reader: FacetReader,
facet_ords_buf: Vec<u64>,
// facet_ord -> collapse facet_id
collapse_mapping: Vec<usize>,
// collapse facet_id -> count
counts: Vec<u64>,
// collapse facet_id -> facet_ord
collapse_facet_ords: Vec<u64>,
}
fn skip<'a, I: Iterator<Item = &'a Facet>>( fn skip<'a, I: Iterator<Item = &'a Facet>>(
target: &[u8], target: &[u8],
collapse_it: &mut Peekable<I>, collapse_it: &mut Peekable<I>,
@@ -240,15 +222,8 @@ impl FacetCollector {
/// is of the proper type. /// is of the proper type.
pub fn for_field(field: Field) -> FacetCollector { pub fn for_field(field: Field) -> FacetCollector {
FacetCollector { FacetCollector {
facet_ords: Vec::with_capacity(255),
segment_counters: Vec::new(),
field, field,
ff_reader: None, facets: BTreeSet::default(),
facets: BTreeSet::new(),
current_segment_collapse_mapping: Vec::new(),
current_collapse_facet_ords: Vec::new(),
current_segment_counts: Vec::new(),
} }
} }
@@ -278,143 +253,102 @@ impl FacetCollector {
} }
self.facets.insert(facet); self.facets.insert(facet);
} }
fn set_collapse_mapping(&mut self, facet_reader: &FacetReader) {
self.current_segment_collapse_mapping.clear();
self.current_collapse_facet_ords.clear();
self.current_segment_counts.clear();
let mut collapse_facet_it = self.facets.iter().peekable();
self.current_collapse_facet_ords.push(0);
let mut facet_streamer = facet_reader.facet_dict().range().into_stream();
if !facet_streamer.advance() {
return;
}
'outer: loop {
// at the begining of this loop, facet_streamer
// is positionned on a term that has not been processed yet.
let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
match skip_result {
SkipResult::Reached => {
// we reach a facet we decided to collapse.
let collapse_depth = facet_depth(facet_streamer.key());
let mut collapsed_id = 0;
self.current_segment_collapse_mapping.push(0);
while facet_streamer.advance() {
let depth = facet_depth(facet_streamer.key());
if depth <= collapse_depth {
continue 'outer;
}
if depth == collapse_depth + 1 {
collapsed_id = self.current_collapse_facet_ords.len();
self.current_collapse_facet_ords
.push(facet_streamer.term_ord());
self.current_segment_collapse_mapping.push(collapsed_id);
} else {
self.current_segment_collapse_mapping.push(collapsed_id);
}
}
break;
}
SkipResult::End | SkipResult::OverStep => {
self.current_segment_collapse_mapping.push(0);
if !facet_streamer.advance() {
break;
}
}
}
}
}
fn finalize_segment(&mut self) {
if self.ff_reader.is_some() {
self.segment_counters.push(SegmentFacetCounter {
facet_reader: self.ff_reader.take().unwrap().into_inner(),
facet_ords: mem::replace(&mut self.current_collapse_facet_ords, Vec::new()),
facet_counts: mem::replace(&mut self.current_segment_counts, Vec::new()),
});
}
}
/// Returns the results of the collection.
///
/// This method does not just return the counters,
/// it also translates the facet ordinals of the last segment.
pub fn harvest(mut self) -> FacetCounts {
self.finalize_segment();
let collapsed_facet_ords: Vec<&[u64]> = self
.segment_counters
.iter()
.map(|segment_counter| &segment_counter.facet_ords[..])
.collect();
let collapsed_facet_counts: Vec<&[u64]> = self
.segment_counters
.iter()
.map(|segment_counter| &segment_counter.facet_counts[..])
.collect();
let facet_streams = self
.segment_counters
.iter()
.map(|seg_counts| seg_counts.facet_reader.facet_dict().range().into_stream())
.collect::<Vec<_>>();
let mut facet_merger = TermMerger::new(facet_streams);
let mut facet_counts = BTreeMap::new();
while facet_merger.advance() {
let count = facet_merger
.current_kvs()
.iter()
.map(|it| {
let seg_ord = it.segment_ord;
let term_ord = it.streamer.term_ord();
collapsed_facet_ords[seg_ord]
.binary_search(&term_ord)
.map(|collapsed_term_id| {
if collapsed_term_id == 0 {
0
} else {
collapsed_facet_counts[seg_ord][collapsed_term_id]
}
}).unwrap_or(0)
}).sum();
if count > 0u64 {
let bytes: Vec<u8> = facet_merger.key().to_owned();
// may create an corrupted facet if the term dicitonary is corrupted
let facet = unsafe { Facet::from_encoded(bytes) };
facet_counts.insert(facet, count);
}
}
FacetCounts { facet_counts }
}
} }
impl Collector for FacetCollector { impl Collector for FacetCollector {
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> { type Fruit = FacetCounts;
self.finalize_segment();
type SegmentFruit = FacetCounts;
type Child = FacetSegmentCollector;
fn for_segment(
&self,
_: SegmentLocalId,
reader: &SegmentReader,
) -> Result<FacetSegmentCollector> {
let facet_reader = reader.facet_reader(self.field)?; let facet_reader = reader.facet_reader(self.field)?;
self.set_collapse_mapping(&facet_reader);
self.current_segment_counts let mut collapse_mapping = Vec::new();
.resize(self.current_collapse_facet_ords.len(), 0); let mut counts = Vec::new();
self.ff_reader = Some(UnsafeCell::new(facet_reader)); let mut collapse_facet_ords = Vec::new();
Ok(())
let mut collapse_facet_it = self.facets.iter().peekable();
collapse_facet_ords.push(0);
{
let mut facet_streamer = facet_reader.facet_dict().range().into_stream();
if facet_streamer.advance() {
'outer: loop {
// at the begining of this loop, facet_streamer
// is positionned on a term that has not been processed yet.
let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
match skip_result {
SkipResult::Reached => {
// we reach a facet we decided to collapse.
let collapse_depth = facet_depth(facet_streamer.key());
let mut collapsed_id = 0;
collapse_mapping.push(0);
while facet_streamer.advance() {
let depth = facet_depth(facet_streamer.key());
if depth <= collapse_depth {
continue 'outer;
}
if depth == collapse_depth + 1 {
collapsed_id = collapse_facet_ords.len();
collapse_facet_ords.push(facet_streamer.term_ord());
collapse_mapping.push(collapsed_id);
} else {
collapse_mapping.push(collapsed_id);
}
}
break;
}
SkipResult::End | SkipResult::OverStep => {
collapse_mapping.push(0);
if !facet_streamer.advance() {
break;
}
}
}
}
}
}
counts.resize(collapse_facet_ords.len(), 0);
Ok(FacetSegmentCollector {
reader: facet_reader,
facet_ords_buf: Vec::with_capacity(255),
collapse_mapping,
counts,
collapse_facet_ords,
})
} }
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(&self, segments_facet_counts: Vec<FacetCounts>) -> Result<FacetCounts> {
let mut facet_counts: BTreeMap<Facet, u64> = BTreeMap::new();
for segment_facet_counts in segments_facet_counts {
for (facet, count) in segment_facet_counts.facet_counts {
*(facet_counts.entry(facet).or_insert(0)) += count;
}
}
Ok(FacetCounts { facet_counts })
}
}
impl SegmentCollector for FacetSegmentCollector {
type Fruit = FacetCounts;
fn collect(&mut self, doc: DocId, _: Score) { fn collect(&mut self, doc: DocId, _: Score) {
let facet_reader: &mut FacetReader = unsafe { self.reader.facet_ords(doc, &mut self.facet_ords_buf);
&mut *self
.ff_reader
.as_ref()
.expect("collect() was called before set_segment. This should never happen.")
.get()
};
facet_reader.facet_ords(doc, &mut self.facet_ords);
let mut previous_collapsed_ord: usize = usize::MAX; let mut previous_collapsed_ord: usize = usize::MAX;
for &facet_ord in &self.facet_ords { for &facet_ord in &self.facet_ords_buf {
let collapsed_ord = self.current_segment_collapse_mapping[facet_ord as usize]; let collapsed_ord = self.collapse_mapping[facet_ord as usize];
self.current_segment_counts[collapsed_ord] += if collapsed_ord == previous_collapsed_ord self.counts[collapsed_ord] += if collapsed_ord == previous_collapsed_ord {
{
0 0
} else { } else {
1 1
@@ -423,8 +357,23 @@ impl Collector for FacetCollector {
} }
} }
fn requires_scoring(&self) -> bool { /// Returns the results of the collection.
false ///
/// This method does not just return the counters,
/// it also translates the facet ordinals of this segment into facets.
fn harvest(self) -> FacetCounts {
let mut facet_counts = BTreeMap::new();
let facet_dict = self.reader.facet_dict();
for (collapsed_facet_ord, count) in self.counts.iter().cloned().enumerate() {
if count == 0 {
continue;
}
let mut facet = vec![];
let facet_ord = self.collapse_facet_ords[collapsed_facet_ord];
facet_dict.ord_to_term(facet_ord as u64, &mut facet);
facet_counts.insert(unsafe { Facet::from_encoded(facet) }, count);
}
FacetCounts { facet_counts }
} }
} }
@@ -505,14 +454,14 @@ mod tests {
use core::Index; use core::Index;
use query::AllQuery; use query::AllQuery;
use rand::distributions::Uniform; use rand::distributions::Uniform;
use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
use schema::Field; use schema::{Document, Facet, Field, Schema};
use schema::{Document, Facet, SchemaBuilder};
use std::iter; use std::iter;
#[test] #[test]
fn test_facet_collector_drilldown() { fn test_facet_collector_drilldown() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet"); let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -527,7 +476,8 @@ mod tests {
n /= 4; n /= 4;
let leaf = n % 5; let leaf = n % 5;
Facet::from(&format!("/top{}/mid{}/leaf{}", top, mid, leaf)) Facet::from(&format!("/top{}/mid{}/leaf{}", top, mid, leaf))
}).collect(); })
.collect();
for i in 0..num_facets * 10 { for i in 0..num_facets * 10 {
let mut doc = Document::new(); let mut doc = Document::new();
doc.add_facet(facet_field, facets[i % num_facets].clone()); doc.add_facet(facet_field, facets[i % num_facets].clone());
@@ -536,12 +486,10 @@ mod tests {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index.load_searchers().unwrap(); index.load_searchers().unwrap();
let searcher = index.searcher(); let searcher = index.searcher();
let mut facet_collector = FacetCollector::for_field(facet_field); let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet(Facet::from("/top1")); facet_collector.add_facet(Facet::from("/top1"));
searcher.search(&AllQuery, &mut facet_collector).unwrap(); let counts = searcher.search(&AllQuery, &facet_collector).unwrap();
let counts: FacetCounts = facet_collector.harvest();
{ {
let facets: Vec<(String, u64)> = counts let facets: Vec<(String, u64)> = counts
.get("/top1") .get("/top1")
@@ -555,18 +503,16 @@ mod tests {
("/top1/mid2", 50), ("/top1/mid2", 50),
("/top1/mid3", 50), ("/top1/mid3", 50),
] ]
.iter() .iter()
.map(|&(facet_str, count)| (String::from(facet_str), count)) .map(|&(facet_str, count)| (String::from(facet_str), count))
.collect::<Vec<_>>() .collect::<Vec<_>>()
); );
} }
} }
#[test] #[test]
#[should_panic( #[should_panic(expected = "Tried to add a facet which is a descendant of \
expected = "Tried to add a facet which is a descendant of \ an already added facet.")]
an already added facet."
)]
fn test_misused_facet_collector() { fn test_misused_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field(0)); let mut facet_collector = FacetCollector::for_field(Field(0));
facet_collector.add_facet(Facet::from("/country")); facet_collector.add_facet(Facet::from("/country"));
@@ -575,7 +521,7 @@ mod tests {
#[test] #[test]
fn test_doc_unsorted_multifacet() { fn test_doc_unsorted_multifacet() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facets"); let facet_field = schema_builder.add_facet_field("facets");
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -592,8 +538,7 @@ mod tests {
assert_eq!(searcher.num_docs(), 1); assert_eq!(searcher.num_docs(), 1);
let mut facet_collector = FacetCollector::for_field(facet_field); let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet("/subjects"); facet_collector.add_facet("/subjects");
searcher.search(&AllQuery, &mut facet_collector).unwrap(); let counts = searcher.search(&AllQuery, &facet_collector).unwrap();
let counts = facet_collector.harvest();
let facets: Vec<(&Facet, u64)> = counts.get("/subjects").collect(); let facets: Vec<(&Facet, u64)> = counts.get("/subjects").collect();
assert_eq!(facets[0].1, 1); assert_eq!(facets[0].1, 1);
} }
@@ -607,7 +552,7 @@ mod tests {
#[test] #[test]
fn test_facet_collector_topk() { fn test_facet_collector_topk() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet"); let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -619,14 +564,16 @@ mod tests {
let facet = Facet::from(&format!("/facet/{}", c)); let facet = Facet::from(&format!("/facet/{}", c));
let doc = doc!(facet_field => facet); let doc = doc!(facet_field => facet);
iter::repeat(doc).take(count) iter::repeat(doc).take(count)
}).map(|mut doc| { })
.map(|mut doc| {
doc.add_facet( doc.add_facet(
facet_field, facet_field,
&format!("/facet/{}", thread_rng().sample(&uniform)), &format!("/facet/{}", thread_rng().sample(&uniform)),
); );
doc doc
}).collect(); })
thread_rng().shuffle(&mut docs[..]); .collect();
docs[..].shuffle(&mut thread_rng());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for doc in docs { for doc in docs {
@@ -639,9 +586,8 @@ mod tests {
let mut facet_collector = FacetCollector::for_field(facet_field); let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet("/facet"); facet_collector.add_facet("/facet");
searcher.search(&AllQuery, &mut facet_collector).unwrap(); let counts: FacetCounts = searcher.search(&AllQuery, &facet_collector).unwrap();
let counts: FacetCounts = facet_collector.harvest();
{ {
let facets: Vec<(&Facet, u64)> = counts.top_k("/facet", 3); let facets: Vec<(&Facet, u64)> = counts.top_k("/facet", 3);
assert_eq!( assert_eq!(
@@ -664,13 +610,13 @@ mod bench {
use query::AllQuery; use query::AllQuery;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
use schema::Facet; use schema::Facet;
use schema::SchemaBuilder; use schema::Schema;
use test::Bencher; use test::Bencher;
use Index; use Index;
#[bench] #[bench]
fn bench_facet_collector(b: &mut Bencher) { fn bench_facet_collector(b: &mut Bencher) {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet"); let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -694,8 +640,8 @@ mod bench {
b.iter(|| { b.iter(|| {
let searcher = index.searcher(); let searcher = index.searcher();
let mut facet_collector = FacetCollector::for_field(facet_field); let facet_collector = FacetCollector::for_field(facet_field);
searcher.search(&AllQuery, &mut facet_collector).unwrap(); searcher.search(&AllQuery, &facet_collector).unwrap();
}); });
} }
} }


@@ -79,7 +79,7 @@ mod tests {
// make sure we have facet counters correctly filled // make sure we have facet counters correctly filled
fn test_facet_collector_results() { fn test_facet_collector_results() {
let mut schema_builder = schema::SchemaBuilder::new(); let mut schema_builder = schema::Schema::builder();
let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST); let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST); let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
let text_field = schema_builder.add_text_field("text", STRING); let text_field = schema_builder.add_text_field("text", STRING);


@@ -1,7 +1,91 @@
/*! /*!
Defines how the documents matching a search query should be processed.
# Collectors
Collectors define the information you want to extract from the documents matching the queries.
In tantivy jargon, we call this information your search "fruit".
Your fruit could for instance be:
- [the count of matching documents](./struct.Count.html)
- [the top 10 documents, by relevancy or by a fast field](./struct.TopDocs.html)
- [facet counts](./struct.FacetCollector.html)
At one point in your code, you will trigger the actual search operation by calling
[the `search(...)` method of your `Searcher` object](../struct.Searcher.html#method.search).
This call will look like this.
```verbatim
let fruit = searcher.search(&query, &collector)?;
```
Here the type of fruit is actually determined as an associated type of the collector (`Collector::Fruit`).
# Combining several collectors
A rich search experience often requires running several collectors on your search query.
For instance,
- selecting the top-K products matching your query
- counting the matching documents
- computing several facets
- computing statistics about the matching product prices
A simple and efficient way to do that is to pass your collectors as one tuple.
The resulting `Fruit` will then be a typed tuple with each collector's original fruits
in their respective position.
```rust
# extern crate tantivy;
# use tantivy::schema::*;
# use tantivy::*;
# use tantivy::query::*;
use tantivy::collector::{Count, TopDocs};
#
# fn main() -> tantivy::Result<()> {
# let mut schema_builder = Schema::builder();
# let title = schema_builder.add_text_field("title", TEXT);
# let schema = schema_builder.build();
# let index = Index::create_in_ram(schema);
# let mut index_writer = index.writer(3_000_000)?;
# index_writer.add_document(doc!(
# title => "The Name of the Wind",
# ));
# index_writer.add_document(doc!(
# title => "The Diary of Muadib",
# ));
# index_writer.commit().unwrap();
# index.load_searchers()?;
# let searcher = index.searcher();
# let query_parser = QueryParser::for_index(&index, vec![title]);
# let query = query_parser.parse_query("diary")?;
let (doc_count, top_docs): (usize, Vec<(Score, DocAddress)>) =
searcher.search(&query, &(Count, TopDocs::with_limit(2)))?;
# Ok(())
# }
```
The `Collector` trait is implemented for up to 4 collectors.
If you have more than 4 collectors, you can either group them into
tuples of tuples `(a,(b,(c,d)))`, or rely on `MultiCollector` (a sketch of the
nested-tuple form follows this documentation block).
# Combining several collectors dynamically
Combining collectors into a tuple is a zero-cost abstraction: everything
happens as if you had manually implemented a single collector
combining all of their features.
Unfortunately, it requires you to know your collector types at compile time.
If, on the other hand, the collectors depend on some query parameter,
you can rely on `MultiCollector`.
# Implementing your own collectors.
See the `custom_collector` example.
*/ */
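// Illustrative sketch (not part of this changeset): the nested-tuple form
// mentioned in the module documentation above, as it could look from a
// downstream crate. The function name is hypothetical; it assumes
// `Searcher::search` takes the query as a trait object, as in the example
// above, and it only uses collectors documented in this module.
#[allow(dead_code)]
fn nested_collectors_sketch(
    searcher: &::Searcher,
    query: &::query::Query,
) -> Result<(usize, (Vec<(Score, ::DocAddress)>, usize))> {
    // The fruit mirrors the shape of the collector tuple:
    // (Count, (TopDocs, Count)) -> (usize, (Vec<(Score, DocAddress)>, usize)).
    searcher.search(query, &(Count, (TopDocs::with_limit(2), Count)))
}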
use downcast;
use DocId; use DocId;
use Result; use Result;
use Score; use Score;
@@ -9,7 +93,7 @@ use SegmentLocalId;
use SegmentReader; use SegmentReader;
mod count_collector; mod count_collector;
pub use self::count_collector::CountCollector; pub use self::count_collector::Count;
mod multi_collector; mod multi_collector;
pub use self::multi_collector::MultiCollector; pub use self::multi_collector::MultiCollector;
@@ -17,237 +101,274 @@ pub use self::multi_collector::MultiCollector;
mod top_collector; mod top_collector;
mod top_score_collector; mod top_score_collector;
pub use self::top_score_collector::TopScoreCollector; pub use self::top_score_collector::TopDocs;
#[deprecated]
pub use self::top_score_collector::TopScoreCollector as TopCollector;
mod top_field_collector; mod top_field_collector;
pub use self::top_field_collector::TopFieldCollector; pub use self::top_field_collector::TopDocsByField;
mod facet_collector; mod facet_collector;
pub use self::facet_collector::FacetCollector; pub use self::facet_collector::FacetCollector;
mod chained_collector; /// `Fruit` is the type for the result of our collection.
pub use self::chained_collector::{chain, ChainedCollector}; /// e.g. `usize` for the `Count` collector.
pub trait Fruit: Send + downcast::Any {}
impl<T> Fruit for T where T: Send + downcast::Any {}
/// Collectors are in charge of collecting and retaining relevant /// Collectors are in charge of collecting and retaining relevant
/// information from the document found and scored by the query. /// information from the document found and scored by the query.
/// ///
///
/// For instance, /// For instance,
/// ///
/// - keeping track of the top 10 best documents /// - keeping track of the top 10 best documents
/// - computing a breakdown over a fast field /// - computing a breakdown over a fast field
/// - computing the number of documents matching the query /// - computing the number of documents matching the query
/// ///
/// Queries are in charge of pushing the `DocSet` to the collector. /// Our search index is in fact a collection of segments, so
/// a `Collector` trait is actually more of a factory to instantiate
/// `SegmentCollector`s for each segment.
/// ///
/// As they work on multiple segments, they first inform /// The collection logic itself is in the `SegmentCollector`.
/// the collector of a change in a segment and then
/// call the `collect` method to push the document to the collector.
///
/// Temporally, our collector will receive calls
/// - `.set_segment(0, segment_reader_0)`
/// - `.collect(doc0_of_segment_0)`
/// - `.collect(...)`
/// - `.collect(last_doc_of_segment_0)`
/// - `.set_segment(1, segment_reader_1)`
/// - `.collect(doc0_of_segment_1)`
/// - `.collect(...)`
/// - `.collect(last_doc_of_segment_1)`
/// - `...`
/// - `.collect(last_doc_of_last_segment)`
/// ///
/// Segments are not guaranteed to be visited in any specific order. /// Segments are not guaranteed to be visited in any specific order.
pub trait Collector { pub trait Collector: Sync {
/// `Fruit` is the type for the result of our collection.
/// e.g. `usize` for the `Count` collector.
type Fruit: Fruit;
type SegmentFruit: Fruit;
/// Type of the `SegmentCollector` associated with this collector.
type Child: SegmentCollector<Fruit = Self::SegmentFruit>;
/// `set_segment` is called before beginning to enumerate /// `for_segment` is called before beginning to enumerate
/// on this segment. /// on this segment.
fn set_segment( fn for_segment(
&mut self, &self,
segment_local_id: SegmentLocalId, segment_local_id: SegmentLocalId,
segment: &SegmentReader, segment: &SegmentReader,
) -> Result<()>; ) -> Result<Self::Child>;
/// The query pushes the scored document to the collector via this method.
fn collect(&mut self, doc: DocId, score: Score);
/// Returns true iff the collector requires to compute scores for documents. /// Returns true iff the collector requires to compute scores for documents.
fn requires_scoring(&self) -> bool; fn requires_scoring(&self) -> bool;
/// Combines the fruit associated to the collection of each segments
/// into one fruit.
fn merge_fruits(&self, segment_fruits: Vec<Self::SegmentFruit>) -> Result<Self::Fruit>;
} }
impl<'a, C: Collector> Collector for &'a mut C { /// The `SegmentCollector` is the trait in charge of defining the
fn set_segment( /// collect operation at the scale of the segment.
&mut self, ///
segment_local_id: SegmentLocalId, /// `.collect(doc, score)` will be called for every documents
segment: &SegmentReader, /// matching the query.
) -> Result<()> { pub trait SegmentCollector: 'static {
(*self).set_segment(segment_local_id, segment) /// `Fruit` is the type for the result of our collection.
} /// e.g. `usize` for the `Count` collector.
type Fruit: Fruit;
/// The query pushes the scored document to the collector via this method. /// The query pushes the scored document to the collector via this method.
fn collect(&mut self, doc: DocId, score: Score) { fn collect(&mut self, doc: DocId, score: Score);
C::collect(self, doc, score)
/// Extract the fruit of the collection from the `SegmentCollector`.
fn harvest(self) -> Self::Fruit;
}
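// Illustrative sketch (not part of this changeset): a minimal custom
// collector written against the two traits above. It counts matching
// documents per segment and sums the per-segment counts, mirroring the
// built-in `Count` collector. The names `DocCountSketch` and
// `DocCountSegmentSketch` are hypothetical; the sketch relies on the `use`
// items already present at the top of this module.
#[allow(dead_code)]
struct DocCountSketch;
#[allow(dead_code)]
struct DocCountSegmentSketch(usize);
impl Collector for DocCountSketch {
    type Fruit = usize;
    type SegmentFruit = usize;
    type Child = DocCountSegmentSketch;
    fn for_segment(&self, _: SegmentLocalId, _: &SegmentReader) -> Result<DocCountSegmentSketch> {
        // One fresh per-segment collector per segment; no reader state is needed here.
        Ok(DocCountSegmentSketch(0))
    }
    fn requires_scoring(&self) -> bool {
        false
    }
    fn merge_fruits(&self, segment_counts: Vec<usize>) -> Result<usize> {
        // The per-segment fruits are simply summed into the final fruit.
        Ok(segment_counts.into_iter().sum())
    }
}
impl SegmentCollector for DocCountSegmentSketch {
    type Fruit = usize;
    fn collect(&mut self, _doc: DocId, _score: Score) {
        self.0 += 1;
    }
    fn harvest(self) -> usize {
        self.0
    }
}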
// -----------------------------------------------
// Tuple implementations.
impl<Left, Right> Collector for (Left, Right)
where
Left: Collector,
Right: Collector,
{
type Fruit = (Left::Fruit, Right::Fruit);
type SegmentFruit = (Left::SegmentFruit, Right::SegmentFruit);
type Child = (Left::Child, Right::Child);
fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
let left = self.0.for_segment(segment_local_id, segment)?;
let right = self.1.for_segment(segment_local_id, segment)?;
Ok((left, right))
} }
fn requires_scoring(&self) -> bool { fn requires_scoring(&self) -> bool {
C::requires_scoring(self) self.0.requires_scoring() || self.1.requires_scoring()
} }
fn merge_fruits(
&self,
children: Vec<(Left::SegmentFruit, Right::SegmentFruit)>,
) -> Result<(Left::Fruit, Right::Fruit)> {
let mut left_fruits = vec![];
let mut right_fruits = vec![];
for (left_fruit, right_fruit) in children {
left_fruits.push(left_fruit);
right_fruits.push(right_fruit);
}
Ok((
self.0.merge_fruits(left_fruits)?,
self.1.merge_fruits(right_fruits)?,
))
}
}
impl<Left, Right> SegmentCollector for (Left, Right)
where
Left: SegmentCollector,
Right: SegmentCollector,
{
type Fruit = (Left::Fruit, Right::Fruit);
fn collect(&mut self, doc: DocId, score: Score) {
self.0.collect(doc, score);
self.1.collect(doc, score);
}
fn harvest(self) -> <Self as SegmentCollector>::Fruit {
(self.0.harvest(), self.1.harvest())
}
}
// 3-Tuple
impl<One, Two, Three> Collector for (One, Two, Three)
where
One: Collector,
Two: Collector,
Three: Collector,
{
type Fruit = (One::Fruit, Two::Fruit, Three::Fruit);
type SegmentFruit = (One::SegmentFruit, Two::SegmentFruit, Three::SegmentFruit);
type Child = (One::Child, Two::Child, Three::Child);
fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
let one = self.0.for_segment(segment_local_id, segment)?;
let two = self.1.for_segment(segment_local_id, segment)?;
let three = self.2.for_segment(segment_local_id, segment)?;
Ok((one, two, three))
}
fn requires_scoring(&self) -> bool {
self.0.requires_scoring() || self.1.requires_scoring() || self.2.requires_scoring()
}
fn merge_fruits(&self, children: Vec<Self::SegmentFruit>) -> Result<Self::Fruit> {
let mut one_fruits = vec![];
let mut two_fruits = vec![];
let mut three_fruits = vec![];
for (one_fruit, two_fruit, three_fruit) in children {
one_fruits.push(one_fruit);
two_fruits.push(two_fruit);
three_fruits.push(three_fruit);
}
Ok((
self.0.merge_fruits(one_fruits)?,
self.1.merge_fruits(two_fruits)?,
self.2.merge_fruits(three_fruits)?,
))
}
}
impl<One, Two, Three> SegmentCollector for (One, Two, Three)
where
One: SegmentCollector,
Two: SegmentCollector,
Three: SegmentCollector,
{
type Fruit = (One::Fruit, Two::Fruit, Three::Fruit);
fn collect(&mut self, doc: DocId, score: Score) {
self.0.collect(doc, score);
self.1.collect(doc, score);
self.2.collect(doc, score);
}
fn harvest(self) -> <Self as SegmentCollector>::Fruit {
(self.0.harvest(), self.1.harvest(), self.2.harvest())
}
}
// 4-Tuple
impl<One, Two, Three, Four> Collector for (One, Two, Three, Four)
where
One: Collector,
Two: Collector,
Three: Collector,
Four: Collector,
{
type Fruit = (One::Fruit, Two::Fruit, Three::Fruit, Four::Fruit);
type SegmentFruit = (One::SegmentFruit, Two::SegmentFruit, Three::SegmentFruit, Four::SegmentFruit);
type Child = (One::Child, Two::Child, Three::Child, Four::Child);
fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
let one = self.0.for_segment(segment_local_id, segment)?;
let two = self.1.for_segment(segment_local_id, segment)?;
let three = self.2.for_segment(segment_local_id, segment)?;
let four = self.3.for_segment(segment_local_id, segment)?;
Ok((one, two, three, four))
}
fn requires_scoring(&self) -> bool {
self.0.requires_scoring()
|| self.1.requires_scoring()
|| self.2.requires_scoring()
|| self.3.requires_scoring()
}
fn merge_fruits(&self, children: Vec<Self::SegmentFruit>) -> Result<Self::Fruit> {
let mut one_fruits = vec![];
let mut two_fruits = vec![];
let mut three_fruits = vec![];
let mut four_fruits = vec![];
for (one_fruit, two_fruit, three_fruit, four_fruit) in children {
one_fruits.push(one_fruit);
two_fruits.push(two_fruit);
three_fruits.push(three_fruit);
four_fruits.push(four_fruit);
}
Ok((
self.0.merge_fruits(one_fruits)?,
self.1.merge_fruits(two_fruits)?,
self.2.merge_fruits(three_fruits)?,
self.3.merge_fruits(four_fruits)?,
))
}
}
impl<One, Two, Three, Four> SegmentCollector for (One, Two, Three, Four)
where
One: SegmentCollector,
Two: SegmentCollector,
Three: SegmentCollector,
Four: SegmentCollector,
{
type Fruit = (One::Fruit, Two::Fruit, Three::Fruit, Four::Fruit);
fn collect(&mut self, doc: DocId, score: Score) {
self.0.collect(doc, score);
self.1.collect(doc, score);
self.2.collect(doc, score);
self.3.collect(doc, score);
}
fn harvest(self) -> <Self as SegmentCollector>::Fruit {
(
self.0.harvest(),
self.1.harvest(),
self.2.harvest(),
self.3.harvest(),
)
}
}
#[allow(missing_docs)]
mod downcast_impl {
downcast!(super::Fruit);
} }
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests;
use super::*;
use core::SegmentReader;
use fastfield::BytesFastFieldReader;
use fastfield::FastFieldReader;
use schema::Field;
use DocId;
use Score;
use SegmentLocalId;
/// Stores all of the doc ids.
/// This collector is only used for tests.
/// It is unusable in practice, as it does not store
/// the segment ordinals.
pub struct TestCollector {
offset: DocId,
segment_max_doc: DocId,
docs: Vec<DocId>,
scores: Vec<Score>,
}
impl TestCollector {
/// Return the exhaustive list of documents.
pub fn docs(self) -> Vec<DocId> {
self.docs
}
pub fn scores(self) -> Vec<Score> {
self.scores
}
}
impl Default for TestCollector {
fn default() -> TestCollector {
TestCollector {
offset: 0,
segment_max_doc: 0,
docs: Vec::new(),
scores: Vec::new(),
}
}
}
impl Collector for TestCollector {
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.offset += self.segment_max_doc;
self.segment_max_doc = reader.max_doc();
Ok(())
}
fn collect(&mut self, doc: DocId, score: Score) {
self.docs.push(doc + self.offset);
self.scores.push(score);
}
fn requires_scoring(&self) -> bool {
true
}
}
/// Collects in order all of the fast fields for all of the
/// doc in the `DocSet`
///
/// This collector is mainly useful for tests.
pub struct FastFieldTestCollector {
vals: Vec<u64>,
field: Field,
ff_reader: Option<FastFieldReader<u64>>,
}
impl FastFieldTestCollector {
pub fn for_field(field: Field) -> FastFieldTestCollector {
FastFieldTestCollector {
vals: Vec::new(),
field,
ff_reader: None,
}
}
pub fn vals(self) -> Vec<u64> {
self.vals
}
}
impl Collector for FastFieldTestCollector {
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.ff_reader = Some(reader.fast_field_reader(self.field)?);
Ok(())
}
fn collect(&mut self, doc: DocId, _score: Score) {
let val = self.ff_reader.as_ref().unwrap().get(doc);
self.vals.push(val);
}
fn requires_scoring(&self) -> bool {
false
}
}
/// Collects in order all of the fast field bytes for all of the
/// docs in the `DocSet`
///
/// This collector is mainly useful for tests.
pub struct BytesFastFieldTestCollector {
vals: Vec<u8>,
field: Field,
ff_reader: Option<BytesFastFieldReader>,
}
impl BytesFastFieldTestCollector {
pub fn for_field(field: Field) -> BytesFastFieldTestCollector {
BytesFastFieldTestCollector {
vals: Vec::new(),
field,
ff_reader: None,
}
}
pub fn vals(self) -> Vec<u8> {
self.vals
}
}
impl Collector for BytesFastFieldTestCollector {
fn set_segment(&mut self, _segment_local_id: u32, segment: &SegmentReader) -> Result<()> {
self.ff_reader = Some(segment.bytes_fast_field_reader(self.field)?);
Ok(())
}
fn collect(&mut self, doc: u32, _score: f32) {
let val = self.ff_reader.as_ref().unwrap().get_val(doc);
self.vals.extend(val);
}
fn requires_scoring(&self) -> bool {
false
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use collector::{Collector, CountCollector};
use test::Bencher;
#[bench]
fn build_collector(b: &mut Bencher) {
b.iter(|| {
let mut count_collector = CountCollector::default();
let docs: Vec<u32> = (0..1_000_000).collect();
for doc in docs {
count_collector.collect(doc, 1f32);
}
count_collector.count()
});
}
}


@@ -1,9 +1,98 @@
use super::Collector; use super::Collector;
use super::SegmentCollector;
use collector::Fruit;
use downcast::Downcast;
use std::marker::PhantomData;
use DocId; use DocId;
use Result; use Result;
use Score; use Score;
use SegmentLocalId; use SegmentLocalId;
use SegmentReader; use SegmentReader;
use TantivyError;
pub struct MultiFruit {
sub_fruits: Vec<Option<Box<Fruit>>>,
}
pub struct CollectorWrapper<TCollector: Collector>(TCollector);
impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
type Fruit = Box<Fruit>;
type SegmentFruit = Box<Fruit>;
type Child = Box<BoxableSegmentCollector>;
fn for_segment(
&self,
segment_local_id: u32,
reader: &SegmentReader,
) -> Result<Box<BoxableSegmentCollector>> {
let child = self.0.for_segment(segment_local_id, reader)?;
Ok(Box::new(SegmentCollectorWrapper(child)))
}
fn requires_scoring(&self) -> bool {
self.0.requires_scoring()
}
fn merge_fruits(&self, children: Vec<<Self as Collector>::Fruit>) -> Result<Box<Fruit>> {
let typed_fruit: Vec<TCollector::SegmentFruit> = children
.into_iter()
.map(|untyped_fruit| {
Downcast::<TCollector::SegmentFruit>::downcast(untyped_fruit)
.map(|boxed_but_typed| *boxed_but_typed)
.map_err(|e| {
let err_msg = format!("Failed to cast child collector fruit. {:?}", e);
TantivyError::InvalidArgument(err_msg)
})
})
.collect::<Result<_>>()?;
let merged_fruit = self.0.merge_fruits(typed_fruit)?;
Ok(Box::new(merged_fruit))
}
}
impl SegmentCollector for Box<BoxableSegmentCollector> {
type Fruit = Box<Fruit>;
fn collect(&mut self, doc: u32, score: f32) {
self.as_mut().collect(doc, score);
}
fn harvest(self) -> Box<Fruit> {
BoxableSegmentCollector::harvest_from_box(self)
}
}
pub trait BoxableSegmentCollector {
fn collect(&mut self, doc: u32, score: f32);
fn harvest_from_box(self: Box<Self>) -> Box<Fruit>;
}
pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegmentCollector);
impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
for SegmentCollectorWrapper<TSegmentCollector>
{
fn collect(&mut self, doc: u32, score: f32) {
self.0.collect(doc, score);
}
fn harvest_from_box(self: Box<Self>) -> Box<Fruit> {
Box::new(self.0.harvest())
}
}
pub struct FruitHandle<TFruit: Fruit> {
pos: usize,
_phantom: PhantomData<TFruit>,
}
impl<TFruit: Fruit> FruitHandle<TFruit> {
pub fn extract(self, fruits: &mut MultiFruit) -> TFruit {
let boxed_fruit = fruits.sub_fruits[self.pos].take().expect("");
*Downcast::<TFruit>::downcast(boxed_fruit).expect("Failed")
}
}
/// Multicollector makes it possible to collect on more than one collector. /// Multicollector makes it possible to collect on more than one collector.
/// It should only be used for use cases where the Collector types is unknown /// It should only be used for use cases where the Collector types is unknown
@@ -13,14 +102,14 @@ use SegmentReader;
/// ```rust /// ```rust
/// #[macro_use] /// #[macro_use]
/// extern crate tantivy; /// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result}; /// use tantivy::{Index, Result};
/// use tantivy::collector::{CountCollector, TopCollector, MultiCollector}; /// use tantivy::collector::{Count, TopDocs, MultiCollector};
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// ///
/// # fn main() { example().unwrap(); } /// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new(); /// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT); /// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema); /// let index = Index::create_in_ram(schema);
@@ -44,55 +133,117 @@ use SegmentReader;
/// index.load_searchers()?; /// index.load_searchers()?;
/// let searcher = index.searcher(); /// let searcher = index.searcher();
/// ///
/// { /// let mut collectors = MultiCollector::new();
/// let mut top_collector = TopCollector::with_limit(2); /// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
/// let mut count_collector = CountCollector::default(); /// let count_handle = collectors.add_collector(Count);
/// { /// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let mut collectors = /// let query = query_parser.parse_query("diary")?;
/// MultiCollector::from(vec![&mut top_collector, &mut count_collector]); /// let mut multi_fruit = searcher.search(&query, &collectors)?;
/// let query_parser = QueryParser::for_index(&index, vec![title]); ///
/// let query = query_parser.parse_query("diary")?; /// let count = count_handle.extract(&mut multi_fruit);
/// searcher.search(&*query, &mut collectors).unwrap(); /// let top_docs = top_docs_handle.extract(&mut multi_fruit);
/// } ///
/// assert_eq!(count_collector.count(), 2); /// # assert_eq!(count, 2);
/// assert!(top_collector.at_capacity()); /// # assert_eq!(top_docs.len(), 2);
/// }
/// ///
/// Ok(()) /// Ok(())
/// } /// }
/// ``` /// ```
#[allow(clippy::type_complexity)]
#[derive(Default)]
pub struct MultiCollector<'a> { pub struct MultiCollector<'a> {
collectors: Vec<&'a mut Collector>, collector_wrappers:
Vec<Box<Collector<Child = Box<BoxableSegmentCollector>, Fruit = Box<Fruit>, SegmentFruit = Box<Fruit>> + 'a>>,
} }
impl<'a> MultiCollector<'a> { impl<'a> MultiCollector<'a> {
/// Constructor /// Create a new `MultiCollector`
pub fn from(collectors: Vec<&'a mut Collector>) -> MultiCollector { pub fn new() -> Self {
MultiCollector { collectors } Default::default()
}
/// Add a new collector to our `MultiCollector`.
pub fn add_collector<'b: 'a, TCollector: Collector + 'b>(
&mut self,
collector: TCollector,
) -> FruitHandle<TCollector::Fruit> {
let pos = self.collector_wrappers.len();
self.collector_wrappers
.push(Box::new(CollectorWrapper(collector)));
FruitHandle {
pos,
_phantom: PhantomData,
}
} }
} }
impl<'a> Collector for MultiCollector<'a> { impl<'a> Collector for MultiCollector<'a> {
fn set_segment(
&mut self, type Fruit = MultiFruit;
type SegmentFruit = MultiFruit;
type Child = MultiCollectorChild;
fn for_segment(
&self,
segment_local_id: SegmentLocalId, segment_local_id: SegmentLocalId,
segment: &SegmentReader, segment: &SegmentReader,
) -> Result<()> { ) -> Result<MultiCollectorChild> {
for collector in &mut self.collectors { let children = self
collector.set_segment(segment_local_id, segment)?; .collector_wrappers
} .iter()
Ok(()) .map(|collector_wrapper| collector_wrapper.for_segment(segment_local_id, segment))
.collect::<Result<Vec<_>>>()?;
Ok(MultiCollectorChild { children })
} }
fn requires_scoring(&self) -> bool {
self.collector_wrappers.iter().any(|c| c.requires_scoring())
}
fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> Result<MultiFruit> {
let mut segment_fruits_list: Vec<Vec<Box<Fruit>>> = (0..self.collector_wrappers.len())
.map(|_| Vec::with_capacity(segments_multifruits.len()))
.collect::<Vec<_>>();
for segment_multifruit in segments_multifruits {
for (idx, segment_fruit_opt) in segment_multifruit.sub_fruits.into_iter().enumerate() {
if let Some(segment_fruit) = segment_fruit_opt {
segment_fruits_list[idx].push(segment_fruit);
}
}
}
let sub_fruits = self
.collector_wrappers
.iter()
.zip(segment_fruits_list)
.map(|(child_collector, segment_fruits)| {
Ok(Some(child_collector.merge_fruits(segment_fruits)?))
})
.collect::<Result<_>>()?;
Ok(MultiFruit { sub_fruits })
}
}
pub struct MultiCollectorChild {
children: Vec<Box<BoxableSegmentCollector>>,
}
impl SegmentCollector for MultiCollectorChild {
type Fruit = MultiFruit;
fn collect(&mut self, doc: DocId, score: Score) { fn collect(&mut self, doc: DocId, score: Score) {
for collector in &mut self.collectors { for child in &mut self.children {
collector.collect(doc, score); child.collect(doc, score);
} }
} }
fn requires_scoring(&self) -> bool {
self.collectors fn harvest(self) -> MultiFruit {
.iter() MultiFruit {
.any(|collector| collector.requires_scoring()) sub_fruits: self
.children
.into_iter()
.map(|child| Some(child.harvest()))
.collect(),
}
} }
} }
@@ -100,20 +251,42 @@ impl<'a> Collector for MultiCollector<'a> {
mod tests { mod tests {
use super::*; use super::*;
use collector::{Collector, CountCollector, TopScoreCollector}; use collector::{Count, TopDocs};
use query::TermQuery;
use schema::IndexRecordOption;
use schema::{Schema, TEXT};
use Index;
use Term;
#[test] #[test]
fn test_multi_collector() { fn test_multi_collector() {
let mut top_collector = TopScoreCollector::with_limit(2); let mut schema_builder = Schema::builder();
let mut count_collector = CountCollector::default(); let text = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{ {
let mut collectors = let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
MultiCollector::from(vec![&mut top_collector, &mut count_collector]); index_writer.add_document(doc!(text=>"abc"));
collectors.collect(1, 0.2); index_writer.add_document(doc!(text=>"abc abc abc"));
collectors.collect(2, 0.1); index_writer.add_document(doc!(text=>"abc abc"));
collectors.collect(3, 0.5); index_writer.commit().unwrap();
index_writer.add_document(doc!(text=>""));
index_writer.add_document(doc!(text=>"abc abc abc abc"));
index_writer.add_document(doc!(text=>"abc"));
index_writer.commit().unwrap();
} }
assert_eq!(count_collector.count(), 3); index.load_searchers().unwrap();
assert!(top_collector.at_capacity()); let searcher = index.searcher();
let term = Term::from_field_text(text, "abc");
let query = TermQuery::new(term, IndexRecordOption::Basic);
let mut collectors = MultiCollector::new();
let topdocs_handler = collectors.add_collector(TopDocs::with_limit(2));
let count_handler = collectors.add_collector(Count);
let mut multifruits = searcher.search(&query, &mut collectors).unwrap();
assert_eq!(count_handler.extract(&mut multifruits), 5);
assert_eq!(topdocs_handler.extract(&mut multifruits).len(), 2);
} }
} }

src/collector/tests.rs (new file, 205 lines)

@@ -0,0 +1,205 @@
use super::*;
use core::SegmentReader;
use fastfield::BytesFastFieldReader;
use fastfield::FastFieldReader;
use schema::Field;
use DocAddress;
use DocId;
use Score;
use SegmentLocalId;
/// Stores all of the doc ids.
/// This collector is only used for tests.
/// It is unusable in practice, as it does not store
/// the segment ordinals.
pub struct TestCollector;
pub struct TestSegmentCollector {
segment_id: SegmentLocalId,
fruit: TestFruit,
}
#[derive(Default)]
pub struct TestFruit {
docs: Vec<DocAddress>,
scores: Vec<Score>,
}
impl TestFruit {
/// Return the list of matching documents exhaustively.
pub fn docs(&self) -> &[DocAddress] {
&self.docs[..]
}
pub fn scores(&self) -> &[Score] {
&self.scores[..]
}
}
impl Collector for TestCollector {
type Fruit = TestFruit;
type SegmentFruit = Self::Fruit;
type Child = TestSegmentCollector;
fn for_segment(
&self,
segment_id: SegmentLocalId,
_reader: &SegmentReader,
) -> Result<TestSegmentCollector> {
Ok(TestSegmentCollector {
segment_id,
fruit: TestFruit::default(),
})
}
fn requires_scoring(&self) -> bool {
true
}
fn merge_fruits(&self, mut children: Vec<TestFruit>) -> Result<TestFruit> {
children.sort_by_key(|fruit| {
if fruit.docs().is_empty() {
0
} else {
fruit.docs()[0].segment_ord()
}
});
let mut docs = vec![];
let mut scores = vec![];
for child in children {
docs.extend(child.docs());
scores.extend(child.scores);
}
Ok(TestFruit { docs, scores })
}
}
impl SegmentCollector for TestSegmentCollector {
type Fruit = TestFruit;
fn collect(&mut self, doc: DocId, score: Score) {
self.fruit.docs.push(DocAddress(self.segment_id, doc));
self.fruit.scores.push(score);
}
fn harvest(self) -> <Self as SegmentCollector>::Fruit {
self.fruit
}
}
/// Collects in order all of the fast fields for all of the
/// doc in the `DocSet`
///
/// This collector is mainly useful for tests.
pub struct FastFieldTestCollector {
field: Field,
}
pub struct FastFieldSegmentCollector {
vals: Vec<u64>,
reader: FastFieldReader<u64>,
}
impl FastFieldTestCollector {
pub fn for_field(field: Field) -> FastFieldTestCollector {
FastFieldTestCollector { field }
}
}
impl Collector for FastFieldTestCollector {
type Fruit = Vec<u64>;
type SegmentFruit = Self::Fruit;
type Child = FastFieldSegmentCollector;
fn for_segment(
&self,
_: SegmentLocalId,
reader: &SegmentReader,
) -> Result<FastFieldSegmentCollector> {
Ok(FastFieldSegmentCollector {
vals: Vec::new(),
reader: reader.fast_field_reader(self.field)?,
})
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(&self, children: Vec<Vec<u64>>) -> Result<Vec<u64>> {
Ok(children.into_iter().flat_map(|v| v.into_iter()).collect())
}
}
impl SegmentCollector for FastFieldSegmentCollector {
type Fruit = Vec<u64>;
fn collect(&mut self, doc: DocId, _score: Score) {
let val = self.reader.get(doc);
self.vals.push(val);
}
fn harvest(self) -> Vec<u64> {
self.vals
}
}
/// Collects in order all of the fast field bytes for all of the
/// docs in the `DocSet`
///
/// This collector is mainly useful for tests.
pub struct BytesFastFieldTestCollector {
field: Field,
}
pub struct BytesFastFieldSegmentCollector {
vals: Vec<u8>,
reader: BytesFastFieldReader,
}
impl BytesFastFieldTestCollector {
pub fn for_field(field: Field) -> BytesFastFieldTestCollector {
BytesFastFieldTestCollector { field }
}
}
impl Collector for BytesFastFieldTestCollector {
type Fruit = Vec<u8>;
type SegmentFruit = Self::Fruit;
type Child = BytesFastFieldSegmentCollector;
fn for_segment(
&self,
_segment_local_id: u32,
segment: &SegmentReader,
) -> Result<BytesFastFieldSegmentCollector> {
Ok(BytesFastFieldSegmentCollector {
vals: Vec::new(),
reader: segment.bytes_fast_field_reader(self.field)?,
})
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(&self, children: Vec<Vec<u8>>) -> Result<Vec<u8>> {
Ok(children.into_iter().flat_map(|c| c.into_iter()).collect())
}
}
impl SegmentCollector for BytesFastFieldSegmentCollector {
type Fruit = Vec<u8>;
fn collect(&mut self, doc: u32, _score: f32) {
let data = self.reader.get_val(doc);
self.vals.extend(data);
}
fn harvest(self) -> <Self as SegmentCollector>::Fruit {
self.vals
}
}


@@ -1,56 +1,59 @@
use serde::export::PhantomData;
use std::cmp::Ordering; use std::cmp::Ordering;
use std::collections::BinaryHeap; use std::collections::BinaryHeap;
use DocAddress; use DocAddress;
use DocId; use DocId;
use Result;
use SegmentLocalId; use SegmentLocalId;
use SegmentReader;
/// Contains a feature (field, score, etc.) of a document along with the document address. /// Contains a feature (field, score, etc.) of a document along with the document address.
/// ///
/// It has a custom implementation of `PartialOrd` that reverses the order. This is because the /// It has a custom implementation of `PartialOrd` that reverses the order. This is because the
/// default Rust heap is a max heap, whereas a min heap is needed. /// default Rust heap is a max heap, whereas a min heap is needed.
#[derive(Clone, Copy)] ///
pub struct ComparableDoc<T> { /// WARNING: equality is not what you would expect here.
/// Two elements are equal if their feature is equal, regardless of whether `doc`
/// is equal. This should be perfectly fine for this usage, but let's make sure this
/// struct is never public.
struct ComparableDoc<T, D> {
feature: T, feature: T,
doc_address: DocAddress, doc: D,
} }
impl<T: PartialOrd> PartialOrd for ComparableDoc<T> { impl<T: PartialOrd, D> PartialOrd for ComparableDoc<T, D> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other)) Some(self.cmp(other))
} }
} }
impl<T: PartialOrd> Ord for ComparableDoc<T> { impl<T: PartialOrd, D> Ord for ComparableDoc<T, D> {
#[inline] #[inline]
fn cmp(&self, other: &Self) -> Ordering { fn cmp(&self, other: &Self) -> Ordering {
other other
.feature .feature
.partial_cmp(&self.feature) .partial_cmp(&self.feature)
.unwrap_or_else(|| other.doc_address.cmp(&self.doc_address)) .unwrap_or_else(|| Ordering::Equal)
} }
} }
impl<T: PartialOrd> PartialEq for ComparableDoc<T> { impl<T: PartialOrd, D> PartialEq for ComparableDoc<T, D> {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal self.cmp(other) == Ordering::Equal
} }
} }
impl<T: PartialOrd> Eq for ComparableDoc<T> {} impl<T: PartialOrd, D> Eq for ComparableDoc<T, D> {}
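// Illustrative sketch (not part of this changeset): why the reversed
// comparison above turns the std max-heap into the min-heap a top-K
// collector needs. `MinScoreSketch` is a hypothetical, std-only stand-in
// for `ComparableDoc`; with the operands swapped in `cmp`, the "greatest"
// heap element is the entry with the *lowest* feature, i.e. the first
// candidate to evict once the collector is at capacity. It relies on the
// `Ordering` and `BinaryHeap` imports at the top of this file.
#[allow(dead_code)]
struct MinScoreSketch(f32);
impl PartialEq for MinScoreSketch {
    fn eq(&self, other: &Self) -> bool {
        self.cmp(other) == Ordering::Equal
    }
}
impl Eq for MinScoreSketch {}
impl PartialOrd for MinScoreSketch {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for MinScoreSketch {
    fn cmp(&self, other: &Self) -> Ordering {
        // Swapped operands: a larger score compares as "smaller".
        other.0.partial_cmp(&self.0).unwrap_or(Ordering::Equal)
    }
}
#[allow(dead_code)]
fn min_heap_sketch() {
    let mut heap = BinaryHeap::new();
    for &score in &[0.8f32, 0.2, 0.5] {
        heap.push(MinScoreSketch(score));
    }
    // The top of the max-heap is now the lowest score, not the highest.
    assert_eq!(heap.peek().map(|entry| entry.0), Some(0.2));
}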
/// The Top Collector keeps track of the K documents pub(crate) struct TopCollector<T> {
/// sorted by type `T`.
///
/// The implementation is based on a `BinaryHeap`.
/// The theoretical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
pub struct TopCollector<T> {
limit: usize, limit: usize,
heap: BinaryHeap<ComparableDoc<T>>, _marker: PhantomData<T>,
segment_id: u32,
} }
impl<T: PartialOrd + Clone> TopCollector<T> { impl<T> TopCollector<T>
where
T: PartialOrd + Clone,
{
/// Creates a top collector, with a number of documents equal to "limit". /// Creates a top collector, with a number of documents equal to "limit".
/// ///
/// # Panics /// # Panics
@@ -61,127 +64,156 @@ impl<T: PartialOrd + Clone> TopCollector<T> {
} }
TopCollector { TopCollector {
limit, limit,
heap: BinaryHeap::with_capacity(limit), _marker: PhantomData,
segment_id: 0,
} }
} }
/// Returns K best documents sorted in decreasing order. pub fn limit(&self) -> usize {
/// self.limit
/// Calling this method triggers the sort.
/// The result of the sort is not cached.
pub fn docs(&self) -> Vec<DocAddress> {
self.top_docs()
.into_iter()
.map(|(_feature, doc)| doc)
.collect()
} }
/// Returns K best FeatureDocuments sorted in decreasing order. pub fn merge_fruits(
/// &self,
/// Calling this method triggers the sort. children: Vec<Vec<(T, DocAddress)>>,
/// The result of the sort is not cached. ) -> Result<Vec<(T, DocAddress)>> {
pub fn top_docs(&self) -> Vec<(T, DocAddress)> { if self.limit == 0 {
let mut feature_docs: Vec<ComparableDoc<T>> = self.heap.iter().cloned().collect(); return Ok(Vec::new());
feature_docs.sort(); }
feature_docs let mut top_collector = BinaryHeap::new();
for child_fruit in children {
for (feature, doc) in child_fruit {
if top_collector.len() < self.limit {
top_collector.push(ComparableDoc { feature, doc });
} else if let Some(mut head) = top_collector.peek_mut() {
if head.feature < feature {
*head = ComparableDoc { feature, doc };
}
}
}
}
Ok(top_collector
.into_sorted_vec()
.into_iter() .into_iter()
.map( .map(|cdoc| (cdoc.feature, cdoc.doc))
|ComparableDoc { .collect())
feature, }
doc_address,
}| (feature, doc_address), pub(crate) fn for_segment(
).collect() &self,
segment_id: SegmentLocalId,
_: &SegmentReader,
) -> Result<TopSegmentCollector<T>> {
Ok(TopSegmentCollector::new(segment_id, self.limit))
}
}
/// The Top Collector keeps track of the K documents
/// sorted by type `T`.
///
/// The implementation is based on a `BinaryHeap`.
/// The theoretical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
pub(crate) struct TopSegmentCollector<T> {
limit: usize,
heap: BinaryHeap<ComparableDoc<T, DocId>>,
segment_id: u32,
}
impl<T: PartialOrd> TopSegmentCollector<T> {
fn new(segment_id: SegmentLocalId, limit: usize) -> TopSegmentCollector<T> {
TopSegmentCollector {
limit,
heap: BinaryHeap::with_capacity(limit),
segment_id,
}
}
}
impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
pub fn harvest(self) -> Vec<(T, DocAddress)> {
let segment_id = self.segment_id;
self.heap
.into_sorted_vec()
.into_iter()
.map(|comparable_doc| {
(
comparable_doc.feature,
DocAddress(segment_id, comparable_doc.doc),
)
})
.collect()
} }
/// Return true iff at least K documents have gone through /// Return true iff at least K documents have gone through
/// the collector. /// the collector.
#[inline] #[inline(always)]
pub fn at_capacity(&self) -> bool { pub(crate) fn at_capacity(&self) -> bool {
self.heap.len() >= self.limit self.heap.len() >= self.limit
} }
/// Sets the segment local ID for the collector
pub fn set_segment_id(&mut self, segment_id: SegmentLocalId) {
self.segment_id = segment_id;
}
/// Collects a document scored by the given feature /// Collects a document scored by the given feature
/// ///
/// It collects documents until it has reached the max capacity. Once it reaches capacity, it /// It collects documents until it has reached the max capacity. Once it reaches capacity, it
/// will compare the lowest scoring item with the given one and keep whichever is greater. /// will compare the lowest scoring item with the given one and keep whichever is greater.
#[inline(always)]
pub fn collect(&mut self, doc: DocId, feature: T) { pub fn collect(&mut self, doc: DocId, feature: T) {
if self.at_capacity() { if self.at_capacity() {
// It's ok to unwrap as long as a limit of 0 is forbidden. // It's ok to unwrap as long as a limit of 0 is forbidden.
let limit_doc: ComparableDoc<T> = self if let Some(limit_feature) = self.heap.peek().map(|head| head.feature.clone()) {
.heap if limit_feature < feature {
.peek() if let Some(mut head) = self.heap.peek_mut() {
.expect("Top collector with size 0 is forbidden") head.feature = feature;
.clone(); head.doc = doc;
if limit_doc.feature < feature { }
let mut mut_head = self }
.heap
.peek_mut()
.expect("Top collector with size 0 is forbidden");
mut_head.feature = feature;
mut_head.doc_address = DocAddress(self.segment_id, doc);
} }
} else { } else {
let wrapped_doc = ComparableDoc { // we have not reached capacity yet, so we can just push the
feature, // element.
doc_address: DocAddress(self.segment_id, doc), self.heap.push(ComparableDoc { feature, doc });
};
self.heap.push(wrapped_doc);
} }
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::{TopCollector, TopSegmentCollector};
use DocId; use DocAddress;
use Score; use Score;
#[test] #[test]
fn test_top_collector_not_at_capacity() { fn test_top_collector_not_at_capacity() {
let mut top_collector = TopCollector::with_limit(4); let mut top_collector = TopSegmentCollector::new(0, 4);
top_collector.collect(1, 0.8); top_collector.collect(1, 0.8);
top_collector.collect(3, 0.2); top_collector.collect(3, 0.2);
top_collector.collect(5, 0.3); top_collector.collect(5, 0.3);
assert!(!top_collector.at_capacity()); assert_eq!(
let score_docs: Vec<(Score, DocId)> = top_collector top_collector.harvest(),
.top_docs() vec![
.into_iter() (0.8, DocAddress(0, 1)),
.map(|(score, doc_address)| (score, doc_address.doc())) (0.3, DocAddress(0, 5)),
.collect(); (0.2, DocAddress(0, 3))
assert_eq!(score_docs, vec![(0.8, 1), (0.3, 5), (0.2, 3)]); ]
);
} }
#[test] #[test]
fn test_top_collector_at_capacity() { fn test_top_collector_at_capacity() {
let mut top_collector = TopCollector::with_limit(4); let mut top_collector = TopSegmentCollector::new(0, 4);
top_collector.collect(1, 0.8); top_collector.collect(1, 0.8);
top_collector.collect(3, 0.2); top_collector.collect(3, 0.2);
top_collector.collect(5, 0.3); top_collector.collect(5, 0.3);
top_collector.collect(7, 0.9); top_collector.collect(7, 0.9);
top_collector.collect(9, -0.2); top_collector.collect(9, -0.2);
assert!(top_collector.at_capacity()); assert_eq!(
{ top_collector.harvest(),
let score_docs: Vec<(Score, DocId)> = top_collector vec![
.top_docs() (0.9, DocAddress(0, 7)),
.into_iter() (0.8, DocAddress(0, 1)),
.map(|(score, doc_address)| (score, doc_address.doc())) (0.3, DocAddress(0, 5)),
.collect(); (0.2, DocAddress(0, 3))
assert_eq!(score_docs, vec![(0.9, 7), (0.8, 1), (0.3, 5), (0.2, 3)]); ]
} );
{
let docs: Vec<DocId> = top_collector
.docs()
.into_iter()
.map(|doc_address| doc_address.doc())
.collect();
assert_eq!(docs, vec![7, 1, 5, 3]);
}
} }
#[test] #[test]
@@ -189,5 +221,4 @@ mod tests {
fn test_top_0() { fn test_top_0() {
let _collector: TopCollector<Score> = TopCollector::with_limit(0); let _collector: TopCollector<Score> = TopCollector::with_limit(0);
} }
} }


@@ -1,12 +1,13 @@
use super::Collector; use super::Collector;
use collector::top_collector::TopCollector; use collector::top_collector::TopCollector;
use collector::top_collector::TopSegmentCollector;
use collector::SegmentCollector;
use fastfield::FastFieldReader; use fastfield::FastFieldReader;
use fastfield::FastValue; use fastfield::FastValue;
use schema::Field; use schema::Field;
use DocAddress; use DocAddress;
use DocId;
use Result; use Result;
use Score; use SegmentLocalId;
use SegmentReader; use SegmentReader;
/// The Top Field Collector keeps track of the K documents /// The Top Field Collector keeps track of the K documents
@@ -19,67 +20,57 @@ use SegmentReader;
/// ```rust /// ```rust
/// #[macro_use] /// #[macro_use]
/// extern crate tantivy; /// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT, FAST}; /// # use tantivy::schema::{Schema, Field, FAST, TEXT};
/// use tantivy::{Index, Result, DocId}; /// # use tantivy::{Index, Result, DocAddress};
/// use tantivy::collector::TopFieldCollector; /// # use tantivy::query::{Query, QueryParser};
/// use tantivy::query::QueryParser; /// use tantivy::collector::TopDocs;
/// ///
/// # fn main() { example().unwrap(); } /// # fn main() {
/// fn example() -> Result<()> { /// # let mut schema_builder = Schema::builder();
/// let mut schema_builder = SchemaBuilder::new(); /// # let title = schema_builder.add_text_field("title", TEXT);
/// let title = schema_builder.add_text_field("title", TEXT); /// # let rating = schema_builder.add_u64_field("rating", FAST);
/// let rating = schema_builder.add_u64_field("rating", FAST); /// # let schema = schema_builder.build();
/// let schema = schema_builder.build(); /// # let index = Index::create_in_ram(schema);
/// let index = Index::create_in_ram(schema); /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
/// { /// # index_writer.add_document(doc!(
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?; /// # title => "The Name of the Wind",
/// index_writer.add_document(doc!( /// # rating => 92u64,
/// title => "The Name of the Wind", /// # ));
/// rating => 92u64, /// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// )); /// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// index_writer.add_document(doc!( /// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// title => "The Diary of Muadib", /// # index_writer.commit().unwrap();
/// rating => 97u64, /// # index.load_searchers().unwrap();
/// )); /// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary").unwrap();
/// index_writer.add_document(doc!( /// # let top_docs = docs_sorted_by_rating(&index, &query, rating).unwrap();
/// title => "A Dairy Cow", /// # assert_eq!(top_docs,
/// rating => 63u64, /// # vec![(97u64, DocAddress(0u32, 1)),
/// )); /// # (80u64, DocAddress(0u32, 3))]);
/// index_writer.add_document(doc!( /// # }
/// title => "The Diary of a Young Girl", /// #
/// rating => 80u64, /// /// Searches the document matching the given query, and
/// )); /// /// collects the top 10 documents, order by the `field`
/// index_writer.commit().unwrap(); /// /// given in argument.
/// } /// ///
/// /// `field` is required to be a FAST field.
/// fn docs_sorted_by_rating(index: &Index, query: &Query, sort_by_field: Field)
/// -> Result<Vec<(u64, DocAddress)>> {
/// ///
/// index.load_searchers()?; /// // This is where we build our collector!
/// let searcher = index.searcher(); /// let top_docs_by_rating = TopDocs::with_limit(2).order_by_field(sort_by_field);
/// ///
/// { /// // ... and here are our documents. Note this is a simple Vec.
/// let mut top_collector = TopFieldCollector::with_limit(rating, 2); /// // The `u64` in the pair is the value of our fast field for each documents.
/// let query_parser = QueryParser::for_index(&index, vec![title]); /// index.searcher()
/// let query = query_parser.parse_query("diary")?; /// .search(query, &top_docs_by_rating)
/// searcher.search(&*query, &mut top_collector).unwrap();
///
/// let score_docs: Vec<(u64, DocId)> = top_collector
/// .top_docs()
/// .into_iter()
/// .map(|(field, doc_address)| (field, doc_address.doc()))
/// .collect();
///
/// assert_eq!(score_docs, vec![(97u64, 1), (80, 3)]);
/// }
///
/// Ok(())
/// } /// }
/// ``` /// ```
pub struct TopFieldCollector<T: FastValue> { pub struct TopDocsByField<T> {
field: Field,
collector: TopCollector<T>, collector: TopCollector<T>,
fast_field: Option<FastFieldReader<T>>, field: Field,
} }
impl<T: FastValue + PartialOrd + Clone> TopFieldCollector<T> { impl<T: FastValue + PartialOrd + Clone> TopDocsByField<T> {
/// Creates a top field collector, with a number of documents equal to "limit". /// Creates a top field collector, with a number of documents equal to "limit".
/// ///
/// The given field name must be a fast field, otherwise the collector will have an error while /// The given field name must be a fast field, otherwise the collector will have an error while
@@ -87,68 +78,73 @@ impl<T: FastValue + PartialOrd + Clone> TopFieldCollector<T> {
/// ///
/// # Panics /// # Panics
/// The method panics if limit is 0 /// The method panics if limit is 0
pub fn with_limit(field: Field, limit: usize) -> Self { pub(crate) fn new(field: Field, limit: usize) -> TopDocsByField<T> {
TopFieldCollector { TopDocsByField {
field,
collector: TopCollector::with_limit(limit), collector: TopCollector::with_limit(limit),
fast_field: None, field,
} }
} }
/// Returns K best documents sorted the given field name in decreasing order.
///
/// Calling this method triggers the sort.
/// The result of the sort is not cached.
pub fn docs(&self) -> Vec<DocAddress> {
self.collector.docs()
}
/// Returns K best FieldDocuments sorted in decreasing order.
///
/// Calling this method triggers the sort.
/// The result of the sort is not cached.
pub fn top_docs(&self) -> Vec<(T, DocAddress)> {
self.collector.top_docs()
}
/// Return true iff at least K documents have gone through
/// the collector.
#[inline]
pub fn at_capacity(&self) -> bool {
self.collector.at_capacity()
}
} }
impl<T: FastValue + PartialOrd + Clone> Collector for TopFieldCollector<T> { impl<T: FastValue + PartialOrd + Send + Sync + 'static> Collector for TopDocsByField<T> {
fn set_segment(&mut self, segment_id: u32, segment: &SegmentReader) -> Result<()> { type Fruit = Vec<(T, DocAddress)>;
self.collector.set_segment_id(segment_id); type SegmentFruit = Vec<(T, DocAddress)>;
self.fast_field = Some(segment.fast_field_reader(self.field)?);
Ok(())
}
fn collect(&mut self, doc: DocId, _score: Score) { type Child = TopFieldSegmentCollector<T>;
let field_value = self
.fast_field fn for_segment(
.as_ref() &self,
.expect("collect() was called before set_segment. This should never happen.") segment_local_id: SegmentLocalId,
.get(doc); reader: &SegmentReader,
self.collector.collect(doc, field_value); ) -> Result<TopFieldSegmentCollector<T>> {
let collector = self.collector.for_segment(segment_local_id, reader)?;
let reader = reader.fast_field_reader(self.field)?;
Ok(TopFieldSegmentCollector { collector, reader })
} }
fn requires_scoring(&self) -> bool { fn requires_scoring(&self) -> bool {
false false
} }
fn merge_fruits(
&self,
segment_fruits: Vec<Vec<(T, DocAddress)>>,
) -> Result<Vec<(T, DocAddress)>> {
self.collector.merge_fruits(segment_fruits)
}
}
pub struct TopFieldSegmentCollector<T: FastValue + PartialOrd> {
collector: TopSegmentCollector<T>,
reader: FastFieldReader<T>,
}
impl<T: FastValue + PartialOrd + Send + Sync + 'static> SegmentCollector
for TopFieldSegmentCollector<T>
{
type Fruit = Vec<(T, DocAddress)>;
fn collect(&mut self, doc: u32, _score: f32) {
let field_value = self.reader.get(doc);
self.collector.collect(doc, field_value);
}
fn harvest(self) -> Vec<(T, DocAddress)> {
self.collector.harvest()
}
} }
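With this split, a collector is two objects: the query-level `Collector`, which hands out one child per segment and merges the per-segment fruits at the end, and the `SegmentCollector`, which does the actual per-document work. As a minimal sketch of what an implementor now has to provide, here is a hypothetical counting collector written against the trait shape visible above; the name `CountCollector` and the crate-internal import paths are illustrative assumptions, not part of this changeset.

```rust
use collector::{Collector, SegmentCollector};
use {DocId, Result, Score, SegmentLocalId, SegmentReader};

/// Counts matching documents; the per-segment fruit is a partial count
/// and `merge_fruits` sums the partial counts.
pub struct CountCollector;

pub struct CountSegmentCollector(usize);

impl Collector for CountCollector {
    type Fruit = usize;
    type SegmentFruit = usize;
    type Child = CountSegmentCollector;

    fn for_segment(
        &self,
        _segment_local_id: SegmentLocalId,
        _reader: &SegmentReader,
    ) -> Result<CountSegmentCollector> {
        // Each segment gets its own, independent counter.
        Ok(CountSegmentCollector(0))
    }

    fn requires_scoring(&self) -> bool {
        false
    }

    fn merge_fruits(&self, segment_counts: Vec<usize>) -> Result<usize> {
        // Merge the partial per-segment counts into the final fruit.
        Ok(segment_counts.into_iter().sum())
    }
}

impl SegmentCollector for CountSegmentCollector {
    type Fruit = usize;

    fn collect(&mut self, _doc: DocId, _score: Score) {
        self.0 += 1;
    }

    fn harvest(self) -> usize {
        self.0
    }
}
```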
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::TopDocsByField;
use collector::Collector;
use collector::TopDocs;
use query::Query; use query::Query;
use query::QueryParser; use query::QueryParser;
use schema::Field; use schema::Field;
use schema::IntOptions; use schema::IntOptions;
use schema::Schema; use schema::{Schema, FAST, TEXT};
use schema::{SchemaBuilder, FAST, TEXT}; use DocAddress;
use Index; use Index;
use IndexWriter; use IndexWriter;
use TantivyError; use TantivyError;
@@ -158,7 +154,7 @@ mod tests {
#[test] #[test]
fn test_top_collector_not_at_capacity() { fn test_top_collector_not_at_capacity() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT); let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST); let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -178,22 +174,22 @@ mod tests {
}); });
let searcher = index.searcher(); let searcher = index.searcher();
let mut top_collector = TopFieldCollector::with_limit(size, 4); let top_collector = TopDocs::with_limit(4).order_by_field(size);
searcher.search(&*query, &mut top_collector).unwrap(); let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
assert!(!top_collector.at_capacity()); assert_eq!(
top_docs,
let score_docs: Vec<(u64, DocId)> = top_collector vec![
.top_docs() (64, DocAddress(0, 1)),
.into_iter() (16, DocAddress(0, 2)),
.map(|(field, doc_address)| (field, doc_address.doc())) (12, DocAddress(0, 0))
.collect(); ]
assert_eq!(score_docs, vec![(64, 1), (16, 2), (12, 0)]); );
} }
#[test] #[test]
#[should_panic] #[should_panic]
fn test_field_does_not_exist() { fn test_field_does_not_exist() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT); let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST); let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -204,14 +200,16 @@ mod tests {
)); ));
}); });
let searcher = index.searcher(); let searcher = index.searcher();
let segment = searcher.segment_reader(0); let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(Field(2));
let mut top_collector: TopFieldCollector<u64> = TopFieldCollector::with_limit(Field(2), 4); let segment_reader = searcher.segment_reader(0u32);
let _ = top_collector.set_segment(0, segment); top_collector
.for_segment(0, segment_reader)
.expect("should panic");
} }
#[test] #[test]
fn test_field_not_fast_field() { fn test_field_not_fast_field() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT); let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, IntOptions::default()); let size = schema_builder.add_u64_field(SIZE, IntOptions::default());
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -223,26 +221,16 @@ mod tests {
}); });
let searcher = index.searcher(); let searcher = index.searcher();
let segment = searcher.segment_reader(0); let segment = searcher.segment_reader(0);
let mut top_collector: TopFieldCollector<u64> = TopFieldCollector::with_limit(size, 4); let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(size);
assert_matches!( assert_matches!(
top_collector.set_segment(0, segment), top_collector
Err(TantivyError::FastFieldError(_)) .for_segment(0, segment)
.map(|_| ())
.unwrap_err(),
TantivyError::FastFieldError(_)
); );
} }
#[test]
#[should_panic]
fn test_collect_before_set_segment() {
let mut top_collector: TopFieldCollector<u64> = TopFieldCollector::with_limit(Field(0), 4);
top_collector.collect(0, 0f32);
}
#[test]
#[should_panic]
fn test_top_0() {
let _: TopFieldCollector<u64> = TopFieldCollector::with_limit(Field(0), 0);
}
fn index( fn index(
query: &str, query: &str,
query_field: Field, query_field: Field,

View File

@@ -1,5 +1,10 @@
use super::Collector; use super::Collector;
use collector::top_collector::TopCollector; use collector::top_collector::TopCollector;
use collector::top_collector::TopSegmentCollector;
use collector::SegmentCollector;
use collector::TopDocsByField;
use fastfield::FastValue;
use schema::Field;
use DocAddress; use DocAddress;
use DocId; use DocId;
use Result; use Result;
@@ -17,14 +22,15 @@ use SegmentReader;
/// ```rust /// ```rust
/// #[macro_use] /// #[macro_use]
/// extern crate tantivy; /// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT}; /// use tantivy::DocAddress;
/// use tantivy::{Index, Result, DocId, Score}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::collector::TopScoreCollector; /// use tantivy::{Index, Result};
/// use tantivy::collector::TopDocs;
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// ///
/// # fn main() { example().unwrap(); } /// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new(); /// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT); /// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema); /// let index = Index::create_in_ram(schema);
@@ -48,140 +54,148 @@ use SegmentReader;
/// index.load_searchers()?; /// index.load_searchers()?;
/// let searcher = index.searcher(); /// let searcher = index.searcher();
/// ///
/// { /// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let mut top_collector = TopScoreCollector::with_limit(2); /// let query = query_parser.parse_query("diary")?;
/// let query_parser = QueryParser::for_index(&index, vec![title]); /// let top_docs = searcher.search(&query, &TopDocs::with_limit(2))?;
/// let query = query_parser.parse_query("diary")?;
/// searcher.search(&*query, &mut top_collector).unwrap();
/// ///
/// let score_docs: Vec<(Score, DocId)> = top_collector /// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
/// .top_docs() /// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
/// .into_iter()
/// .map(|(score, doc_address)| (score, doc_address.doc()))
/// .collect();
///
/// assert_eq!(score_docs, vec![(0.7261542, 1), (0.6099695, 3)]);
/// }
/// ///
/// Ok(()) /// Ok(())
/// } /// }
/// ``` /// ```
-pub struct TopScoreCollector {
-    collector: TopCollector<Score>,
-}
+pub struct TopDocs(TopCollector<Score>);

-impl TopScoreCollector {
+impl TopDocs {
    /// Creates a top score collector, with a number of documents equal to "limit".
    ///
    /// # Panics
    /// The method panics if limit is 0
-    pub fn with_limit(limit: usize) -> TopScoreCollector {
-        TopScoreCollector {
-            collector: TopCollector::with_limit(limit),
-        }
-    }
+    pub fn with_limit(limit: usize) -> TopDocs {
+        TopDocs(TopCollector::with_limit(limit))
+    }

-    /// Returns K best scored documents sorted in decreasing order.
-    ///
-    /// Calling this method triggers the sort.
-    /// The result of the sort is not cached.
-    pub fn docs(&self) -> Vec<DocAddress> {
-        self.collector.docs()
-    }
-
-    /// Returns K best ScoredDocuments sorted in decreasing order.
-    ///
-    /// Calling this method triggers the sort.
-    /// The result of the sort is not cached.
-    pub fn top_docs(&self) -> Vec<(Score, DocAddress)> {
-        self.collector.top_docs()
-    }
-
-    /// Returns K best ScoredDocuments sorted in decreasing order.
-    ///
-    /// Calling this method triggers the sort.
-    /// The result of the sort is not cached.
-    #[deprecated]
-    pub fn score_docs(&self) -> Vec<(Score, DocAddress)> {
-        self.collector.top_docs()
-    }
-
-    /// Return true iff at least K documents have gone through
-    /// the collector.
-    #[inline]
-    pub fn at_capacity(&self) -> bool {
-        self.collector.at_capacity()
-    }
+    /// Set top-K to rank documents by a given fast field.
+    ///
+    /// (By default, `TopDocs` collects the top-K documents sorted by
+    /// the similarity score.)
+    pub fn order_by_field<T: PartialOrd + FastValue + Clone>(
+        self,
+        field: Field,
+    ) -> TopDocsByField<T> {
+        TopDocsByField::new(field, self.0.limit())
+    }
}

-impl Collector for TopScoreCollector {
-    fn set_segment(&mut self, segment_id: SegmentLocalId, _: &SegmentReader) -> Result<()> {
-        self.collector.set_segment_id(segment_id);
-        Ok(())
-    }
-
-    fn collect(&mut self, doc: DocId, score: Score) {
-        self.collector.collect(doc, score);
-    }
+impl Collector for TopDocs {
+    type Fruit = Vec<(Score, DocAddress)>;
+    type SegmentFruit = Vec<(Score, DocAddress)>;
+
+    type Child = TopScoreSegmentCollector;
+
+    fn for_segment(
+        &self,
+        segment_local_id: SegmentLocalId,
+        reader: &SegmentReader,
+    ) -> Result<Self::Child> {
+        let collector = self.0.for_segment(segment_local_id, reader)?;
+        Ok(TopScoreSegmentCollector(collector))
+    }

    fn requires_scoring(&self) -> bool {
        true
    }
+
+    fn merge_fruits(&self, child_fruits: Vec<Vec<(Score, DocAddress)>>) -> Result<Self::Fruit> {
+        self.0.merge_fruits(child_fruits)
+    }
+}
+
+/// Segment Collector associated to `TopDocs`.
+pub struct TopScoreSegmentCollector(TopSegmentCollector<Score>);
+
+impl SegmentCollector for TopScoreSegmentCollector {
+    type Fruit = Vec<(Score, DocAddress)>;
+
+    fn collect(&mut self, doc: DocId, score: Score) {
+        self.0.collect(doc, score)
+    }
+
+    fn harvest(self) -> Vec<(Score, DocAddress)> {
+        self.0.harvest()
+    }
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::TopDocs;
use collector::Collector; use query::QueryParser;
use DocId; use schema::Schema;
use schema::TEXT;
use DocAddress;
use Index;
use Score; use Score;
fn make_index() -> Index {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"Hello happy tax payer."));
index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer"));
index_writer.add_document(doc!(text_field=>"I like Droopy"));
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
index
}
#[test] #[test]
fn test_top_collector_not_at_capacity() { fn test_top_collector_not_at_capacity() {
let mut top_collector = TopScoreCollector::with_limit(4); let index = make_index();
top_collector.collect(1, 0.8); let field = index.schema().get_field("text").unwrap();
top_collector.collect(3, 0.2); let query_parser = QueryParser::for_index(&index, vec![field]);
top_collector.collect(5, 0.3); let text_query = query_parser.parse_query("droopy tax").unwrap();
assert!(!top_collector.at_capacity()); let score_docs: Vec<(Score, DocAddress)> = index
let score_docs: Vec<(Score, DocId)> = top_collector .searcher()
.top_docs() .search(&text_query, &TopDocs::with_limit(4))
.into_iter() .unwrap();
.map(|(score, doc_address)| (score, doc_address.doc())) assert_eq!(
.collect(); score_docs,
assert_eq!(score_docs, vec![(0.8, 1), (0.3, 5), (0.2, 3)]); vec![
(0.81221175, DocAddress(0u32, 1)),
(0.5376842, DocAddress(0u32, 2)),
(0.48527452, DocAddress(0, 0))
]
);
} }
#[test] #[test]
fn test_top_collector_at_capacity() { fn test_top_collector_at_capacity() {
let mut top_collector = TopScoreCollector::with_limit(4); let index = make_index();
top_collector.collect(1, 0.8); let field = index.schema().get_field("text").unwrap();
top_collector.collect(3, 0.2); let query_parser = QueryParser::for_index(&index, vec![field]);
top_collector.collect(5, 0.3); let text_query = query_parser.parse_query("droopy tax").unwrap();
top_collector.collect(7, 0.9); let score_docs: Vec<(Score, DocAddress)> = index
top_collector.collect(9, -0.2); .searcher()
assert!(top_collector.at_capacity()); .search(&text_query, &TopDocs::with_limit(2))
{ .unwrap();
let score_docs: Vec<(Score, DocId)> = top_collector assert_eq!(
.top_docs() score_docs,
.into_iter() vec![
.map(|(score, doc_address)| (score, doc_address.doc())) (0.81221175, DocAddress(0u32, 1)),
.collect(); (0.5376842, DocAddress(0u32, 2)),
assert_eq!(score_docs, vec![(0.9, 7), (0.8, 1), (0.3, 5), (0.2, 3)]); ]
} );
{
let docs: Vec<DocId> = top_collector
.docs()
.into_iter()
.map(|doc_address| doc_address.doc())
.collect();
assert_eq!(docs, vec![7, 1, 5, 3]);
}
} }
#[test] #[test]
#[should_panic] #[should_panic]
fn test_top_0() { fn test_top_0() {
TopScoreCollector::with_limit(0); TopDocs::with_limit(0);
} }
} }
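Since `search` now returns the collector's fruit directly, the typical read path is: collect `(Score, DocAddress)` pairs with `TopDocs`, then resolve each address through `Searcher::doc`. A rough end-to-end sketch follows; the schema, documents and printed output are made up for illustration and are not part of this changeset.

```rust
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{DocAddress, Index, Result, Score};

fn main() { example().unwrap(); }

fn example() -> Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let index = Index::create_in_ram(schema_builder.build());
    {
        let mut index_writer = index.writer_with_num_threads(1, 40_000_000)?;
        index_writer.add_document(doc!(title => "The Diary of Muadib"));
        index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
        index_writer.commit()?;
    }
    index.load_searchers()?;

    let searcher = index.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;

    // The fruit of `TopDocs` is a Vec of (score, address) pairs, best first.
    let top_docs: Vec<(Score, DocAddress)> = searcher.search(&query, &TopDocs::with_limit(10))?;
    for (score, doc_address) in top_docs {
        // Resolve each address to its stored document.
        let retrieved = searcher.doc(doc_address)?;
        println!("{}: {:?}", score, retrieved);
    }
    Ok(())
}
```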

View File

@@ -4,8 +4,8 @@ use common::VInt;
use directory::ReadOnlySource; use directory::ReadOnlySource;
use directory::WritePtr; use directory::WritePtr;
use schema::Field; use schema::Field;
use space_usage::PerFieldSpaceUsage;
use space_usage::FieldUsage; use space_usage::FieldUsage;
use space_usage::PerFieldSpaceUsage;
use std::collections::HashMap; use std::collections::HashMap;
use std::io::Write; use std::io::Write;
use std::io::{self, Read}; use std::io::{self, Read};
@@ -172,7 +172,8 @@ impl CompositeFile {
pub fn space_usage(&self) -> PerFieldSpaceUsage { pub fn space_usage(&self) -> PerFieldSpaceUsage {
let mut fields = HashMap::new(); let mut fields = HashMap::new();
for (&field_addr, &(start, end)) in self.offsets_index.iter() { for (&field_addr, &(start, end)) in self.offsets_index.iter() {
fields.entry(field_addr.field) fields
.entry(field_addr.field)
.or_insert_with(|| FieldUsage::empty(field_addr.field)) .or_insert_with(|| FieldUsage::empty(field_addr.field))
.add_field_idx(field_addr.idx, end - start); .add_field_idx(field_addr.idx, end - start);
} }

136
src/core/executor.rs Normal file
View File

@@ -0,0 +1,136 @@
use crossbeam::channel;
use scoped_pool::{Pool, ThreadConfig};
use Result;
/// Search executor: search requests are run either in the calling thread or on a thread pool.
///
/// We don't expose the Rayon thread pool directly here for several reasons.
///
/// First, dependency hell. It is not a good idea to expose the
/// API of a dependency, knowing it might conflict with a different version
/// used by the client. Second, we may stop using Rayon in the future.
pub enum Executor {
SingleThread,
ThreadPool(Pool),
}
impl Executor {
/// Creates an Executor that performs all tasks in the caller thread.
pub fn single_thread() -> Executor {
Executor::SingleThread
}
/// Creates an Executor that dispatches the tasks to a thread pool.
pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Executor {
let thread_config = ThreadConfig::new().prefix(prefix);
let pool = Pool::with_thread_config(num_threads, thread_config);
Executor::ThreadPool(pool)
}
// Perform a map in the thread pool.
//
// Regardless of the executor (`SingleThread` or `ThreadPool`), panics in the task
// will propagate to the caller.
pub fn map<
A: Send,
R: Send,
AIterator: Iterator<Item = A>,
F: Sized + Sync + Fn(A) -> Result<R>,
>(
&self,
f: F,
args: AIterator,
) -> Result<Vec<R>> {
match self {
Executor::SingleThread => args.map(f).collect::<Result<_>>(),
Executor::ThreadPool(pool) => {
let args_with_indices: Vec<(usize, A)> = args.enumerate().collect();
let num_fruits = args_with_indices.len();
let fruit_receiver = {
let (fruit_sender, fruit_receiver) = channel::unbounded();
pool.scoped(|scope| {
for arg_with_idx in args_with_indices {
scope.execute(|| {
let (idx, arg) = arg_with_idx;
let fruit = f(arg);
if let Err(err) = fruit_sender.send((idx, fruit)) {
error!("Failed to send search task. It probably means all search threads have panicked. {:?}", err);
}
});
}
});
fruit_receiver
// This ends the scope of fruit_sender.
// This is important as it makes it possible for the fruit_receiver iteration to
// terminate.
};
let mut results = Vec::with_capacity(num_fruits);
unsafe { results.set_len(num_fruits) };
let mut num_items = 0;
for (pos, fruit_res) in fruit_receiver {
results[pos] = fruit_res?;
num_items += 1;
}
// This check ensures that we filled all of this
// uninitialized memory.
assert_eq!(num_items, results.len());
Ok(results)
}
}
}
}
#[cfg(test)]
mod tests {
use super::Executor;
#[test]
#[should_panic(expected = "panic should propagate")]
fn test_panic_propagates_single_thread() {
let _result: Vec<usize> = Executor::single_thread()
.map(
|_| {
panic!("panic should propagate");
},
vec![0].into_iter(),
)
.unwrap();
}
#[test]
#[should_panic] //< unfortunately the panic message is not propagated
fn test_panic_propagates_multi_thread() {
let _result: Vec<usize> = Executor::multi_thread(1, "search-test")
.map(
|_| {
panic!("panic should propagate");
},
vec![0].into_iter(),
)
.unwrap();
}
#[test]
fn test_map_singlethread() {
let result: Vec<usize> = Executor::single_thread()
.map(|i| Ok(i * 2), 0..1_000)
.unwrap();
assert_eq!(result.len(), 1_000);
for i in 0..1_000 {
assert_eq!(result[i], i * 2);
}
}
#[test]
fn test_map_multithread() {
let result: Vec<usize> = Executor::multi_thread(3, "search-test")
.map(|i| Ok(i * 2), 0..10)
.unwrap();
assert_eq!(result.len(), 10);
for i in 0..10 {
assert_eq!(result[i], i * 2);
}
}
}

View File

@@ -3,6 +3,7 @@ use super::pool::Pool;
use super::segment::create_segment; use super::segment::create_segment;
use super::segment::Segment; use super::segment::Segment;
use core::searcher::Searcher; use core::searcher::Searcher;
use core::Executor;
use core::IndexMeta; use core::IndexMeta;
use core::SegmentId; use core::SegmentId;
use core::SegmentMeta; use core::SegmentMeta;
@@ -45,6 +46,7 @@ pub struct Index {
schema: Schema, schema: Schema,
num_searchers: Arc<AtomicUsize>, num_searchers: Arc<AtomicUsize>,
searcher_pool: Arc<Pool<Searcher>>, searcher_pool: Arc<Pool<Searcher>>,
executor: Arc<Executor>,
tokenizers: TokenizerManager, tokenizers: TokenizerManager,
} }
@@ -54,6 +56,29 @@ impl Index {
dir.exists(&META_FILEPATH) dir.exists(&META_FILEPATH)
} }
/// Accessor to the search executor.
///
/// This pool is used by default when calling `searcher.search(...)`
/// to perform search on the individual segments.
///
/// By default, the executor is single-threaded and simply runs in the calling thread.
pub fn search_executor(&self) -> &Executor {
self.executor.as_ref()
}
/// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads.
pub fn set_multithread_executor(&mut self, num_threads: usize) {
self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-"));
}
/// Replace the default single thread search executor pool
/// by a thread pool with as many threads as there are CPUs.
pub fn set_default_multithread_executor(&mut self) {
let default_num_threads = num_cpus::get();
self.set_multithread_executor(default_num_threads);
}
/// Creates a new index using the `RAMDirectory`. /// Creates a new index using the `RAMDirectory`.
/// ///
/// The index will be allocated in anonymous memory. /// The index will be allocated in anonymous memory.
@@ -85,7 +110,9 @@ impl Index {
if index.schema() == schema { if index.schema() == schema {
Ok(index) Ok(index)
} else { } else {
Err(TantivyError::SchemaError("An index exists but the schema does not match.".to_string())) Err(TantivyError::SchemaError(
"An index exists but the schema does not match.".to_string(),
))
} }
} else { } else {
Index::create(dir, schema) Index::create(dir, schema)
@@ -108,7 +135,7 @@ impl Index {
/// Creates a new index given an implementation of the trait `Directory` /// Creates a new index given an implementation of the trait `Directory`
pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> { pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
let directory = ManagedDirectory::new(dir)?; let directory = ManagedDirectory::wrap(dir)?;
Index::from_directory(directory, schema) Index::from_directory(directory, schema)
} }
@@ -131,6 +158,7 @@ impl Index {
num_searchers: Arc::new(AtomicUsize::new(n_cpus)), num_searchers: Arc::new(AtomicUsize::new(n_cpus)),
searcher_pool: Arc::new(Pool::new()), searcher_pool: Arc::new(Pool::new()),
tokenizers: TokenizerManager::default(), tokenizers: TokenizerManager::default(),
executor: Arc::new(Executor::single_thread()),
}; };
index.load_searchers()?; index.load_searchers()?;
Ok(index) Ok(index)
@@ -171,7 +199,7 @@ impl Index {
/// Open the index using the provided directory /// Open the index using the provided directory
pub fn open<D: Directory>(directory: D) -> Result<Index> { pub fn open<D: Directory>(directory: D) -> Result<Index> {
let directory = ManagedDirectory::new(directory)?; let directory = ManagedDirectory::wrap(directory)?;
let metas = load_metas(&directory)?; let metas = load_metas(&directory)?;
Index::create_from_metas(directory, &metas) Index::create_from_metas(directory, &metas)
} }
@@ -348,19 +376,20 @@ impl Clone for Index {
num_searchers: Arc::clone(&self.num_searchers), num_searchers: Arc::clone(&self.num_searchers),
searcher_pool: Arc::clone(&self.searcher_pool), searcher_pool: Arc::clone(&self.searcher_pool),
tokenizers: self.tokenizers.clone(), tokenizers: self.tokenizers.clone(),
executor: self.executor.clone(),
} }
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use schema::{Schema, SchemaBuilder, INT_INDEXED, TEXT};
use Index;
use directory::RAMDirectory; use directory::RAMDirectory;
use schema::{Schema, INT_INDEXED, TEXT};
use Index;
#[test] #[test]
fn test_indexer_for_field() { fn test_indexer_for_field() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let num_likes_field = schema_builder.add_u64_field("num_likes", INT_INDEXED); let num_likes_field = schema_builder.add_u64_field("num_likes", INT_INDEXED);
let body_field = schema_builder.add_text_field("body", TEXT); let body_field = schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -388,7 +417,6 @@ mod tests {
assert!(Index::exists(&directory)); assert!(Index::exists(&directory));
} }
#[test] #[test]
fn open_or_create_should_open() { fn open_or_create_should_open() {
let directory = RAMDirectory::create(); let directory = RAMDirectory::create();
@@ -402,7 +430,7 @@ mod tests {
let directory = RAMDirectory::create(); let directory = RAMDirectory::create();
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok()); assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
assert!(Index::exists(&directory)); assert!(Index::exists(&directory));
assert!(Index::create(directory.clone(), SchemaBuilder::default().build()).is_ok()); assert!(Index::create(directory.clone(), Schema::builder().build()).is_ok());
} }
#[test] #[test]
@@ -411,12 +439,15 @@ mod tests {
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok()); assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
assert!(Index::exists(&directory)); assert!(Index::exists(&directory));
assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok()); assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
let err = Index::open_or_create(directory, SchemaBuilder::default().build()); let err = Index::open_or_create(directory, Schema::builder().build());
assert_eq!(format!("{:?}", err.unwrap_err()), "SchemaError(\"An index exists but the schema does not match.\")"); assert_eq!(
format!("{:?}", err.unwrap_err()),
"SchemaError(\"An index exists but the schema does not match.\")"
);
} }
fn throw_away_schema() -> Schema { fn throw_away_schema() -> Schema {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let _ = schema_builder.add_u64_field("num_likes", INT_INDEXED); let _ = schema_builder.add_u64_field("num_likes", INT_INDEXED);
schema_builder.build() schema_builder.build()
} }
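A rough sketch of how an application could opt in to the new multithreaded search path using the accessors added above; the field name `body`, the thread count, and the assumption that the index has already been built and its searchers loaded are all illustrative, not part of this changeset.

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::{Index, Result};

fn search_multithreaded(mut index: Index, query_str: &str) -> Result<()> {
    // Replace the default single-thread executor: each segment becomes one
    // task dispatched to a 4-thread pool prefixed "thrd-tantivy-search-".
    index.set_multithread_executor(4);
    // Alternatively, size the pool to the number of CPUs:
    // index.set_default_multithread_executor();

    let field = index
        .schema()
        .get_field("body")
        .expect("field `body` assumed to exist");
    let query = QueryParser::for_index(&index, vec![field]).parse_query(query_str)?;

    // `searcher.search(...)` now runs on the executor configured above.
    let top_docs = index.searcher().search(&query, &TopDocs::with_limit(10))?;
    println!("{:?}", top_docs);
    Ok(())
}
```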

View File

@@ -46,13 +46,13 @@ impl fmt::Debug for IndexMeta {
mod tests { mod tests {
use super::IndexMeta; use super::IndexMeta;
use schema::{SchemaBuilder, TEXT}; use schema::{Schema, TEXT};
use serde_json; use serde_json;
#[test] #[test]
fn test_serialize_metas() { fn test_serialize_metas() {
let schema = { let schema = {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
schema_builder.add_text_field("text", TEXT); schema_builder.add_text_field("text", TEXT);
schema_builder.build() schema_builder.build()
}; };

View File

@@ -32,10 +32,7 @@ pub struct InvertedIndexReader {
} }
impl InvertedIndexReader { impl InvertedIndexReader {
-    #[cfg_attr(
-        feature = "cargo-clippy",
-        allow(clippy::needless_pass_by_value)
-    )] // for symetry
+    #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
pub(crate) fn new( pub(crate) fn new(
termdict: TermDictionary, termdict: TermDictionary,
postings_source: ReadOnlySource, postings_source: ReadOnlySource,

View File

@@ -1,3 +1,4 @@
mod executor;
pub mod index; pub mod index;
mod index_meta; mod index_meta;
mod inverted_index_reader; mod inverted_index_reader;
@@ -9,6 +10,7 @@ mod segment_id;
mod segment_meta; mod segment_meta;
mod segment_reader; mod segment_reader;
pub use self::executor::Executor;
pub use self::index::Index; pub use self::index::Index;
pub use self::index_meta::IndexMeta; pub use self::index_meta::IndexMeta;
pub use self::inverted_index_reader::InvertedIndexReader; pub use self::inverted_index_reader::InvertedIndexReader;

View File

@@ -1,18 +1,43 @@
use collector::Collector; use collector::Collector;
use collector::SegmentCollector;
use core::Executor;
use core::InvertedIndexReader; use core::InvertedIndexReader;
use core::SegmentReader; use core::SegmentReader;
use query::Query; use query::Query;
use query::Scorer;
use query::Weight;
use schema::Document; use schema::Document;
use schema::Schema; use schema::Schema;
use schema::{Field, Term}; use schema::{Field, Term};
use space_usage::SearcherSpaceUsage; use space_usage::SearcherSpaceUsage;
use std::fmt; use std::fmt;
use std::sync::Arc; use std::sync::Arc;
use store::StoreReader;
use termdict::TermMerger; use termdict::TermMerger;
use DocAddress; use DocAddress;
use Index; use Index;
use Result; use Result;
fn collect_segment<C: Collector>(
collector: &C,
weight: &Weight,
segment_ord: u32,
segment_reader: &SegmentReader,
) -> Result<C::SegmentFruit> {
let mut scorer = weight.scorer(segment_reader)?;
let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?;
if let Some(delete_bitset) = segment_reader.delete_bitset() {
scorer.for_each(&mut |doc, score| {
if !delete_bitset.is_deleted(doc) {
segment_collector.collect(doc, score);
}
});
} else {
scorer.for_each(&mut |doc, score| segment_collector.collect(doc, score));
}
Ok(segment_collector.harvest())
}
/// Holds a list of `SegmentReader`s ready for search. /// Holds a list of `SegmentReader`s ready for search.
/// ///
/// It guarantees that the `Segment` will not be removed before /// It guarantees that the `Segment` will not be removed before
@@ -22,6 +47,7 @@ pub struct Searcher {
schema: Schema, schema: Schema,
index: Index, index: Index,
segment_readers: Vec<SegmentReader>, segment_readers: Vec<SegmentReader>,
store_readers: Vec<StoreReader>,
} }
impl Searcher { impl Searcher {
@@ -31,10 +57,15 @@ impl Searcher {
index: Index, index: Index,
segment_readers: Vec<SegmentReader>, segment_readers: Vec<SegmentReader>,
) -> Searcher { ) -> Searcher {
let store_readers = segment_readers
.iter()
.map(|segment_reader| segment_reader.get_store_reader())
.collect();
Searcher { Searcher {
schema, schema,
index, index,
segment_readers, segment_readers,
store_readers,
} }
} }
@@ -49,8 +80,8 @@ impl Searcher {
/// the request to the right `Segment`. /// the request to the right `Segment`.
pub fn doc(&self, doc_address: DocAddress) -> Result<Document> { pub fn doc(&self, doc_address: DocAddress) -> Result<Document> {
let DocAddress(segment_local_id, doc_id) = doc_address; let DocAddress(segment_local_id, doc_id) = doc_address;
let segment_reader = &self.segment_readers[segment_local_id as usize]; let store_reader = &self.store_readers[segment_local_id as usize];
segment_reader.doc(doc_id) store_reader.get(doc_id)
} }
/// Access the schema associated to the index of this searcher. /// Access the schema associated to the index of this searcher.
@@ -73,7 +104,8 @@ impl Searcher {
.iter() .iter()
.map(|segment_reader| { .map(|segment_reader| {
u64::from(segment_reader.inverted_index(term.field()).doc_freq(term)) u64::from(segment_reader.inverted_index(term.field()).doc_freq(term))
}).sum::<u64>() })
.sum::<u64>()
} }
/// Return the list of segment readers /// Return the list of segment readers
@@ -86,9 +118,58 @@ impl Searcher {
&self.segment_readers[segment_ord as usize] &self.segment_readers[segment_ord as usize]
} }
-    /// Runs a query on the segment readers wrapped by the searcher
-    pub fn search<C: Collector>(&self, query: &Query, collector: &mut C) -> Result<()> {
-        query.search(self, collector)
-    }
+    /// Runs a query on the segment readers wrapped by the searcher.
+    ///
+    /// Search works as follows:
+    ///
+    /// First, the weight object associated with the query is created.
+    ///
+    /// Then, the query loops over the segments and, for each segment:
+    /// - sets up the collector and informs it that the segment being processed has changed,
+    /// - creates a `SegmentCollector` for collecting documents associated with the segment,
+    /// - creates a `Scorer` object for this segment,
+    /// - iterates through the matched documents and pushes them to the segment collector.
+    ///
+    /// Finally, the `Collector` merges each of the child collectors into itself, so the caller
+    /// gets a single merged result.
+    pub fn search<C: Collector>(&self, query: &Query, collector: &C) -> Result<C::Fruit> {
+        let executor = self.index.search_executor();
+        self.search_with_executor(query, collector, executor)
+    }
    /// Same as [`search(...)`](#method.search) but multithreaded.
    ///
    /// The current implementation is rather naive:
    /// multithreading works by splitting the search into as many tasks
    /// as there are segments.
    ///
    /// It is powerless at making search faster if your index consists of
    /// one large segment.
    ///
    /// Also, keep in mind that multithreading a single query over several
    /// threads will not improve your throughput. It can actually
    /// hurt it. It will, however, decrease the average response time.
    pub fn search_with_executor<C: Collector>(
        &self,
        query: &Query,
        collector: &C,
        executor: &Executor,
    ) -> Result<C::Fruit> {
        let scoring_enabled = collector.requires_scoring();
        let weight = query.weight(self, scoring_enabled)?;
        let segment_readers = self.segment_readers();
        let fruits = executor.map(
            |(segment_ord, segment_reader)| {
                collect_segment(
                    collector,
                    weight.as_ref(),
                    segment_ord as u32,
                    segment_reader,
                )
            },
            segment_readers.iter().enumerate(),
        )?;
        collector.merge_fruits(fruits)
    }
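For completeness, a small sketch of driving a single query through `search_with_executor` with an ad-hoc pool instead of the executor configured on the `Index`; it is written with the crate-internal paths used in this file, so the exact import paths from outside the crate may differ, and the thread count and pool prefix are illustrative.

```rust
use collector::TopDocs;
use core::Executor;

/// Sketch: run one query on a temporary thread pool, leaving the
/// index-wide executor untouched.
fn search_on_adhoc_pool(
    searcher: &Searcher,
    query: &Query,
) -> Result<Vec<(Score, DocAddress)>> {
    // One `collect_segment` task per segment is dispatched to this pool;
    // the per-segment fruits are then merged by the collector, exactly as
    // in `search(...)` above.
    let executor = Executor::multi_thread(4, "adhoc-search-");
    searcher.search_with_executor(query, &TopDocs::with_limit(10), &executor)
}
```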
/// Return the field searcher associated to a `Field`. /// Return the field searcher associated to a `Field`.

View File

@@ -4,6 +4,7 @@ use core::InvertedIndexReader;
use core::Segment; use core::Segment;
use core::SegmentComponent; use core::SegmentComponent;
use core::SegmentId; use core::SegmentId;
use directory::ReadOnlySource;
use error::TantivyError; use error::TantivyError;
use fastfield::DeleteBitSet; use fastfield::DeleteBitSet;
use fastfield::FacetReader; use fastfield::FacetReader;
@@ -12,7 +13,6 @@ use fastfield::{self, FastFieldNotAvailableError};
use fastfield::{BytesFastFieldReader, FastValue, MultiValueIntFastFieldReader}; use fastfield::{BytesFastFieldReader, FastValue, MultiValueIntFastFieldReader};
use fieldnorm::FieldNormReader; use fieldnorm::FieldNormReader;
use schema::Cardinality; use schema::Cardinality;
use schema::Document;
use schema::Field; use schema::Field;
use schema::FieldType; use schema::FieldType;
use schema::Schema; use schema::Schema;
@@ -54,7 +54,7 @@ pub struct SegmentReader {
fast_fields_composite: CompositeFile, fast_fields_composite: CompositeFile,
fieldnorms_composite: CompositeFile, fieldnorms_composite: CompositeFile,
store_reader: StoreReader, store_source: ReadOnlySource,
delete_bitset_opt: Option<DeleteBitSet>, delete_bitset_opt: Option<DeleteBitSet>,
schema: Schema, schema: Schema,
} }
@@ -197,8 +197,7 @@ impl SegmentReader {
/// Accessor to the segment's `Field norms`'s reader. /// Accessor to the segment's `Field norms`'s reader.
/// ///
/// Field norms are the length (in tokens) of the fields. /// Field norms are the length (in tokens) of the fields.
/// It is used in the computation of the [TfIdf] /// It is used in the computation of the [TfIdf](https://fulmicoton.gitbooks.io/tantivy-doc/content/tfidf.html).
/// (https://fulmicoton.gitbooks.io/tantivy-doc/content/tfidf.html).
/// ///
/// They are simply stored as a fast field, serialized in /// They are simply stored as a fast field, serialized in
/// the `.fieldnorm` file of the segment. /// the `.fieldnorm` file of the segment.
@@ -216,8 +215,8 @@ impl SegmentReader {
} }
/// Accessor to the segment's `StoreReader`. /// Accessor to the segment's `StoreReader`.
pub fn get_store_reader(&self) -> &StoreReader { pub fn get_store_reader(&self) -> StoreReader {
&self.store_reader StoreReader::from_source(self.store_source.clone())
} }
/// Open a new segment for reading. /// Open a new segment for reading.
@@ -226,7 +225,6 @@ impl SegmentReader {
let termdict_composite = CompositeFile::open(&termdict_source)?; let termdict_composite = CompositeFile::open(&termdict_source)?;
let store_source = segment.open_read(SegmentComponent::STORE)?; let store_source = segment.open_read(SegmentComponent::STORE)?;
let store_reader = StoreReader::from_source(store_source);
fail_point!("SegmentReader::open#middle"); fail_point!("SegmentReader::open#middle");
@@ -272,7 +270,7 @@ impl SegmentReader {
fast_fields_composite, fast_fields_composite,
fieldnorms_composite, fieldnorms_composite,
segment_id: segment.id(), segment_id: segment.id(),
store_reader, store_source,
delete_bitset_opt, delete_bitset_opt,
positions_composite, positions_composite,
positions_idx_composite, positions_idx_composite,
@@ -351,14 +349,6 @@ impl SegmentReader {
inv_idx_reader inv_idx_reader
} }
/// Returns the document (or to be accurate, its stored field)
/// bearing the given doc id.
/// This method is slow and should seldom be called from
/// within a collector.
pub fn doc(&self, doc_id: DocId) -> Result<Document> {
self.store_reader.get(doc_id)
}
/// Returns the segment id /// Returns the segment id
pub fn segment_id(&self) -> SegmentId { pub fn segment_id(&self) -> SegmentId {
self.segment_id self.segment_id
@@ -393,8 +383,11 @@ impl SegmentReader {
self.positions_idx_composite.space_usage(), self.positions_idx_composite.space_usage(),
self.fast_fields_composite.space_usage(), self.fast_fields_composite.space_usage(),
self.fieldnorms_composite.space_usage(), self.fieldnorms_composite.space_usage(),
self.store_reader.space_usage(), self.get_store_reader().space_usage(),
self.delete_bitset_opt.as_ref().map(|x| x.space_usage()).unwrap_or(0), self.delete_bitset_opt
.as_ref()
.map(|x| x.space_usage())
.unwrap_or(0),
) )
} }
} }
@@ -454,12 +447,12 @@ impl<'a> Iterator for SegmentReaderAliveDocsIterator<'a> {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use core::Index; use core::Index;
use schema::{SchemaBuilder, Term, STORED, TEXT}; use schema::{Schema, Term, STORED, TEXT};
use DocId; use DocId;
#[test] #[test]
fn test_alive_docs_iterator() { fn test_alive_docs_iterator() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
schema_builder.add_text_field("name", TEXT | STORED); schema_builder.add_text_field("name", TEXT | STORED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone()); let index = Index::create_in_ram(schema.clone());

View File

@@ -59,7 +59,7 @@ fn save_managed_paths(
impl ManagedDirectory { impl ManagedDirectory {
/// Wraps a directory as managed directory. /// Wraps a directory as managed directory.
pub fn new<Dir: Directory>(directory: Dir) -> Result<ManagedDirectory> { pub fn wrap<Dir: Directory>(directory: Dir) -> Result<ManagedDirectory> {
match directory.atomic_read(&MANAGED_FILEPATH) { match directory.atomic_read(&MANAGED_FILEPATH) {
Ok(data) => { Ok(data) => {
let managed_files_json = String::from_utf8_lossy(&data); let managed_files_json = String::from_utf8_lossy(&data);
@@ -260,7 +260,7 @@ mod tests {
let tempdir_path = PathBuf::from(tempdir.path()); let tempdir_path = PathBuf::from(tempdir.path());
{ {
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap(); let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::new(mmap_directory).unwrap(); let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
{ {
let mut write_file = managed_directory.open_write(*TEST_PATH1).unwrap(); let mut write_file = managed_directory.open_write(*TEST_PATH1).unwrap();
write_file.flush().unwrap(); write_file.flush().unwrap();
@@ -286,7 +286,7 @@ mod tests {
} }
{ {
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap(); let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::new(mmap_directory).unwrap(); let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
{ {
assert!(managed_directory.exists(*TEST_PATH1)); assert!(managed_directory.exists(*TEST_PATH1));
assert!(!managed_directory.exists(*TEST_PATH2)); assert!(!managed_directory.exists(*TEST_PATH2));
@@ -310,7 +310,7 @@ mod tests {
let living_files = HashSet::new(); let living_files = HashSet::new();
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap(); let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::new(mmap_directory).unwrap(); let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
managed_directory managed_directory
.atomic_write(*TEST_PATH1, &vec![0u8, 1u8]) .atomic_write(*TEST_PATH1, &vec![0u8, 1u8])
.unwrap(); .unwrap();

View File

@@ -100,7 +100,8 @@ impl InnerDirectory {
); );
let io_err = make_io_err(msg); let io_err = make_io_err(msg);
OpenReadError::IOError(IOError::with_path(path.to_owned(), io_err)) OpenReadError::IOError(IOError::with_path(path.to_owned(), io_err))
}).and_then(|readable_map| { })
.and_then(|readable_map| {
readable_map readable_map
.get(path) .get(path)
.ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path))) .ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
@@ -120,7 +121,8 @@ impl InnerDirectory {
); );
let io_err = make_io_err(msg); let io_err = make_io_err(msg);
DeleteError::IOError(IOError::with_path(path.to_owned(), io_err)) DeleteError::IOError(IOError::with_path(path.to_owned(), io_err))
}).and_then(|mut writable_map| match writable_map.remove(path) { })
.and_then(|mut writable_map| match writable_map.remove(path) {
Some(_) => Ok(()), Some(_) => Ok(()),
None => Err(DeleteError::FileDoesNotExist(PathBuf::from(path))), None => Err(DeleteError::FileDoesNotExist(PathBuf::from(path))),
}) })

View File

@@ -15,13 +15,13 @@ use std::sync::PoisonError;
#[derive(Debug, Fail)] #[derive(Debug, Fail)]
pub enum TantivyError { pub enum TantivyError {
/// Path does not exist. /// Path does not exist.
#[fail(display = "path does not exist: '{:?}'", _0)] #[fail(display = "Path does not exist: '{:?}'", _0)]
PathDoesNotExist(PathBuf), PathDoesNotExist(PathBuf),
/// File already exists, this is a problem when we try to write into a new file. /// File already exists, this is a problem when we try to write into a new file.
#[fail(display = "file already exists: '{:?}'", _0)] #[fail(display = "File already exists: '{:?}'", _0)]
FileAlreadyExists(PathBuf), FileAlreadyExists(PathBuf),
/// Index already exists in this directory /// Index already exists in this directory
#[fail(display = "index already exists")] #[fail(display = "Index already exists")]
IndexAlreadyExists, IndexAlreadyExists,
/// Failed to acquire file lock /// Failed to acquire file lock
#[fail( #[fail(
@@ -30,28 +30,29 @@ pub enum TantivyError {
)] )]
LockFailure(LockType), LockFailure(LockType),
/// IO Error. /// IO Error.
#[fail(display = "an IO error occurred: '{}'", _0)] #[fail(display = "An IO error occurred: '{}'", _0)]
IOError(#[cause] IOError), IOError(#[cause] IOError),
/// The data within is corrupted. /// Data corruption.
/// #[fail(display = "File contains corrupted data: '{:?}'", _0)]
/// For instance, it contains invalid JSON.
#[fail(display = "file contains corrupted data: '{:?}'", _0)]
CorruptedFile(PathBuf), CorruptedFile(PathBuf),
/// A thread holding the locked panicked and poisoned the lock. /// A thread holding the locked panicked and poisoned the lock.
#[fail(display = "a thread holding the locked panicked and poisoned the lock")] #[fail(display = "A thread holding the locked panicked and poisoned the lock")]
Poisoned, Poisoned,
/// Invalid argument was passed by the user. /// Invalid argument was passed by the user.
#[fail(display = "an invalid argument was passed: '{}'", _0)] #[fail(display = "An invalid argument was passed: '{}'", _0)]
InvalidArgument(String), InvalidArgument(String),
/// An Error happened in one of the thread. /// An Error happened in one of the thread.
#[fail(display = "an error occurred in a thread: '{}'", _0)] #[fail(display = "An error occurred in a thread: '{}'", _0)]
ErrorInThread(String), ErrorInThread(String),
/// An Error appeared related to the schema. /// An Error appeared related to the schema.
#[fail(display = "Schema error: '{}'", _0)] #[fail(display = "Schema error: '{}'", _0)]
SchemaError(String), SchemaError(String),
/// Tried to access a fastfield reader for a field not configured accordingly. /// Tried to access a fastfield reader for a field not configured accordingly.
#[fail(display = "fast field not available: '{:?}'", _0)] #[fail(display = "Fast field not available: '{:?}'", _0)]
FastFieldError(#[cause] FastFieldNotAvailableError), FastFieldError(#[cause] FastFieldNotAvailableError),
/// System error. (e.g.: We failed spawning a new thread)
#[fail(display = "System error.'{}'", _0)]
SystemError(String),
} }
impl From<FastFieldNotAvailableError> for TantivyError { impl From<FastFieldNotAvailableError> for TantivyError {

View File

@@ -6,12 +6,12 @@ pub use self::writer::BytesFastFieldWriter;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use schema::SchemaBuilder; use schema::Schema;
use Index; use Index;
#[test] #[test]
fn test_bytes() { fn test_bytes() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let field = schema_builder.add_bytes_field("bytesfield"); let field = schema_builder.add_bytes_field("bytesfield");
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);

View File

@@ -127,19 +127,19 @@ mod tests {
use common::CompositeFile; use common::CompositeFile;
use directory::{Directory, RAMDirectory, WritePtr}; use directory::{Directory, RAMDirectory, WritePtr};
use fastfield::FastFieldReader; use fastfield::FastFieldReader;
use rand::Rng; use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
use rand::SeedableRng; use rand::SeedableRng;
use rand::XorShiftRng;
use schema::Document; use schema::Document;
use schema::Field; use schema::Field;
use schema::Schema;
use schema::FAST; use schema::FAST;
use schema::{Schema, SchemaBuilder};
use std::collections::HashMap; use std::collections::HashMap;
use std::path::Path; use std::path::Path;
lazy_static! { lazy_static! {
pub static ref SCHEMA: Schema = { pub static ref SCHEMA: Schema = {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
schema_builder.add_u64_field("field", FAST); schema_builder.add_u64_field("field", FAST);
schema_builder.build() schema_builder.build()
}; };
@@ -298,7 +298,7 @@ mod tests {
fn test_signed_intfastfield() { fn test_signed_intfastfield() {
let path = Path::new("test"); let path = Path::new("test");
let mut directory: RAMDirectory = RAMDirectory::create(); let mut directory: RAMDirectory = RAMDirectory::create();
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let i64_field = schema_builder.add_i64_field("field", FAST); let i64_field = schema_builder.add_i64_field("field", FAST);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -342,7 +342,7 @@ mod tests {
fn test_signed_intfastfield_default_val() { fn test_signed_intfastfield_default_val() {
let path = Path::new("test"); let path = Path::new("test");
let mut directory: RAMDirectory = RAMDirectory::create(); let mut directory: RAMDirectory = RAMDirectory::create();
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let i64_field = schema_builder.add_i64_field("field", FAST); let i64_field = schema_builder.add_i64_field("field", FAST);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -367,11 +367,10 @@ mod tests {
} }
} }
// Warning: this generates the same permutation at each call
pub fn generate_permutation() -> Vec<u64> { pub fn generate_permutation() -> Vec<u64> {
let seed: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut rng = XorShiftRng::from_seed(seed);
let mut permutation: Vec<u64> = (0u64..100_000u64).collect(); let mut permutation: Vec<u64> = (0u64..100_000u64).collect();
rng.shuffle(&mut permutation); permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
permutation permutation
} }

View File

@@ -9,12 +9,12 @@ mod tests {
use schema::Cardinality; use schema::Cardinality;
use schema::IntOptions; use schema::IntOptions;
use schema::SchemaBuilder; use schema::Schema;
use Index; use Index;
#[test] #[test]
fn test_multivalued_u64() { fn test_multivalued_u64() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field( let field = schema_builder.add_u64_field(
"multifield", "multifield",
IntOptions::default().set_fast(Cardinality::MultiValues), IntOptions::default().set_fast(Cardinality::MultiValues),
@@ -49,7 +49,7 @@ mod tests {
#[test] #[test]
fn test_multivalued_i64() { fn test_multivalued_i64() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let field = schema_builder.add_i64_field( let field = schema_builder.add_i64_field(
"multifield", "multifield",
IntOptions::default().set_fast(Cardinality::MultiValues), IntOptions::default().set_fast(Cardinality::MultiValues),

View File

@@ -47,11 +47,11 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
mod tests { mod tests {
use core::Index; use core::Index;
use schema::{Document, Facet, SchemaBuilder}; use schema::{Document, Facet, Schema};
#[test] #[test]
fn test_multifastfield_reader() { fn test_multifastfield_reader() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facets"); let facet_field = schema_builder.add_facet_field("facets");
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);

View File

@@ -7,7 +7,7 @@ use directory::ReadOnlySource;
use directory::{Directory, RAMDirectory, WritePtr}; use directory::{Directory, RAMDirectory, WritePtr};
use fastfield::{FastFieldSerializer, FastFieldsWriter}; use fastfield::{FastFieldSerializer, FastFieldsWriter};
use owning_ref::OwningRef; use owning_ref::OwningRef;
use schema::SchemaBuilder; use schema::Schema;
use schema::FAST; use schema::FAST;
use std::collections::HashMap; use std::collections::HashMap;
use std::marker::PhantomData; use std::marker::PhantomData;
@@ -108,7 +108,7 @@ impl<Item: FastValue> FastFieldReader<Item> {
impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> { impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
fn from(vals: Vec<Item>) -> FastFieldReader<Item> { fn from(vals: Vec<Item>) -> FastFieldReader<Item> {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("field", FAST); let field = schema_builder.add_u64_field("field", FAST);
let schema = schema_builder.build(); let schema = schema_builder.build();
let path = Path::new("__dummy__"); let path = Path::new("__dummy__");

View File

@@ -15,7 +15,7 @@
//! precompute computationally expensive functions of the fieldnorm //! precompute computationally expensive functions of the fieldnorm
//! in a very short array. //! in a very short array.
//! //!
//! This trick is used by the [BM25 similarity](). //! This trick is used by the BM25 similarity.
mod code; mod code;
mod reader; mod reader;
mod serializer; mod serializer;

View File

@@ -1,7 +1,6 @@
use rand::thread_rng; use rand::thread_rng;
use std::collections::HashSet; use std::collections::HashSet;
use rand::distributions::Range;
use rand::Rng; use rand::Rng;
use schema::*; use schema::*;
use Index; use Index;
@@ -16,7 +15,7 @@ fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {
#[ignore] #[ignore]
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
fn test_indexing() { fn test_indexing() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let id_field = schema_builder.add_u64_field("id", INT_INDEXED); let id_field = schema_builder.add_u64_field("id", INT_INDEXED);
let multiples_field = schema_builder.add_u64_field("multiples", INT_INDEXED); let multiples_field = schema_builder.add_u64_field("multiples", INT_INDEXED);
@@ -24,7 +23,6 @@ fn test_indexing() {
let index = Index::create_from_tempdir(schema).unwrap(); let index = Index::create_from_tempdir(schema).unwrap();
let universe = Range::new(0u64, 20u64);
let mut rng = thread_rng(); let mut rng = thread_rng();
let mut index_writer = index.writer_with_num_threads(3, 120_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(3, 120_000_000).unwrap();
@@ -33,7 +31,7 @@ fn test_indexing() {
let mut uncommitted_docs: HashSet<u64> = HashSet::new(); let mut uncommitted_docs: HashSet<u64> = HashSet::new();
for _ in 0..200 { for _ in 0..200 {
let random_val = rng.sample(&universe); let random_val = rng.gen_range(0, 20);
if random_val == 0 { if random_val == 0 {
index_writer.commit().expect("Commit failed"); index_writer.commit().expect("Commit failed");
committed_docs.extend(&uncommitted_docs); committed_docs.extend(&uncommitted_docs);

View File

@@ -191,10 +191,7 @@ impl DeleteCursor {
} }
} }
#[cfg_attr( #[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))]
feature = "cargo-clippy",
allow(clippy::wrong_self_convention)
)]
fn is_behind_opstamp(&mut self, target_opstamp: u64) -> bool { fn is_behind_opstamp(&mut self, target_opstamp: u64) -> bool {
self.get() self.get()
.map(|operation| operation.opstamp < target_opstamp) .map(|operation| operation.opstamp < target_opstamp)

View File

@@ -8,7 +8,7 @@ use core::SegmentComponent;
use core::SegmentId; use core::SegmentId;
use core::SegmentMeta; use core::SegmentMeta;
use core::SegmentReader; use core::SegmentReader;
use crossbeam_channel as channel; use crossbeam::channel;
use docset::DocSet; use docset::DocSet;
use error::TantivyError; use error::TantivyError;
use fastfield::write_delete_bitset; use fastfield::write_delete_bitset;
@@ -61,7 +61,8 @@ fn initial_table_size(per_thread_memory_budget: usize) -> usize {
"Per thread memory is too small: {}", "Per thread memory is too small: {}",
per_thread_memory_budget per_thread_memory_budget
) )
}).min(19) // we cap it at 512K })
.min(19) // we cap it at 512K
} }
/// `IndexWriter` is the user entry-point to add document to an index. /// `IndexWriter` is the user entry-point to add document to an index.
@@ -139,7 +140,7 @@ pub fn open_index_writer(
let stamper = Stamper::new(current_opstamp); let stamper = Stamper::new(current_opstamp);
let segment_updater = let segment_updater =
SegmentUpdater::new(index.clone(), stamper.clone(), &delete_queue.cursor())?; SegmentUpdater::create(index.clone(), stamper.clone(), &delete_queue.cursor())?;
let mut index_writer = IndexWriter { let mut index_writer = IndexWriter {
_directory_lock: Some(directory_lock), _directory_lock: Some(directory_lock),
@@ -388,11 +389,13 @@ impl IndexWriter {
let mem_budget = self.heap_size_in_bytes_per_thread; let mem_budget = self.heap_size_in_bytes_per_thread;
let join_handle: JoinHandle<Result<()>> = thread::Builder::new() let join_handle: JoinHandle<Result<()>> = thread::Builder::new()
.name(format!( .name(format!(
"indexing thread {} for gen {}", "thrd-tantivy-index{}-gen{}",
self.worker_id, generation self.worker_id, generation
)).spawn(move || { ))
.spawn(move || {
loop { loop {
let mut document_iterator = document_receiver_clone.clone().peekable(); let mut document_iterator =
document_receiver_clone.clone().into_iter().peekable();
// the peeking here is to avoid // the peeking here is to avoid
// creating a new segment's files // creating a new segment's files
@@ -464,10 +467,8 @@ impl IndexWriter {
/// ///
/// Returns the former segment_ready channel. /// Returns the former segment_ready channel.
fn recreate_document_channel(&mut self) -> DocumentReceiver { fn recreate_document_channel(&mut self) -> DocumentReceiver {
let (mut document_sender, mut document_receiver): ( let (mut document_sender, mut document_receiver): (DocumentSender, DocumentReceiver) =
DocumentSender, channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
DocumentReceiver,
) = channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
swap(&mut self.document_sender, &mut document_sender); swap(&mut self.document_sender, &mut document_sender);
swap(&mut self.document_receiver, &mut document_receiver); swap(&mut self.document_receiver, &mut document_receiver);
document_receiver document_receiver
@@ -640,7 +641,10 @@ impl IndexWriter {
pub fn add_document(&mut self, document: Document) -> u64 { pub fn add_document(&mut self, document: Document) -> u64 {
let opstamp = self.stamper.stamp(); let opstamp = self.stamper.stamp();
let add_operation = AddOperation { opstamp, document }; let add_operation = AddOperation { opstamp, document };
self.document_sender.send(add_operation); let send_result = self.document_sender.send(add_operation);
if let Err(e) = send_result {
panic!("Failed to index document. Sending to indexing channel failed. This probably means all of the indexing threads have panicked. {:?}", e);
}
opstamp opstamp
} }
} }
@@ -657,7 +661,7 @@ mod tests {
#[test] #[test]
fn test_lockfile_stops_duplicates() { fn test_lockfile_stops_duplicates() {
let schema_builder = schema::SchemaBuilder::default(); let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let _index_writer = index.writer(40_000_000).unwrap(); let _index_writer = index.writer(40_000_000).unwrap();
match index.writer(40_000_000) { match index.writer(40_000_000) {
@@ -668,7 +672,7 @@ mod tests {
#[test] #[test]
fn test_lockfile_already_exists_error_msg() { fn test_lockfile_already_exists_error_msg() {
let schema_builder = schema::SchemaBuilder::default(); let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let _index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let _index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
match index.writer_with_num_threads(1, 3_000_000) { match index.writer_with_num_threads(1, 3_000_000) {
@@ -683,7 +687,7 @@ mod tests {
#[test] #[test]
fn test_set_merge_policy() { fn test_set_merge_policy() {
let schema_builder = schema::SchemaBuilder::default(); let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let index_writer = index.writer(40_000_000).unwrap(); let index_writer = index.writer(40_000_000).unwrap();
assert_eq!( assert_eq!(
@@ -701,7 +705,7 @@ mod tests {
#[test] #[test]
fn test_lockfile_released_on_drop() { fn test_lockfile_released_on_drop() {
let schema_builder = schema::SchemaBuilder::default(); let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
{ {
let _index_writer = index.writer(40_000_000).unwrap(); let _index_writer = index.writer(40_000_000).unwrap();
@@ -713,7 +717,7 @@ mod tests {
#[test] #[test]
fn test_commit_and_rollback() { fn test_commit_and_rollback() {
let mut schema_builder = schema::SchemaBuilder::default(); let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT); let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
@@ -747,7 +751,7 @@ mod tests {
#[test] #[test]
fn test_with_merges() { fn test_with_merges() {
let mut schema_builder = schema::SchemaBuilder::default(); let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT); let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let num_docs_containing = |s: &str| { let num_docs_containing = |s: &str| {
@@ -784,7 +788,7 @@ mod tests {
#[test] #[test]
fn test_prepare_with_commit_message() { fn test_prepare_with_commit_message() {
let mut schema_builder = schema::SchemaBuilder::default(); let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT); let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
@@ -818,7 +822,7 @@ mod tests {
#[test] #[test]
fn test_prepare_but_rollback() { fn test_prepare_but_rollback() {
let mut schema_builder = schema::SchemaBuilder::default(); let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT); let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
@@ -866,7 +870,7 @@ mod tests {
#[test] #[test]
fn test_write_commit_fails() { fn test_write_commit_fails() {
use fail; use fail;
let mut schema_builder = schema::SchemaBuilder::default(); let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT); let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
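Most of the churn in the tests above is the new schema entry point: `schema::Schema::builder()` replaces `SchemaBuilder::default()` and `SchemaBuilder::new()`. A minimal sketch of the new call, assuming the tantivy 0.8 schema API:

```rust
extern crate tantivy; // assumes the tantivy 0.8 API

use tantivy::schema::{Schema, STORED, TEXT};

fn main() {
    // `Schema::builder()` is the new way to obtain a `SchemaBuilder`.
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let schema = schema_builder.build();
    assert_eq!(schema.get_field("title"), Some(title));
}
```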


@@ -40,13 +40,15 @@ fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 {
total_tokens += reader.inverted_index(field).total_num_tokens(); total_tokens += reader.inverted_index(field).total_num_tokens();
} }
} }
total_tokens + count total_tokens
.iter() + count
.cloned() .iter()
.enumerate() .cloned()
.map(|(fieldnorm_ord, count)| { .enumerate()
count as u64 * u64::from(FieldNormReader::id_to_fieldnorm(fieldnorm_ord as u8)) .map(|(fieldnorm_ord, count)| {
}).sum::<u64>() count as u64 * u64::from(FieldNormReader::id_to_fieldnorm(fieldnorm_ord as u8))
})
.sum::<u64>()
} }
pub struct IndexMerger { pub struct IndexMerger {
@@ -523,7 +525,8 @@ impl IndexMerger {
} }
} }
None None
}).collect(); })
.collect();
// At this point, `segment_postings` contains the posting list // At this point, `segment_postings` contains the posting list
// of all of the segments containing the given term. // of all of the segments containing the given term.
@@ -614,7 +617,7 @@ impl IndexMerger {
store_writer.store(&doc)?; store_writer.store(&doc)?;
} }
} else { } else {
store_writer.stack(store_reader)?; store_writer.stack(&store_reader)?;
} }
} }
Ok(()) Ok(())
@@ -635,10 +638,9 @@ impl SerializableSegment for IndexMerger {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use collector::chain;
use collector::tests::TestCollector; use collector::tests::TestCollector;
use collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector}; use collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector};
use collector::FacetCollector; use collector::{Count, FacetCollector};
use core::Index; use core::Index;
use futures::Future; use futures::Future;
use query::AllQuery; use query::AllQuery;
@@ -647,6 +649,7 @@ mod tests {
use schema; use schema;
use schema::Cardinality; use schema::Cardinality;
use schema::Document; use schema::Document;
use schema::Facet;
use schema::IndexRecordOption; use schema::IndexRecordOption;
use schema::IntOptions; use schema::IntOptions;
use schema::Term; use schema::Term;
@@ -658,13 +661,14 @@ mod tests {
#[test] #[test]
fn test_index_merger_no_deletes() { fn test_index_merger_no_deletes() {
let mut schema_builder = schema::SchemaBuilder::default(); let mut schema_builder = schema::Schema::builder();
let text_fieldtype = schema::TextOptions::default() let text_fieldtype = schema::TextOptions::default()
.set_indexing_options( .set_indexing_options(
TextFieldIndexing::default() TextFieldIndexing::default()
.set_tokenizer("default") .set_tokenizer("default")
.set_index_option(IndexRecordOption::WithFreqs), .set_index_option(IndexRecordOption::WithFreqs),
).set_stored(); )
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype); let text_field = schema_builder.add_text_field("text", text_fieldtype);
let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue); let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype); let score_field = schema_builder.add_u64_field("score", score_fieldtype);
@@ -742,27 +746,32 @@ mod tests {
index.load_searchers().unwrap(); index.load_searchers().unwrap();
let searcher = index.searcher(); let searcher = index.searcher();
let get_doc_ids = |terms: Vec<Term>| { let get_doc_ids = |terms: Vec<Term>| {
let mut collector = TestCollector::default();
let query = BooleanQuery::new_multiterms_query(terms); let query = BooleanQuery::new_multiterms_query(terms);
assert!(searcher.search(&query, &mut collector).is_ok()); let top_docs = searcher.search(&query, &TestCollector).unwrap();
collector.docs() top_docs.docs().to_vec()
}; };
{ {
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "a")]), get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
vec![1, 2, 4] vec![DocAddress(0, 1), DocAddress(0, 2), DocAddress(0, 4)]
); );
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "af")]), get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
vec![0, 3] vec![DocAddress(0, 0), DocAddress(0, 3)]
); );
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "g")]), get_doc_ids(vec![Term::from_field_text(text_field, "g")]),
vec![4] vec![DocAddress(0, 4)]
); );
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "b")]), get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
vec![0, 1, 2, 3, 4] vec![
DocAddress(0, 0),
DocAddress(0, 1),
DocAddress(0, 2),
DocAddress(0, 3),
DocAddress(0, 4)
]
); );
} }
{ {
@@ -788,17 +797,18 @@ mod tests {
{ {
let get_fast_vals = |terms: Vec<Term>| { let get_fast_vals = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); let query = BooleanQuery::new_multiterms_query(terms);
let mut collector = FastFieldTestCollector::for_field(score_field); searcher
assert!(searcher.search(&query, &mut collector).is_ok()); .search(&query, &FastFieldTestCollector::for_field(score_field))
collector.vals() .unwrap()
}; };
let get_fast_vals_bytes = |terms: Vec<Term>| { let get_fast_vals_bytes = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); let query = BooleanQuery::new_multiterms_query(terms);
let mut collector = BytesFastFieldTestCollector::for_field(bytes_score_field);
searcher searcher
.search(&query, &mut collector) .search(
.expect("failed to search"); &query,
collector.vals() &BytesFastFieldTestCollector::for_field(bytes_score_field),
)
.expect("failed to search")
}; };
assert_eq!( assert_eq!(
get_fast_vals(vec![Term::from_field_text(text_field, "a")]), get_fast_vals(vec![Term::from_field_text(text_field, "a")]),
@@ -814,11 +824,12 @@ mod tests {
#[test] #[test]
fn test_index_merger_with_deletes() { fn test_index_merger_with_deletes() {
let mut schema_builder = schema::SchemaBuilder::default(); let mut schema_builder = schema::Schema::builder();
let text_fieldtype = schema::TextOptions::default() let text_fieldtype = schema::TextOptions::default()
.set_indexing_options( .set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs), TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
).set_stored(); )
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype); let text_field = schema_builder.add_text_field("text", text_fieldtype);
let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue); let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype); let score_field = schema_builder.add_u64_field("score", score_fieldtype);
@@ -827,21 +838,13 @@ mod tests {
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let search_term = |searcher: &Searcher, term: Term| { let search_term = |searcher: &Searcher, term: Term| {
let mut collector = FastFieldTestCollector::for_field(score_field); let collector = FastFieldTestCollector::for_field(score_field);
let mut bytes_collector = BytesFastFieldTestCollector::for_field(bytes_score_field); let bytes_collector = BytesFastFieldTestCollector::for_field(bytes_score_field);
let term_query = TermQuery::new(term, IndexRecordOption::Basic); let term_query = TermQuery::new(term, IndexRecordOption::Basic);
let (scores, bytes) = searcher
{ .search(&term_query, &(collector, bytes_collector))
let mut combined_collector = .unwrap();
chain().push(&mut collector).push(&mut bytes_collector); let mut score_bytes = Cursor::new(bytes);
searcher
.search(&term_query, &mut combined_collector)
.unwrap();
}
let scores = collector.vals();
let mut score_bytes = Cursor::new(bytes_collector.vals());
for &score in &scores { for &score in &scores {
assert_eq!(score as u32, score_bytes.read_u32::<BigEndian>().unwrap()); assert_eq!(score as u32, score_bytes.read_u32::<BigEndian>().unwrap());
} }
@@ -854,21 +857,21 @@ mod tests {
{ {
// a first commit // a first commit
index_writer.add_document(doc!( index_writer.add_document(doc!(
text_field => "a b d", text_field => "a b d",
score_field => 1u64, score_field => 1u64,
bytes_score_field => vec![0u8, 0, 0, 1], bytes_score_field => vec![0u8, 0, 0, 1],
)); ));
index_writer.add_document(doc!( index_writer.add_document(doc!(
text_field => "b c", text_field => "b c",
score_field => 2u64, score_field => 2u64,
bytes_score_field => vec![0u8, 0, 0, 2], bytes_score_field => vec![0u8, 0, 0, 2],
)); ));
index_writer.delete_term(Term::from_field_text(text_field, "c")); index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.add_document(doc!( index_writer.add_document(doc!(
text_field => "c d", text_field => "c d",
score_field => 3u64, score_field => 3u64,
bytes_score_field => vec![0u8, 0, 0, 3], bytes_score_field => vec![0u8, 0, 0, 3],
)); ));
index_writer.commit().expect("committed"); index_writer.commit().expect("committed");
index.load_searchers().unwrap(); index.load_searchers().unwrap();
let ref searcher = *index.searcher(); let ref searcher = *index.searcher();
@@ -895,37 +898,37 @@ mod tests {
{ {
// a second commit // a second commit
index_writer.add_document(doc!( index_writer.add_document(doc!(
text_field => "a d e", text_field => "a d e",
score_field => 4_000u64, score_field => 4_000u64,
bytes_score_field => vec![0u8, 0, 0, 4], bytes_score_field => vec![0u8, 0, 0, 4],
)); ));
index_writer.add_document(doc!( index_writer.add_document(doc!(
text_field => "e f", text_field => "e f",
score_field => 5_000u64, score_field => 5_000u64,
bytes_score_field => vec![0u8, 0, 0, 5], bytes_score_field => vec![0u8, 0, 0, 5],
)); ));
index_writer.delete_term(Term::from_field_text(text_field, "a")); index_writer.delete_term(Term::from_field_text(text_field, "a"));
index_writer.delete_term(Term::from_field_text(text_field, "f")); index_writer.delete_term(Term::from_field_text(text_field, "f"));
index_writer.add_document(doc!( index_writer.add_document(doc!(
text_field => "f g", text_field => "f g",
score_field => 6_000u64, score_field => 6_000u64,
bytes_score_field => vec![0u8, 0, 23, 112], bytes_score_field => vec![0u8, 0, 23, 112],
)); ));
index_writer.add_document(doc!( index_writer.add_document(doc!(
text_field => "g h", text_field => "g h",
score_field => 7_000u64, score_field => 7_000u64,
bytes_score_field => vec![0u8, 0, 27, 88], bytes_score_field => vec![0u8, 0, 27, 88],
)); ));
index_writer.commit().expect("committed"); index_writer.commit().expect("committed");
index.load_searchers().unwrap(); index.load_searchers().unwrap();
let searcher = index.searcher(); let searcher = index.searcher();
assert_eq!(searcher.segment_readers().len(), 2); assert_eq!(searcher.segment_readers().len(), 2);
assert_eq!(searcher.num_docs(), 3); assert_eq!(searcher.num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].num_docs(), 1); assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3); assert_eq!(searcher.segment_readers()[0].max_doc(), 4);
assert_eq!(searcher.segment_readers()[1].num_docs(), 2); assert_eq!(searcher.segment_readers()[1].num_docs(), 1);
assert_eq!(searcher.segment_readers()[1].max_doc(), 4); assert_eq!(searcher.segment_readers()[1].max_doc(), 3);
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a")), search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec empty_vec
@@ -959,15 +962,15 @@ mod tests {
.segment_reader(0) .segment_reader(0)
.fast_field_reader::<u64>(score_field) .fast_field_reader::<u64>(score_field)
.unwrap(); .unwrap();
assert_eq!(score_field_reader.min_value(), 1); assert_eq!(score_field_reader.min_value(), 4000);
assert_eq!(score_field_reader.max_value(), 3); assert_eq!(score_field_reader.max_value(), 7000);
let score_field_reader = searcher let score_field_reader = searcher
.segment_reader(1) .segment_reader(1)
.fast_field_reader::<u64>(score_field) .fast_field_reader::<u64>(score_field)
.unwrap(); .unwrap();
assert_eq!(score_field_reader.min_value(), 4000); assert_eq!(score_field_reader.min_value(), 1);
assert_eq!(score_field_reader.max_value(), 7000); assert_eq!(score_field_reader.max_value(), 3);
} }
{ {
// merging the segments // merging the segments
@@ -1140,10 +1143,9 @@ mod tests {
#[test] #[test]
fn test_merge_facets() { fn test_merge_facets() {
let mut schema_builder = schema::SchemaBuilder::default(); let mut schema_builder = schema::Schema::builder();
let facet_field = schema_builder.add_facet_field("facet"); let facet_field = schema_builder.add_facet_field("facet");
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
use schema::Facet;
{ {
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let index_doc = |index_writer: &mut IndexWriter, doc_facets: &[&str]| { let index_doc = |index_writer: &mut IndexWriter, doc_facets: &[&str]| {
@@ -1172,20 +1174,16 @@ mod tests {
index_doc(&mut index_writer, &["/top/e", "/top/f"]); index_doc(&mut index_writer, &["/top/e", "/top/f"]);
index_writer.commit().expect("committed"); index_writer.commit().expect("committed");
} }
index.load_searchers().unwrap(); index.load_searchers().unwrap();
let test_searcher = |expected_num_docs: usize, expected: &[(&str, u64)]| { let test_searcher = |expected_num_docs: usize, expected: &[(&str, u64)]| {
let searcher = index.searcher(); let searcher = index.searcher();
let mut facet_collector = FacetCollector::for_field(facet_field); let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet(Facet::from("/top")); facet_collector.add_facet(Facet::from("/top"));
use collector::{CountCollector, MultiCollector}; let (count, facet_counts) = searcher
let mut count_collector = CountCollector::default(); .search(&AllQuery, &(Count, facet_collector))
{ .unwrap();
let mut multi_collectors = assert_eq!(count, expected_num_docs);
MultiCollector::from(vec![&mut count_collector, &mut facet_collector]);
searcher.search(&AllQuery, &mut multi_collectors).unwrap();
}
assert_eq!(count_collector.count(), expected_num_docs);
let facet_counts = facet_collector.harvest();
let facets: Vec<(String, u64)> = facet_counts let facets: Vec<(String, u64)> = facet_counts
.get("/top") .get("/top")
.map(|(facet, count)| (facet.to_string(), count)) .map(|(facet, count)| (facet.to_string(), count))
@@ -1209,7 +1207,6 @@ mod tests {
("/top/f", 1), ("/top/f", 1),
], ],
); );
// Merging the segments // Merging the segments
{ {
let segment_ids = index let segment_ids = index
@@ -1222,7 +1219,6 @@ mod tests {
.wait() .wait()
.expect("Merging failed"); .expect("Merging failed");
index_writer.wait_merging_threads().unwrap(); index_writer.wait_merging_threads().unwrap();
index.load_searchers().unwrap(); index.load_searchers().unwrap();
test_searcher( test_searcher(
11, 11,
@@ -1261,7 +1257,7 @@ mod tests {
#[test] #[test]
fn test_merge_multivalued_int_fields_all_deleted() { fn test_merge_multivalued_int_fields_all_deleted() {
let mut schema_builder = schema::SchemaBuilder::default(); let mut schema_builder = schema::Schema::builder();
let int_options = IntOptions::default() let int_options = IntOptions::default()
.set_fast(Cardinality::MultiValues) .set_fast(Cardinality::MultiValues)
.set_indexed(); .set_indexed();
@@ -1302,7 +1298,7 @@ mod tests {
#[test] #[test]
fn test_merge_multivalued_int_fields() { fn test_merge_multivalued_int_fields() {
let mut schema_builder = schema::SchemaBuilder::default(); let mut schema_builder = schema::Schema::builder();
let int_options = IntOptions::default() let int_options = IntOptions::default()
.set_fast(Cardinality::MultiValues) .set_fast(Cardinality::MultiValues)
.set_indexed(); .set_indexed();
@@ -1368,15 +1364,17 @@ mod tests {
assert_eq!(&vals, &[17]); assert_eq!(&vals, &[17]);
} }
{ println!(
let segment = searcher.segment_reader(1u32); "{:?}",
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap(); searcher
ff_reader.get_vals(0, &mut vals); .segment_readers()
assert_eq!(&vals, &[20]); .iter()
} .map(|reader| reader.max_doc())
.collect::<Vec<_>>()
);
{ {
let segment = searcher.segment_reader(2u32); let segment = searcher.segment_reader(1u32);
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap(); let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
ff_reader.get_vals(0, &mut vals); ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[28, 27]); assert_eq!(&vals, &[28, 27]);
@@ -1385,6 +1383,13 @@ mod tests {
assert_eq!(&vals, &[1_000]); assert_eq!(&vals, &[1_000]);
} }
{
let segment = searcher.segment_reader(2u32);
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[20]);
}
// Merging the segments // Merging the segments
{ {
let segment_ids = index let segment_ids = index
@@ -1403,6 +1408,14 @@ mod tests {
{ {
let searcher = index.searcher(); let searcher = index.searcher();
println!(
"{:?}",
searcher
.segment_readers()
.iter()
.map(|reader| reader.max_doc())
.collect::<Vec<_>>()
);
let segment = searcher.segment_reader(0u32); let segment = searcher.segment_reader(0u32);
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap(); let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
@@ -1428,13 +1441,13 @@ mod tests {
assert_eq!(&vals, &[17]); assert_eq!(&vals, &[17]);
ff_reader.get_vals(7, &mut vals); ff_reader.get_vals(7, &mut vals);
assert_eq!(&vals, &[20]);
ff_reader.get_vals(8, &mut vals);
assert_eq!(&vals, &[28, 27]); assert_eq!(&vals, &[28, 27]);
ff_reader.get_vals(9, &mut vals); ff_reader.get_vals(8, &mut vals);
assert_eq!(&vals, &[1_000]); assert_eq!(&vals, &[1_000]);
ff_reader.get_vals(9, &mut vals);
assert_eq!(&vals, &[20]);
} }
} }
} }
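The collector rewrites in these tests (`TestCollector`, `FastFieldTestCollector`, and the `(Count, facet_collector)` tuple) all follow the refactored search API: a collector is passed by shared reference, `search` returns the collector's result (its "fruit"), and a tuple of collectors is itself a collector evaluated in a single pass. A minimal end-to-end sketch using the public `Count` and `TopDocs` collectors, assuming the tantivy 0.8 API:

```rust
extern crate tantivy; // assumes the tantivy 0.8 API

use tantivy::collector::{Count, TopDocs};
use tantivy::query::QueryParser;
use tantivy::schema::{Document, Schema, STORED, TEXT};
use tantivy::{DocAddress, Index, Score};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let text = schema_builder.add_text_field("text", TEXT | STORED);
    let index = Index::create_in_ram(schema_builder.build());

    let mut index_writer = index.writer_with_num_threads(1, 40_000_000)?;
    let mut doc = Document::default();
    doc.add_text(text, "of mice and men");
    index_writer.add_document(doc);
    index_writer.commit()?;
    index.load_searchers()?;

    let searcher = index.searcher();
    let query = QueryParser::for_index(&index, vec![text])
        .parse_query("mice")
        .expect("valid query");

    // A tuple of collectors is itself a collector; `search` returns
    // the tuple of their fruits, computed in one pass over the index.
    let (count, top_docs): (usize, Vec<(Score, DocAddress)>) =
        searcher.search(&query, &(Count, TopDocs::with_limit(10)))?;
    assert_eq!(count, 1);
    for (_score, doc_address) in top_docs {
        // DocAddress(segment_ordinal, doc_id_within_segment)
        let DocAddress(segment_ord, doc_id) = doc_address;
        println!("hit in segment {} at doc {}", segment_ord, doc_id);
    }
    Ok(())
}
```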


@@ -138,7 +138,7 @@ struct InnerSegmentUpdater {
} }
impl SegmentUpdater { impl SegmentUpdater {
pub fn new( pub fn create(
index: Index, index: Index,
stamper: Stamper, stamper: Stamper,
delete_cursor: &DeleteCursor, delete_cursor: &DeleteCursor,
@@ -195,7 +195,8 @@ impl SegmentUpdater {
segment_updater.0.segment_manager.add_segment(segment_entry); segment_updater.0.segment_manager.add_segment(segment_entry);
segment_updater.consider_merge_options(); segment_updater.consider_merge_options();
true true
}).forget(); })
.forget();
true true
} else { } else {
false false
@@ -227,20 +228,38 @@ impl SegmentUpdater {
if self.is_alive() { if self.is_alive() {
let index = &self.0.index; let index = &self.0.index;
let directory = index.directory(); let directory = index.directory();
let mut commited_segment_metas = self.0.segment_manager.committed_segment_metas();
// We sort segment_readers by number of documents.
// This is a heuristic to make multithreading more efficient.
//
// This is not done at the searcher level because I had a strange
// use case in which I was dealing with a large static index,
// dispatched over 5 SSD drives.
//
// A `UnionDirectory` makes it possible to read from these
// 5 different drives and creates a meta.json on the fly.
// In order to optimize the throughput, it creates a lasagna of segments
// from the different drives.
//
// Segment 1 from disk 1, Segment 1 from disk 2, etc.
commited_segment_metas.sort_by_key(|segment_meta| -(segment_meta.max_doc() as i32));
save_metas( save_metas(
self.0.segment_manager.committed_segment_metas(), commited_segment_metas,
index.schema(), index.schema(),
opstamp, opstamp,
commit_message, commit_message,
directory.box_clone().borrow_mut(), directory.box_clone().borrow_mut(),
).expect("Could not save metas."); )
.expect("Could not save metas.");
} }
} }
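The `sort_by_key(|segment_meta| -(segment_meta.max_doc() as i32))` call above orders the committed segments from largest to smallest before saving the metas. A minimal sketch of the same largest-first ordering written with `std::cmp::Reverse`, which avoids the signed cast (illustrative only, not the tantivy code):

```rust
use std::cmp::Reverse;

fn main() {
    // Pretend these are the `max_doc` values of the committed segments.
    let mut segment_max_docs = vec![3u32, 10, 7];
    // Largest segments first, so the parallel search threads pick up
    // the biggest chunks of work as early as possible.
    segment_max_docs.sort_by_key(|&max_doc| Reverse(max_doc));
    assert_eq!(segment_max_docs, vec![10, 7, 3]);
}
```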
pub fn garbage_collect_files(&self) -> Result<()> { pub fn garbage_collect_files(&self) -> Result<()> {
self.run_async(move |segment_updater| { self.run_async(move |segment_updater| {
segment_updater.garbage_collect_files_exec(); segment_updater.garbage_collect_files_exec();
}).wait() })
.wait()
} }
fn garbage_collect_files_exec(&self) { fn garbage_collect_files_exec(&self) {
@@ -262,7 +281,8 @@ impl SegmentUpdater {
segment_updater.garbage_collect_files_exec(); segment_updater.garbage_collect_files_exec();
segment_updater.consider_merge_options(); segment_updater.consider_merge_options();
} }
}).wait() })
.wait()
} }
pub fn start_merge(&self, segment_ids: &[SegmentId]) -> Result<Receiver<SegmentMeta>> { pub fn start_merge(&self, segment_ids: &[SegmentId]) -> Result<Receiver<SegmentMeta>> {
@@ -270,7 +290,8 @@ impl SegmentUpdater {
let segment_ids_vec = segment_ids.to_vec(); let segment_ids_vec = segment_ids.to_vec();
self.run_async(move |segment_updater| { self.run_async(move |segment_updater| {
segment_updater.start_merge_impl(&segment_ids_vec[..]) segment_updater.start_merge_impl(&segment_ids_vec[..])
}).wait()? })
.wait()?
} }
// `segment_ids` is required to be non-empty. // `segment_ids` is required to be non-empty.
@@ -336,7 +357,8 @@ impl SegmentUpdater {
.unwrap() .unwrap()
.remove(&merging_thread_id); .remove(&merging_thread_id);
Ok(()) Ok(())
}).expect("Failed to spawn a thread."); })
.expect("Failed to spawn a thread.");
self.0 self.0
.merging_threads .merging_threads
.write() .write()
@@ -427,7 +449,8 @@ impl SegmentUpdater {
let previous_metas = segment_updater.0.index.load_metas().unwrap(); let previous_metas = segment_updater.0.index.load_metas().unwrap();
segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload); segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload);
segment_updater.garbage_collect_files_exec(); segment_updater.garbage_collect_files_exec();
}).wait() })
.wait()
} }
/// Wait for current merging threads. /// Wait for current merging threads.
@@ -484,7 +507,7 @@ mod tests {
#[test] #[test]
fn test_delete_during_merge() { fn test_delete_during_merge() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();


@@ -62,7 +62,8 @@ impl SegmentWriter {
segment.index().tokenizers().get(tokenizer_name) segment.index().tokenizers().get(tokenizer_name)
}), }),
_ => None, _ => None,
}).collect(); })
.collect();
Ok(SegmentWriter { Ok(SegmentWriter {
max_doc: 0, max_doc: 0,
multifield_postings, multifield_postings,
@@ -117,7 +118,8 @@ impl SegmentWriter {
_ => { _ => {
panic!("Expected hierarchical facet"); panic!("Expected hierarchical facet");
} }
}).collect(); })
.collect();
let mut term = Term::for_field(field); // we set the Term let mut term = Term::for_field(field); // we set the Term
for facet_bytes in facets { for facet_bytes in facets {
let mut unordered_term_id_opt = None; let mut unordered_term_id_opt = None;
@@ -145,7 +147,8 @@ impl SegmentWriter {
.flat_map(|field_value| match *field_value.value() { .flat_map(|field_value| match *field_value.value() {
Value::Str(ref text) => Some(text.as_str()), Value::Str(ref text) => Some(text.as_str()),
_ => None, _ => None,
}).collect(); })
.collect();
if texts.is_empty() { if texts.is_empty() {
0 0
} else { } else {


@@ -1,6 +1,5 @@
#![doc(html_logo_url = "http://fulmicoton.com/tantivy-logo/tantivy-logo.png")] #![doc(html_logo_url = "http://fulmicoton.com/tantivy-logo/tantivy-logo.png")]
#![cfg_attr(all(feature = "unstable", test), feature(test))] #![cfg_attr(all(feature = "unstable", test), feature(test))]
#![cfg_attr(feature = "cargo-clippy", feature(tool_lints))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))] #![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))]
#![doc(test(attr(allow(unused_variables), deny(warnings))))] #![doc(test(attr(allow(unused_variables), deny(warnings))))]
#![warn(missing_docs)] #![warn(missing_docs)]
@@ -24,7 +23,8 @@
//! # use tempdir::TempDir; //! # use tempdir::TempDir;
//! # use tantivy::Index; //! # use tantivy::Index;
//! # use tantivy::schema::*; //! # use tantivy::schema::*;
//! # use tantivy::collector::TopCollector; //! # use tantivy::{Score, DocAddress};
//! # use tantivy::collector::TopDocs;
//! # use tantivy::query::QueryParser; //! # use tantivy::query::QueryParser;
//! # //! #
//! # fn main() { //! # fn main() {
@@ -46,7 +46,7 @@
//! // in a compressed, row-oriented key-value store. //! // in a compressed, row-oriented key-value store.
//! // This store is useful to reconstruct the //! // This store is useful to reconstruct the
//! // documents that were selected during the search phase. //! // documents that were selected during the search phase.
//! let mut schema_builder = SchemaBuilder::default(); //! let mut schema_builder = Schema::builder();
//! let title = schema_builder.add_text_field("title", TEXT | STORED); //! let title = schema_builder.add_text_field("title", TEXT | STORED);
//! let body = schema_builder.add_text_field("body", TEXT); //! let body = schema_builder.add_text_field("body", TEXT);
//! let schema = schema_builder.build(); //! let schema = schema_builder.build();
@@ -86,13 +86,13 @@
//! // A ticket has been opened regarding this problem. //! // A ticket has been opened regarding this problem.
//! let query = query_parser.parse_query("sea whale")?; //! let query = query_parser.parse_query("sea whale")?;
//! //!
//! let mut top_collector = TopCollector::with_limit(10); //! // Perform search.
//! searcher.search(&*query, &mut top_collector)?; //! // `topdocs` contains the 10 most relevant doc ids, sorted by decreasing scores...
//! let top_docs: Vec<(Score, DocAddress)> =
//! searcher.search(&query, &TopDocs::with_limit(10))?;
//! //!
//! // Our top collector now contains the 10 //! for (_score, doc_address) in top_docs {
//! // most relevant doc ids... //! // Retrieve the actual content of documents given its `doc_address`.
//! let doc_addresses = top_collector.docs();
//! for doc_address in doc_addresses {
//! let retrieved_doc = searcher.doc(doc_address)?; //! let retrieved_doc = searcher.doc(doc_address)?;
//! println!("{}", schema.to_json(&retrieved_doc)); //! println!("{}", schema.to_json(&retrieved_doc));
//! } //! }
@@ -129,11 +129,11 @@ extern crate base64;
extern crate bit_set; extern crate bit_set;
extern crate bitpacking; extern crate bitpacking;
extern crate byteorder; extern crate byteorder;
extern crate scoped_pool;
extern crate combine; extern crate combine;
extern crate crossbeam; extern crate crossbeam;
extern crate crossbeam_channel;
extern crate fnv; extern crate fnv;
extern crate fst; extern crate fst;
extern crate fst_regex; extern crate fst_regex;
@@ -152,8 +152,6 @@ extern crate tempdir;
extern crate tempfile; extern crate tempfile;
extern crate uuid; extern crate uuid;
#[cfg(test)] #[cfg(test)]
#[macro_use] #[macro_use]
extern crate matches; extern crate matches;
@@ -185,10 +183,7 @@ mod macros;
pub use error::TantivyError; pub use error::TantivyError;
#[deprecated( #[deprecated(since = "0.7.0", note = "please use `tantivy::TantivyError` instead")]
since = "0.7.0",
note = "please use `tantivy::TantivyError` instead"
)]
pub use error::TantivyError as Error; pub use error::TantivyError as Error;
extern crate census; extern crate census;
@@ -218,7 +213,7 @@ pub mod store;
pub mod termdict; pub mod termdict;
mod snippet; mod snippet;
pub use self::snippet::SnippetGenerator; pub use self::snippet::{Snippet, SnippetGenerator};
mod docset; mod docset;
pub use self::docset::{DocSet, SkipResult}; pub use self::docset::{DocSet, SkipResult};
@@ -301,9 +296,11 @@ mod tests {
use docset::DocSet; use docset::DocSet;
use query::BooleanQuery; use query::BooleanQuery;
use rand::distributions::Bernoulli; use rand::distributions::Bernoulli;
use rand::distributions::Range; use rand::distributions::Uniform;
use rand::{Rng, SeedableRng, XorShiftRng}; use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use schema::*; use schema::*;
use DocAddress;
use Index; use Index;
use IndexWriter; use IndexWriter;
use Postings; use Postings;
@@ -322,16 +319,15 @@ mod tests {
} }
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> { pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
let seed: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]; let seed: [u8; 32] = [1; 32];
XorShiftRng::from_seed(seed) StdRng::from_seed(seed)
.sample_iter(&Range::new(0u32, max_value)) .sample_iter(&Uniform::new(0u32, max_value))
.take(n_elems) .take(n_elems)
.collect::<Vec<u32>>() .collect::<Vec<u32>>()
} }
pub fn sample_with_seed(n: u32, ratio: f64, seed_val: u8) -> Vec<u32> { pub fn sample_with_seed(n: u32, ratio: f64, seed_val: u8) -> Vec<u32> {
let seed: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, seed_val]; StdRng::from_seed([seed_val; 32])
XorShiftRng::from_seed(seed)
.sample_iter(&Bernoulli::new(ratio)) .sample_iter(&Bernoulli::new(ratio))
.take(n as usize) .take(n as usize)
.enumerate() .enumerate()
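The random helpers above migrate from `XorShiftRng` with a 16-byte seed to `StdRng` with a 32-byte seed, and from `Range` to `Uniform`, in line with the rand 0.6 bump in this compare. A minimal sketch of deterministic sampling under those assumptions:

```rust
extern crate rand; // assumes rand 0.6

use rand::distributions::Uniform;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};

fn sample(seed: [u8; 32]) -> Vec<u32> {
    // rand 0.6: `StdRng::from_seed` takes a [u8; 32] and `Uniform`
    // replaces the old `Range` distribution.
    StdRng::from_seed(seed)
        .sample_iter(&Uniform::new(0u32, 100))
        .take(5)
        .collect()
}

fn main() {
    // The same seed always reproduces the same sequence.
    assert_eq!(sample([1u8; 32]), sample([1u8; 32]));
}
```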
@@ -346,7 +342,7 @@ mod tests {
#[test] #[test]
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
fn test_indexing() { fn test_indexing() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_from_tempdir(schema).unwrap(); let index = Index::create_from_tempdir(schema).unwrap();
@@ -371,7 +367,7 @@ mod tests {
#[test] #[test]
fn test_docfreq1() { fn test_docfreq1() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
@@ -411,7 +407,7 @@ mod tests {
#[test] #[test]
fn test_fieldnorm_no_docs_with_field() { fn test_fieldnorm_no_docs_with_field() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let title_field = schema_builder.add_text_field("title", TEXT); let title_field = schema_builder.add_text_field("title", TEXT);
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
@@ -440,7 +436,7 @@ mod tests {
#[test] #[test]
fn test_fieldnorm() { fn test_fieldnorm() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
{ {
@@ -481,7 +477,7 @@ mod tests {
#[test] #[test]
fn test_delete_postings1() { fn test_delete_postings1() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let term_abcd = Term::from_field_text(text_field, "abcd"); let term_abcd = Term::from_field_text(text_field, "abcd");
let term_a = Term::from_field_text(text_field, "a"); let term_a = Term::from_field_text(text_field, "a");
@@ -492,42 +488,21 @@ mod tests {
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{ // 0
// 0 index_writer.add_document(doc!(text_field=>"a b"));
let doc = doc!(text_field=>"a b"); // 1
index_writer.add_document(doc); index_writer.add_document(doc!(text_field=>" a c"));
} // 2
{ index_writer.add_document(doc!(text_field=>" b c"));
// 1 // 3
let doc = doc!(text_field=>" a c"); index_writer.add_document(doc!(text_field=>" b d"));
index_writer.add_document(doc);
} index_writer.delete_term(Term::from_field_text(text_field, "c"));
{ index_writer.delete_term(Term::from_field_text(text_field, "a"));
// 2 // 4
let doc = doc!(text_field=>" b c"); index_writer.add_document(doc!(text_field=>" b c"));
index_writer.add_document(doc); // 5
} index_writer.add_document(doc!(text_field=>" a"));
{
// 3
let doc = doc!(text_field=>" b d");
index_writer.add_document(doc);
}
{
index_writer.delete_term(Term::from_field_text(text_field, "c"));
}
{
index_writer.delete_term(Term::from_field_text(text_field, "a"));
}
{
// 4
let doc = doc!(text_field=>" b c");
index_writer.add_document(doc);
}
{
// 5
let doc = doc!(text_field=>" a");
index_writer.add_document(doc);
}
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
{ {
@@ -535,11 +510,9 @@ mod tests {
let searcher = index.searcher(); let searcher = index.searcher();
let reader = searcher.segment_reader(0); let reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(text_field); let inverted_index = reader.inverted_index(text_field);
assert!( assert!(inverted_index
inverted_index .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions) .is_none());
.is_none()
);
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
@@ -562,15 +535,10 @@ mod tests {
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{ // 0
// 0 index_writer.add_document(doc!(text_field=>"a b"));
let doc = doc!(text_field=>"a b"); // 1
index_writer.add_document(doc); index_writer.delete_term(Term::from_field_text(text_field, "c"));
}
{
// 1
index_writer.delete_term(Term::from_field_text(text_field, "c"));
}
index_writer.rollback().unwrap(); index_writer.rollback().unwrap();
} }
{ {
@@ -579,11 +547,9 @@ mod tests {
let reader = searcher.segment_reader(0); let reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(term_abcd.field()); let inverted_index = reader.inverted_index(term_abcd.field());
assert!( assert!(inverted_index
inverted_index .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions) .is_none());
.is_none()
);
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
@@ -606,13 +572,8 @@ mod tests {
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{ index_writer.add_document(doc!(text_field=>"a b"));
let doc = doc!(text_field=>"a b"); index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.add_document(doc);
}
{
index_writer.delete_term(Term::from_field_text(text_field, "c"));
}
index_writer.rollback().unwrap(); index_writer.rollback().unwrap();
index_writer.delete_term(Term::from_field_text(text_field, "a")); index_writer.delete_term(Term::from_field_text(text_field, "a"));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
@@ -622,11 +583,9 @@ mod tests {
let searcher = index.searcher(); let searcher = index.searcher();
let reader = searcher.segment_reader(0); let reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(term_abcd.field()); let inverted_index = reader.inverted_index(term_abcd.field());
assert!( assert!(inverted_index
inverted_index .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions) .is_none());
.is_none()
);
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
@@ -656,7 +615,7 @@ mod tests {
#[test] #[test]
fn test_indexed_u64() { fn test_indexed_u64() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("value", INT_INDEXED); let field = schema_builder.add_u64_field("value", INT_INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -679,7 +638,7 @@ mod tests {
#[test] #[test]
fn test_indexed_i64() { fn test_indexed_i64() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_i64_field("value", INT_INDEXED); let value_field = schema_builder.add_i64_field("value", INT_INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -703,7 +662,7 @@ mod tests {
#[test] #[test]
fn test_indexedfield_not_in_documents() { fn test_indexedfield_not_in_documents() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let absent_field = schema_builder.add_text_field("text", TEXT); let absent_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -719,7 +678,7 @@ mod tests {
#[test] #[test]
fn test_delete_postings2() { fn test_delete_postings2() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -755,7 +714,7 @@ mod tests {
#[test] #[test]
fn test_termfreq() { fn test_termfreq() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -774,11 +733,9 @@ mod tests {
let reader = searcher.segment_reader(0); let reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(text_field); let inverted_index = reader.inverted_index(text_field);
let term_abcd = Term::from_field_text(text_field, "abcd"); let term_abcd = Term::from_field_text(text_field, "abcd");
assert!( assert!(inverted_index
inverted_index .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions) .is_none());
.is_none()
);
let term_af = Term::from_field_text(text_field, "af"); let term_af = Term::from_field_text(text_field, "af");
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)
@@ -792,7 +749,7 @@ mod tests {
#[test] #[test]
fn test_searcher_1() { fn test_searcher_1() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -800,18 +757,9 @@ mod tests {
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{ index_writer.add_document(doc!(text_field=>"af af af b"));
let doc = doc!(text_field=>"af af af b"); index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.add_document(doc); index_writer.add_document(doc!(text_field=>"a b c d"));
}
{
let doc = doc!(text_field=>"a b c");
index_writer.add_document(doc);
}
{
let doc = doc!(text_field=>"a b c d");
index_writer.add_document(doc);
}
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
{ {
@@ -819,55 +767,42 @@ mod tests {
let searcher = index.searcher(); let searcher = index.searcher();
let get_doc_ids = |terms: Vec<Term>| { let get_doc_ids = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); let query = BooleanQuery::new_multiterms_query(terms);
let mut collector = TestCollector::default(); let topdocs = searcher.search(&query, &TestCollector).unwrap();
assert!(searcher.search(&query, &mut collector).is_ok()); topdocs.docs().to_vec()
collector.docs()
}; };
{ assert_eq!(
assert_eq!( get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
get_doc_ids(vec![Term::from_field_text(text_field, "a")]), vec![DocAddress(0, 1), DocAddress(0, 2)]
vec![1, 2] );
); assert_eq!(
} get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
{ vec![DocAddress(0, 0)]
assert_eq!( );
get_doc_ids(vec![Term::from_field_text(text_field, "af")]), assert_eq!(
vec![0] get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
); vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
} );
{ assert_eq!(
assert_eq!( get_doc_ids(vec![Term::from_field_text(text_field, "c")]),
get_doc_ids(vec![Term::from_field_text(text_field, "b")]), vec![DocAddress(0, 1), DocAddress(0, 2)]
vec![0, 1, 2] );
); assert_eq!(
} get_doc_ids(vec![Term::from_field_text(text_field, "d")]),
{ vec![DocAddress(0, 2)]
assert_eq!( );
get_doc_ids(vec![Term::from_field_text(text_field, "c")]), assert_eq!(
vec![1, 2] get_doc_ids(vec![
); Term::from_field_text(text_field, "b"),
} Term::from_field_text(text_field, "a"),
{ ]),
assert_eq!( vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
get_doc_ids(vec![Term::from_field_text(text_field, "d")]), );
vec![2]
);
}
{
assert_eq!(
get_doc_ids(vec![
Term::from_field_text(text_field, "b"),
Term::from_field_text(text_field, "a"),
]),
vec![0, 1, 2]
);
}
} }
} }
#[test] #[test]
fn test_searcher_2() { fn test_searcher_2() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -894,7 +829,7 @@ mod tests {
#[test] #[test]
fn test_doc_macro() { fn test_doc_macro() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let other_text_field = schema_builder.add_text_field("text2", TEXT); let other_text_field = schema_builder.add_text_field("text2", TEXT);
let document = doc!(text_field => "tantivy", let document = doc!(text_field => "tantivy",
@@ -912,7 +847,7 @@ mod tests {
#[test] #[test]
fn test_wrong_fast_field_type() { fn test_wrong_fast_field_type() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST); let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
let fast_field_signed = schema_builder.add_i64_field("signed", FAST); let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);


@@ -26,12 +26,12 @@
/// #[macro_use] /// #[macro_use]
/// extern crate tantivy; /// extern crate tantivy;
/// ///
/// use tantivy::schema::{SchemaBuilder, TEXT, FAST}; /// use tantivy::schema::{Schema, TEXT, FAST};
/// ///
/// //... /// //...
/// ///
/// # fn main() { /// # fn main() {
/// let mut schema_builder = SchemaBuilder::new(); /// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT); /// let title = schema_builder.add_text_field("title", TEXT);
/// let author = schema_builder.add_text_field("text", TEXT); /// let author = schema_builder.add_text_field("text", TEXT);
/// let likes = schema_builder.add_u64_field("num_u64", FAST); /// let likes = schema_builder.add_u64_field("num_u64", FAST);
@@ -67,33 +67,33 @@ macro_rules! doc(
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use schema::{SchemaBuilder, FAST, TEXT}; use schema::{Schema, FAST, TEXT};
#[test] #[test]
fn test_doc_basic() { fn test_doc_basic() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT); let title = schema_builder.add_text_field("title", TEXT);
let author = schema_builder.add_text_field("text", TEXT); let author = schema_builder.add_text_field("text", TEXT);
let likes = schema_builder.add_u64_field("num_u64", FAST); let likes = schema_builder.add_u64_field("num_u64", FAST);
let _schema = schema_builder.build(); let _schema = schema_builder.build();
let _doc = doc!( let _doc = doc!(
title => "Life Aquatic", title => "Life Aquatic",
author => "Wes Anderson", author => "Wes Anderson",
likes => 4u64 likes => 4u64
); );
} }
#[test] #[test]
fn test_doc_trailing_comma() { fn test_doc_trailing_comma() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT); let title = schema_builder.add_text_field("title", TEXT);
let author = schema_builder.add_text_field("text", TEXT); let author = schema_builder.add_text_field("text", TEXT);
let likes = schema_builder.add_u64_field("num_u64", FAST); let likes = schema_builder.add_u64_field("num_u64", FAST);
let _schema = schema_builder.build(); let _schema = schema_builder.build();
let _doc = doc!( let _doc = doc!(
title => "Life Aquatic", title => "Life Aquatic",
author => "Wes Anderson", author => "Wes Anderson",
likes => 4u64, likes => 4u64,
); );
} }
} }


@@ -271,12 +271,9 @@ mod bench {
use test::Bencher; use test::Bencher;
fn generate_array_with_seed(n: usize, ratio: f64, seed_val: u8) -> Vec<u32> { fn generate_array_with_seed(n: usize, ratio: f64, seed_val: u8) -> Vec<u32> {
let seed: &[u8; 16] = &[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,seed_val]; let seed: &[u8; 16] = &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, seed_val];
let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed); let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed);
(0u32..) (0u32..).filter(|_| rng.gen_bool(ratio)).take(n).collect()
.filter(|_| rng.gen_bool(ratio))
.take(n)
.collect()
} }
pub fn generate_array(n: usize, ratio: f64) -> Vec<u32> { pub fn generate_array(n: usize, ratio: f64) -> Vec<u32> {


@@ -54,17 +54,18 @@ pub mod tests {
use indexer::operation::AddOperation; use indexer::operation::AddOperation;
use indexer::SegmentWriter; use indexer::SegmentWriter;
use query::Scorer; use query::Scorer;
use rand::{Rng, SeedableRng, XorShiftRng}; use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use schema::Field; use schema::Field;
use schema::IndexRecordOption; use schema::IndexRecordOption;
use schema::{Document, SchemaBuilder, Term, INT_INDEXED, STRING, TEXT}; use schema::{Document, Schema, Term, INT_INDEXED, STRING, TEXT};
use std::iter; use std::iter;
use DocId; use DocId;
use Score; use Score;
#[test] #[test]
pub fn test_position_write() { pub fn test_position_write() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -88,7 +89,7 @@ pub mod tests {
#[test] #[test]
pub fn test_skip_positions() { pub fn test_skip_positions() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT); let title = schema_builder.add_text_field("title", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -163,7 +164,7 @@ pub mod tests {
#[test] #[test]
pub fn test_position_and_fieldnorm1() { pub fn test_position_and_fieldnorm1() {
let mut positions = Vec::new(); let mut positions = Vec::new();
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone()); let index = Index::create_in_ram(schema.clone());
@@ -220,12 +221,10 @@ pub mod tests {
} }
{ {
let term_a = Term::from_field_text(text_field, "abcdef"); let term_a = Term::from_field_text(text_field, "abcdef");
assert!( assert!(segment_reader
segment_reader .inverted_index(term_a.field())
.inverted_index(term_a.field()) .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions) .is_none());
.is_none()
);
} }
{ {
let term_a = Term::from_field_text(text_field, "a"); let term_a = Term::from_field_text(text_field, "a");
@@ -276,7 +275,7 @@ pub mod tests {
#[test] #[test]
pub fn test_position_and_fieldnorm2() { pub fn test_position_and_fieldnorm2() {
let mut positions: Vec<u32> = Vec::new(); let mut positions: Vec<u32> = Vec::new();
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -317,7 +316,7 @@ pub mod tests {
let num_docs = 300u32; let num_docs = 300u32;
let index = { let index = {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_u64_field("value", INT_INDEXED); let value_field = schema_builder.add_u64_field("value", INT_INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -498,12 +497,11 @@ pub mod tests {
Term::from_field_text(field, "d") Term::from_field_text(field, "d")
}; };
pub static ref INDEX: Index = { pub static ref INDEX: Index = {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", STRING); let text_field = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build(); let schema = schema_builder.build();
let seed: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
let mut rng: XorShiftRng = XorShiftRng::from_seed(seed);
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let posting_list_size = 1_000_000; let posting_list_size = 1_000_000;


@@ -29,7 +29,8 @@ fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<PostingsWriter> {
IndexRecordOption::WithFreqsAndPositions => { IndexRecordOption::WithFreqsAndPositions => {
SpecializedPostingsWriter::<TFAndPositionRecorder>::new_boxed() SpecializedPostingsWriter::<TFAndPositionRecorder>::new_boxed()
} }
}).unwrap_or_else(|| SpecializedPostingsWriter::<NothingRecorder>::new_boxed()), })
.unwrap_or_else(|| SpecializedPostingsWriter::<NothingRecorder>::new_boxed()),
FieldType::U64(_) | FieldType::I64(_) | FieldType::HierarchicalFacet => { FieldType::U64(_) | FieldType::I64(_) | FieldType::HierarchicalFacet => {
SpecializedPostingsWriter::<NothingRecorder>::new_boxed() SpecializedPostingsWriter::<NothingRecorder>::new_boxed()
} }
@@ -107,10 +108,8 @@ impl MultiFieldPostingsWriter {
.map(|(key, _, _)| Term::wrap(key).field()) .map(|(key, _, _)| Term::wrap(key).field())
.enumerate(); .enumerate();
let mut unordered_term_mappings: HashMap< let mut unordered_term_mappings: HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>> =
Field, HashMap::new();
HashMap<UnorderedTermId, TermOrdinal>,
> = HashMap::new();
let mut prev_field = Field(u32::max_value()); let mut prev_field = Field(u32::max_value());
for (offset, field) in term_offsets_it { for (offset, field) in term_offsets_it {
@@ -138,7 +137,8 @@ impl MultiFieldPostingsWriter {
.enumerate() .enumerate()
.map(|(term_ord, unord_term_id)| { .map(|(term_ord, unord_term_id)| {
(unord_term_id as UnorderedTermId, term_ord as TermOrdinal) (unord_term_id as UnorderedTermId, term_ord as TermOrdinal)
}).collect(); })
.collect();
unordered_term_mappings.insert(field, mapping); unordered_term_mappings.insert(field, mapping);
} }
FieldType::U64(_) | FieldType::I64(_) => {} FieldType::U64(_) | FieldType::I64(_) => {}
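
The hunk above flattens the type annotation of `unordered_term_mappings`: a per-field map from the `UnorderedTermId` handed out at indexing time to the `TermOrdinal` of the same term once the dictionary is sorted. A standalone toy version of that mapping (plain Rust, not the tantivy internals):

```rust
use std::collections::HashMap;

/// Toy version of the unordered-id -> ordinal mapping: terms receive ids in
/// insertion order while indexing; once they are sorted for the term
/// dictionary, each unordered id is mapped to its ordinal in sorted order.
fn term_ordinal_mapping(terms_in_insertion_order: &[&str]) -> HashMap<usize, u64> {
    let mut sorted: Vec<(usize, &str)> = terms_in_insertion_order
        .iter()
        .cloned()
        .enumerate()
        .collect();
    sorted.sort_by_key(|&(_, term)| term);
    sorted
        .into_iter()
        .enumerate()
        .map(|(term_ord, (unordered_id, _))| (unordered_id, term_ord as u64))
        .collect()
}

fn main() {
    let mapping = term_ordinal_mapping(&["zebra", "apple", "mango"]);
    assert_eq!(mapping[&1], 0); // "apple" was inserted second but sorts first.
    assert_eq!(mapping[&2], 1); // "mango"
    assert_eq!(mapping[&0], 2); // "zebra"
}
```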


@@ -533,7 +533,8 @@ impl BlockSegmentPostings {
} else { } else {
BlockSegmentPostingsSkipResult::Terminated BlockSegmentPostingsSkipResult::Terminated
} }
}).unwrap_or(BlockSegmentPostingsSkipResult::Terminated); })
.unwrap_or(BlockSegmentPostingsSkipResult::Terminated);
} }
BlockSegmentPostingsSkipResult::Terminated BlockSegmentPostingsSkipResult::Terminated
} }
@@ -630,7 +631,7 @@ mod tests {
use docset::DocSet; use docset::DocSet;
use fst::Streamer; use fst::Streamer;
use schema::IndexRecordOption; use schema::IndexRecordOption;
use schema::SchemaBuilder; use schema::Schema;
use schema::Term; use schema::Term;
use schema::INT_INDEXED; use schema::INT_INDEXED;
use DocId; use DocId;
@@ -707,7 +708,7 @@ mod tests {
} }
fn build_block_postings(docs: Vec<DocId>) -> BlockSegmentPostings { fn build_block_postings(docs: Vec<DocId>) -> BlockSegmentPostings {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let int_field = schema_builder.add_u64_field("id", INT_INDEXED); let int_field = schema_builder.add_u64_field("id", INT_INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -778,7 +779,7 @@ mod tests {
#[test] #[test]
fn test_reset_block_segment_postings() { fn test_reset_block_segment_postings() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let int_field = schema_builder.add_u64_field("id", INT_INDEXED); let int_field = schema_builder.add_u64_field("id", INT_INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);


@@ -55,7 +55,7 @@ pub struct InvertedIndexSerializer {
impl InvertedIndexSerializer { impl InvertedIndexSerializer {
/// Open a new `PostingsSerializer` for the given segment /// Open a new `PostingsSerializer` for the given segment
fn new( fn create(
terms_write: CompositeWrite<WritePtr>, terms_write: CompositeWrite<WritePtr>,
postings_write: CompositeWrite<WritePtr>, postings_write: CompositeWrite<WritePtr>,
positions_write: CompositeWrite<WritePtr>, positions_write: CompositeWrite<WritePtr>,
@@ -74,7 +74,7 @@ impl InvertedIndexSerializer {
/// Open a new `PostingsSerializer` for the given segment /// Open a new `PostingsSerializer` for the given segment
pub fn open(segment: &mut Segment) -> Result<InvertedIndexSerializer> { pub fn open(segment: &mut Segment) -> Result<InvertedIndexSerializer> {
use SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS}; use SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS};
InvertedIndexSerializer::new( InvertedIndexSerializer::create(
CompositeWrite::wrap(segment.open_write(TERMS)?), CompositeWrite::wrap(segment.open_write(TERMS)?),
CompositeWrite::wrap(segment.open_write(POSTINGS)?), CompositeWrite::wrap(segment.open_write(POSTINGS)?),
CompositeWrite::wrap(segment.open_write(POSITIONS)?), CompositeWrite::wrap(segment.open_write(POSITIONS)?),
@@ -99,7 +99,7 @@ impl InvertedIndexSerializer {
let positions_write = self.positions_write.for_field(field); let positions_write = self.positions_write.for_field(field);
let positionsidx_write = self.positionsidx_write.for_field(field); let positionsidx_write = self.positionsidx_write.for_field(field);
let field_type: FieldType = (*field_entry.field_type()).clone(); let field_type: FieldType = (*field_entry.field_type()).clone();
FieldSerializer::new( FieldSerializer::create(
&field_type, &field_type,
term_dictionary_write, term_dictionary_write,
postings_write, postings_write,
@@ -130,7 +130,7 @@ pub struct FieldSerializer<'a> {
} }
impl<'a> FieldSerializer<'a> { impl<'a> FieldSerializer<'a> {
fn new( fn create(
field_type: &FieldType, field_type: &FieldType,
term_dictionary_write: &'a mut CountingWriter<WritePtr>, term_dictionary_write: &'a mut CountingWriter<WritePtr>,
postings_write: &'a mut CountingWriter<WritePtr>, postings_write: &'a mut CountingWriter<WritePtr>,
@@ -152,7 +152,7 @@ impl<'a> FieldSerializer<'a> {
_ => (false, false), _ => (false, false),
}; };
let term_dictionary_builder = let term_dictionary_builder =
TermDictionaryBuilder::new(term_dictionary_write, &field_type)?; TermDictionaryBuilder::create(term_dictionary_write, &field_type)?;
let postings_serializer = let postings_serializer =
PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled); PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled);
let positions_serializer_opt = if position_enabled { let positions_serializer_opt = if position_enabled {


@@ -174,8 +174,8 @@ mod tests {
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench { mod bench {
use super::ExpUnrolledLinkedList;
use super::super::MemoryArena; use super::super::MemoryArena;
use super::ExpUnrolledLinkedList;
use test::Bencher; use test::Bencher;
const NUM_STACK: usize = 10_000; const NUM_STACK: usize = 10_000;


@@ -86,12 +86,12 @@ mod tests {
use super::AllQuery; use super::AllQuery;
use query::Query; use query::Query;
use schema::{SchemaBuilder, TEXT}; use schema::{Schema, TEXT};
use Index; use Index;
#[test] #[test]
fn test_all_query() { fn test_all_query() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let field = schema_builder.add_text_field("text", TEXT); let field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);


@@ -11,7 +11,7 @@ use Result;
/// A weight struct for Fuzzy Term and Regex Queries /// A weight struct for Fuzzy Term and Regex Queries
pub struct AutomatonWeight<A> pub struct AutomatonWeight<A>
where where
A: Automaton, A: Automaton + Send + Sync + 'static,
{ {
field: Field, field: Field,
automaton: A, automaton: A,
@@ -19,7 +19,7 @@ where
impl<A> AutomatonWeight<A> impl<A> AutomatonWeight<A>
where where
A: Automaton, A: Automaton + Send + Sync + 'static,
{ {
/// Create a new AutomationWeight /// Create a new AutomationWeight
pub fn new(field: Field, automaton: A) -> AutomatonWeight<A> { pub fn new(field: Field, automaton: A) -> AutomatonWeight<A> {
@@ -34,7 +34,7 @@ where
impl<A> Weight for AutomatonWeight<A> impl<A> Weight for AutomatonWeight<A>
where where
A: Automaton, A: Automaton + Send + Sync + 'static,
{ {
fn scorer(&self, reader: &SegmentReader) -> Result<Box<Scorer>> { fn scorer(&self, reader: &SegmentReader) -> Result<Box<Scorer>> {
let max_doc = reader.max_doc(); let max_doc = reader.max_doc();
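
`AutomatonWeight<A>` now requires `A: Automaton + Send + Sync + 'static`, so an automaton-backed weight can be shared by the threads that search segments in parallel. A self-contained sketch of why a generic wrapper has to carry those bounds once it is handed to worker threads (`GenericWeight` and `search_segments` are made-up names, not tantivy APIs):

```rust
use std::sync::Arc;
use std::thread;

// A generic wrapper is only Send + Sync if its type parameter is, so the bound
// has to appear on the wrapper itself once it is shared with worker threads.
struct GenericWeight<A>
where
    A: Send + Sync + 'static,
{
    automaton: Arc<A>,
}

impl<A> GenericWeight<A>
where
    A: Send + Sync + 'static,
{
    fn new(automaton: A) -> GenericWeight<A> {
        GenericWeight {
            automaton: Arc::new(automaton),
        }
    }

    /// Pretend "search": each segment is processed on its own thread.
    fn search_segments(&self, num_segments: usize) -> Vec<usize> {
        let handles: Vec<_> = (0..num_segments)
            .map(|segment_ord| {
                let automaton = Arc::clone(&self.automaton);
                thread::spawn(move || {
                    // Each worker gets shared, read-only access to the automaton.
                    let _shared = &*automaton;
                    segment_ord
                })
            })
            .collect();
        handles.into_iter().map(|h| h.join().unwrap()).collect()
    }
}

fn main() {
    let weight = GenericWeight::new(vec![1u32, 2, 3]); // Vec<u32> is Send + Sync + 'static.
    let processed = weight.search_segments(4);
    assert_eq!(processed.len(), 4);
}
```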


@@ -63,7 +63,8 @@ impl BM25Weight {
.map(|term| { .map(|term| {
let term_doc_freq = searcher.doc_freq(term); let term_doc_freq = searcher.doc_freq(term);
idf(term_doc_freq, total_num_docs) idf(term_doc_freq, total_num_docs)
}).sum::<f32>(); })
.sum::<f32>();
BM25Weight::new(idf, average_fieldnorm) BM25Weight::new(idf, average_fieldnorm)
} }
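
For context on the hunk above: `BM25Weight` sums one idf contribution per query term. The sketch below shows the textbook BM25 idf such a helper usually computes; the exact smoothing constants tantivy uses are not visible in this diff, so treat it as the generic formula rather than the crate's implementation:

```rust
/// Textbook BM25 inverse document frequency:
/// idf(t) = ln(1 + (N - n_t + 0.5) / (n_t + 0.5))
/// where N is the total number of documents and n_t the number of documents
/// containing the term.
fn idf(term_doc_freq: u64, total_num_docs: u64) -> f32 {
    let n = total_num_docs as f32;
    let n_t = term_doc_freq as f32;
    (1.0 + (n - n_t + 0.5) / (n_t + 0.5)).ln()
}

fn main() {
    // A rare term contributes more than a common one.
    let rare = idf(3, 1_000);
    let common = idf(700, 1_000);
    assert!(rare > common);
    println!("rare: {:.3}, common: {:.3}", rare, common);
}
```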


@@ -47,7 +47,8 @@ impl Query for BooleanQuery {
.iter() .iter()
.map(|&(ref occur, ref subquery)| { .map(|&(ref occur, ref subquery)| {
Ok((*occur, subquery.weight(searcher, scoring_enabled)?)) Ok((*occur, subquery.weight(searcher, scoring_enabled)?))
}).collect::<Result<_>>()?; })
.collect::<Result<_>>()?;
Ok(Box::new(BooleanWeight::new(sub_weights, scoring_enabled))) Ok(Box::new(BooleanWeight::new(sub_weights, scoring_enabled)))
} }
@@ -68,7 +69,8 @@ impl BooleanQuery {
let term_query: Box<Query> = let term_query: Box<Query> =
Box::new(TermQuery::new(term, IndexRecordOption::WithFreqs)); Box::new(TermQuery::new(term, IndexRecordOption::WithFreqs));
(Occur::Should, term_query) (Occur::Should, term_query)
}).collect(); })
.collect();
BooleanQuery::from(occur_term_queries) BooleanQuery::from(occur_term_queries)
} }


@@ -19,10 +19,11 @@ mod tests {
use query::Scorer; use query::Scorer;
use query::TermQuery; use query::TermQuery;
use schema::*; use schema::*;
use DocId;
use Index; use Index;
fn aux_test_helper() -> (Index, Field) { fn aux_test_helper() -> (Index, Field) {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -130,9 +131,13 @@ mod tests {
let matching_docs = |boolean_query: &Query| { let matching_docs = |boolean_query: &Query| {
let searcher = index.searcher(); let searcher = index.searcher();
let mut test_collector = TestCollector::default(); let test_docs = searcher.search(boolean_query, &TestCollector).unwrap();
searcher.search(boolean_query, &mut test_collector).unwrap(); test_docs
test_collector.docs() .docs()
.iter()
.cloned()
.map(|doc| doc.1)
.collect::<Vec<DocId>>()
}; };
{ {
@@ -186,9 +191,8 @@ mod tests {
let score_docs = |boolean_query: &Query| { let score_docs = |boolean_query: &Query| {
let searcher = index.searcher(); let searcher = index.searcher();
let mut test_collector = TestCollector::default(); let fruit = searcher.search(boolean_query, &TestCollector).unwrap();
searcher.search(boolean_query, &mut test_collector).unwrap(); fruit.scores().to_vec()
test_collector.scores()
}; };
{ {


@@ -25,14 +25,14 @@ lazy_static! {
/// ```rust /// ```rust
/// #[macro_use] /// #[macro_use]
/// extern crate tantivy; /// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result, Term}; /// use tantivy::{Index, Result, Term};
/// use tantivy::collector::{CountCollector, TopCollector, chain}; /// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::FuzzyTermQuery; /// use tantivy::query::FuzzyTermQuery;
/// ///
/// # fn main() { example().unwrap(); } /// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new(); /// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT); /// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema); /// let index = Index::create_in_ram(schema);
@@ -57,16 +57,12 @@ lazy_static! {
/// let searcher = index.searcher(); /// let searcher = index.searcher();
/// ///
/// { /// {
/// let mut top_collector = TopCollector::with_limit(2); ///
/// let mut count_collector = CountCollector::default(); /// let term = Term::from_field_text(title, "Diary");
/// { /// let query = FuzzyTermQuery::new(term, 1, true);
/// let mut collectors = chain().push(&mut top_collector).push(&mut count_collector); /// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
/// let term = Term::from_field_text(title, "Diary"); /// assert_eq!(count, 2);
/// let query = FuzzyTermQuery::new(term, 1, true); /// assert_eq!(top_docs.len(), 2);
/// searcher.search(&query, &mut collectors).unwrap();
/// }
/// assert_eq!(count_collector.count(), 2);
/// assert!(top_collector.at_capacity());
/// } /// }
/// ///
/// Ok(()) /// Ok(())
@@ -122,8 +118,8 @@ impl Query for FuzzyTermQuery {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::FuzzyTermQuery; use super::FuzzyTermQuery;
use collector::TopCollector; use collector::TopDocs;
use schema::SchemaBuilder; use schema::Schema;
use schema::TEXT; use schema::TEXT;
use tests::assert_nearly_equals; use tests::assert_nearly_equals;
use Index; use Index;
@@ -131,7 +127,7 @@ mod test {
#[test] #[test]
pub fn test_fuzzy_term() { pub fn test_fuzzy_term() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let country_field = schema_builder.add_text_field("country", TEXT); let country_field = schema_builder.add_text_field("country", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -148,14 +144,14 @@ mod test {
index.load_searchers().unwrap(); index.load_searchers().unwrap();
let searcher = index.searcher(); let searcher = index.searcher();
{ {
let mut collector = TopCollector::with_limit(2);
let term = Term::from_field_text(country_field, "japon"); let term = Term::from_field_text(country_field, "japon");
let fuzzy_query = FuzzyTermQuery::new(term, 1, true); let fuzzy_query = FuzzyTermQuery::new(term, 1, true);
searcher.search(&fuzzy_query, &mut collector).unwrap(); let top_docs = searcher
let scored_docs = collector.top_docs(); .search(&fuzzy_query, &TopDocs::with_limit(2))
assert_eq!(scored_docs.len(), 1, "Expected only 1 document"); .unwrap();
let (score, _) = scored_docs[0]; assert_eq!(top_docs.len(), 1, "Expected only 1 document");
let (score, _) = top_docs[0];
assert_nearly_equals(1f32, score); assert_nearly_equals(1f32, score);
} }
} }
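
The rewritten rustdoc example and test above show the new collector API: collectors are passed by shared reference, and several of them can be combined in a tuple whose result ("fruit") is a tuple with one entry per collector. A runnable sketch along the same lines; the indexed titles are made up, the calls mirror the example above:

```rust
#[macro_use]
extern crate tantivy;

use tantivy::collector::{Count, TopDocs};
use tantivy::query::FuzzyTermQuery;
use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, Result, Term};

fn main() { example().unwrap(); }

fn example() -> Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    {
        let mut index_writer = index.writer_with_num_threads(1, 40_000_000)?;
        index_writer.add_document(doc!(title => "The Diary of Muadib"));
        index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
        index_writer.add_document(doc!(title => "A Dairy Cow"));
        index_writer.commit()?;
    }
    index.load_searchers()?;
    let searcher = index.searcher();

    // A tuple of collectors replaces the old mutable `chain()`: the returned
    // value is a tuple with one result per collector.
    let term = Term::from_field_text(title, "Diary");
    let query = FuzzyTermQuery::new(term, 1, true);
    let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count))?;
    assert_eq!(count, 2);
    assert_eq!(top_docs.len(), 2);
    Ok(())
}
```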


@@ -56,15 +56,15 @@ pub use self::weight::Weight;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use Index;
use schema::{SchemaBuilder, TEXT};
use query::QueryParser; use query::QueryParser;
use Term; use schema::{Schema, TEXT};
use std::collections::BTreeSet; use std::collections::BTreeSet;
use Index;
use Term;
#[test] #[test]
fn test_query_terms() { fn test_query_terms() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -73,33 +73,48 @@ mod tests {
let term_b = Term::from_field_text(text_field, "b"); let term_b = Term::from_field_text(text_field, "b");
{ {
let mut terms_set: BTreeSet<Term> = BTreeSet::new(); let mut terms_set: BTreeSet<Term> = BTreeSet::new();
query_parser.parse_query("a").unwrap().query_terms(&mut terms_set); query_parser
.parse_query("a")
.unwrap()
.query_terms(&mut terms_set);
let terms: Vec<&Term> = terms_set.iter().collect(); let terms: Vec<&Term> = terms_set.iter().collect();
assert_eq!(vec![&term_a], terms); assert_eq!(vec![&term_a], terms);
} }
{ {
let mut terms_set: BTreeSet<Term> = BTreeSet::new(); let mut terms_set: BTreeSet<Term> = BTreeSet::new();
query_parser.parse_query("a b").unwrap().query_terms(&mut terms_set); query_parser
.parse_query("a b")
.unwrap()
.query_terms(&mut terms_set);
let terms: Vec<&Term> = terms_set.iter().collect(); let terms: Vec<&Term> = terms_set.iter().collect();
assert_eq!(vec![&term_a, &term_b], terms); assert_eq!(vec![&term_a, &term_b], terms);
} }
{ {
let mut terms_set: BTreeSet<Term> = BTreeSet::new(); let mut terms_set: BTreeSet<Term> = BTreeSet::new();
query_parser.parse_query("\"a b\"").unwrap().query_terms(&mut terms_set); query_parser
.parse_query("\"a b\"")
.unwrap()
.query_terms(&mut terms_set);
let terms: Vec<&Term> = terms_set.iter().collect(); let terms: Vec<&Term> = terms_set.iter().collect();
assert_eq!(vec![&term_a, &term_b], terms); assert_eq!(vec![&term_a, &term_b], terms);
} }
{ {
let mut terms_set: BTreeSet<Term> = BTreeSet::new(); let mut terms_set: BTreeSet<Term> = BTreeSet::new();
query_parser.parse_query("a a a a a").unwrap().query_terms(&mut terms_set); query_parser
.parse_query("a a a a a")
.unwrap()
.query_terms(&mut terms_set);
let terms: Vec<&Term> = terms_set.iter().collect(); let terms: Vec<&Term> = terms_set.iter().collect();
assert_eq!(vec![&term_a], terms); assert_eq!(vec![&term_a], terms);
} }
{ {
let mut terms_set: BTreeSet<Term> = BTreeSet::new(); let mut terms_set: BTreeSet<Term> = BTreeSet::new();
query_parser.parse_query("a -b").unwrap().query_terms(&mut terms_set); query_parser
.parse_query("a -b")
.unwrap()
.query_terms(&mut terms_set);
let terms: Vec<&Term> = terms_set.iter().collect(); let terms: Vec<&Term> = terms_set.iter().collect();
assert_eq!(vec![&term_a, &term_b], terms); assert_eq!(vec![&term_a, &term_b], terms);
} }
} }
} }
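
The reformatted tests above exercise `Query::query_terms`, which collects every `Term` referenced by a query into a `BTreeSet`. The same call from user code looks like the sketch below; passing the default field to `QueryParser::for_index` is an assumption, since that part of the test is not shown in this hunk:

```rust
extern crate tantivy;

use std::collections::BTreeSet;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, Term};

fn main() {
    let mut schema_builder = Schema::builder();
    let text_field = schema_builder.add_text_field("text", TEXT);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);

    let query_parser = QueryParser::for_index(&index, vec![text_field]);
    let query = query_parser.parse_query("a -b").unwrap();

    // Collect every term referenced by the query, deduplicated and ordered.
    let mut terms: BTreeSet<Term> = BTreeSet::new();
    query.query_terms(&mut terms);
    for term in &terms {
        println!("{:?}", term);
    }
}
```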


@@ -13,11 +13,13 @@ mod tests {
use collector::tests::TestCollector; use collector::tests::TestCollector;
use core::Index; use core::Index;
use error::TantivyError; use error::TantivyError;
use schema::{SchemaBuilder, Term, TEXT}; use schema::{Schema, Term, TEXT};
use tests::assert_nearly_equals; use tests::assert_nearly_equals;
use DocAddress;
use DocId;
fn create_index(texts: &[&'static str]) -> Index { fn create_index(texts: &[&'static str]) -> Index {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -47,16 +49,19 @@ mod tests {
index.load_searchers().unwrap(); index.load_searchers().unwrap();
let searcher = index.searcher(); let searcher = index.searcher();
let test_query = |texts: Vec<&str>| { let test_query = |texts: Vec<&str>| {
let mut test_collector = TestCollector::default();
let terms: Vec<Term> = texts let terms: Vec<Term> = texts
.iter() .iter()
.map(|text| Term::from_field_text(text_field, text)) .map(|text| Term::from_field_text(text_field, text))
.collect(); .collect();
let phrase_query = PhraseQuery::new(terms); let phrase_query = PhraseQuery::new(terms);
searcher let test_fruits = searcher
.search(&phrase_query, &mut test_collector) .search(&phrase_query, &TestCollector)
.expect("search should succeed"); .expect("search should succeed");
test_collector.docs() test_fruits
.docs()
.iter()
.map(|docaddr| docaddr.1)
.collect::<Vec<_>>()
}; };
assert_eq!(test_query(vec!["a", "b", "c"]), vec![2, 4]); assert_eq!(test_query(vec!["a", "b", "c"]), vec![2, 4]);
assert_eq!(test_query(vec!["a", "b"]), vec![1, 2, 3, 4]); assert_eq!(test_query(vec!["a", "b"]), vec![1, 2, 3, 4]);
@@ -67,7 +72,7 @@ mod tests {
#[test] #[test]
pub fn test_phrase_query_no_positions() { pub fn test_phrase_query_no_positions() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
use schema::IndexRecordOption; use schema::IndexRecordOption;
use schema::TextFieldIndexing; use schema::TextFieldIndexing;
use schema::TextOptions; use schema::TextOptions;
@@ -91,9 +96,9 @@ mod tests {
Term::from_field_text(text_field, "a"), Term::from_field_text(text_field, "a"),
Term::from_field_text(text_field, "b"), Term::from_field_text(text_field, "b"),
]); ]);
let mut test_collector = TestCollector::default();
if let TantivyError::SchemaError(ref msg) = searcher if let TantivyError::SchemaError(ref msg) = searcher
.search(&phrase_query, &mut test_collector) .search(&phrase_query, &TestCollector)
.map(|_| ())
.unwrap_err() .unwrap_err()
{ {
assert_eq!( assert_eq!(
@@ -113,16 +118,16 @@ mod tests {
index.load_searchers().unwrap(); index.load_searchers().unwrap();
let searcher = index.searcher(); let searcher = index.searcher();
let test_query = |texts: Vec<&str>| { let test_query = |texts: Vec<&str>| {
let mut test_collector = TestCollector::default();
let terms: Vec<Term> = texts let terms: Vec<Term> = texts
.iter() .iter()
.map(|text| Term::from_field_text(text_field, text)) .map(|text| Term::from_field_text(text_field, text))
.collect(); .collect();
let phrase_query = PhraseQuery::new(terms); let phrase_query = PhraseQuery::new(terms);
searcher searcher
.search(&phrase_query, &mut test_collector) .search(&phrase_query, &TestCollector)
.expect("search should succeed"); .expect("search should succeed")
test_collector.scores() .scores()
.to_vec()
}; };
let scores = test_query(vec!["a", "b"]); let scores = test_query(vec!["a", "b"]);
assert_nearly_equals(scores[0], 0.40618482); assert_nearly_equals(scores[0], 0.40618482);
@@ -131,51 +136,39 @@ mod tests {
#[test] // motivated by #234 #[test] // motivated by #234
pub fn test_phrase_query_docfreq_order() { pub fn test_phrase_query_docfreq_order() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{ index_writer.add_document(doc!(text_field=>"b"));
// 0 index_writer.add_document(doc!(text_field=>"a b"));
let doc = doc!(text_field=>"b"); index_writer.add_document(doc!(text_field=>"b a"));
index_writer.add_document(doc);
}
{
// 1
let doc = doc!(text_field=>"a b");
index_writer.add_document(doc);
}
{
// 2
let doc = doc!(text_field=>"b a");
index_writer.add_document(doc);
}
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
index.load_searchers().unwrap(); index.load_searchers().unwrap();
let searcher = index.searcher(); let searcher = index.searcher();
let test_query = |texts: Vec<&str>| { let test_query = |texts: Vec<&str>| {
let mut test_collector = TestCollector::default();
let terms: Vec<Term> = texts let terms: Vec<Term> = texts
.iter() .iter()
.map(|text| Term::from_field_text(text_field, text)) .map(|text| Term::from_field_text(text_field, text))
.collect(); .collect();
let phrase_query = PhraseQuery::new(terms); let phrase_query = PhraseQuery::new(terms);
searcher searcher
.search(&phrase_query, &mut test_collector) .search(&phrase_query, &TestCollector)
.expect("search should succeed"); .expect("search should succeed")
test_collector.docs() .docs()
.to_vec()
}; };
assert_eq!(test_query(vec!["a", "b"]), vec![1]); assert_eq!(test_query(vec!["a", "b"]), vec![DocAddress(0, 1)]);
assert_eq!(test_query(vec!["b", "a"]), vec![2]); assert_eq!(test_query(vec!["b", "a"]), vec![DocAddress(0, 2)]);
} }
#[test] // motivated by #234 #[test] // motivated by #234
pub fn test_phrase_query_non_trivial_offsets() { pub fn test_phrase_query_non_trivial_offsets() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -187,16 +180,18 @@ mod tests {
index.load_searchers().unwrap(); index.load_searchers().unwrap();
let searcher = index.searcher(); let searcher = index.searcher();
let test_query = |texts: Vec<(usize, &str)>| { let test_query = |texts: Vec<(usize, &str)>| {
let mut test_collector = TestCollector::default();
let terms: Vec<(usize, Term)> = texts let terms: Vec<(usize, Term)> = texts
.iter() .iter()
.map(|(offset, text)| (*offset, Term::from_field_text(text_field, text))) .map(|(offset, text)| (*offset, Term::from_field_text(text_field, text)))
.collect(); .collect();
let phrase_query = PhraseQuery::new_with_offset(terms); let phrase_query = PhraseQuery::new_with_offset(terms);
searcher searcher
.search(&phrase_query, &mut test_collector) .search(&phrase_query, &TestCollector)
.expect("search should succeed"); .expect("search should succeed")
test_collector.docs() .docs()
.iter()
.map(|doc_address| doc_address.1)
.collect::<Vec<DocId>>()
}; };
assert_eq!(test_query(vec![(0, "a"), (1, "b")]), vec![0]); assert_eq!(test_query(vec![(0, "a"), (1, "b")]), vec![0]);
assert_eq!(test_query(vec![(1, "b"), (0, "a")]), vec![0]); assert_eq!(test_query(vec![(1, "b"), (0, "a")]), vec![0]);
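
In the tests above, `TestCollector::docs()` now yields full `DocAddress` values instead of bare doc ids, so expectations are written either against `DocAddress(segment_ord, doc_id)` or by projecting out `.1`. A small sketch of how a `DocAddress` comes back from a real search; the indexed titles are invented, the rest follows the calls shown in this changeset:

```rust
#[macro_use]
extern crate tantivy;

use tantivy::collector::TopDocs;
use tantivy::query::TermQuery;
use tantivy::schema::{IndexRecordOption, Schema, TEXT};
use tantivy::{DocAddress, Index, Result, Term};

fn main() { example().unwrap(); }

fn example() -> Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    {
        let mut index_writer = index.writer_with_num_threads(1, 40_000_000)?;
        index_writer.add_document(doc!(title => "of mice and men")); // doc 0
        index_writer.add_document(doc!(title => "frankenstein"));    // doc 1
        index_writer.commit()?;
    }
    index.load_searchers()?;
    let searcher = index.searcher();

    let query = TermQuery::new(
        Term::from_field_text(title, "frankenstein"),
        IndexRecordOption::Basic,
    );
    let top_docs = searcher.search(&query, &TopDocs::with_limit(1))?;

    // A `DocAddress` pairs the segment ordinal with the doc id local to that
    // segment; everything was committed once, so it all lives in segment 0.
    let (_score, doc_address) = top_docs[0];
    assert_eq!(doc_address, DocAddress(0, 1));
    Ok(())
}
```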


@@ -134,7 +134,8 @@ impl<TPostings: Postings> PhraseScorer<TPostings> {
.into_iter() .into_iter()
.map(|(offset, postings)| { .map(|(offset, postings)| {
PostingsWithOffset::new(postings, (max_offset - offset) as u32) PostingsWithOffset::new(postings, (max_offset - offset) as u32)
}).collect::<Vec<_>>(); })
.collect::<Vec<_>>();
PhraseScorer { PhraseScorer {
intersection_docset: Intersection::new(postings_with_offsets), intersection_docset: Intersection::new(postings_with_offsets),
num_docsets, num_docsets,


@@ -1,11 +1,9 @@
use super::Weight; use super::Weight;
use collector::Collector;
use core::searcher::Searcher; use core::searcher::Searcher;
use downcast; use downcast;
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::fmt; use std::fmt;
use Result; use Result;
use SegmentLocalId;
use Term; use Term;
/// The `Query` trait defines a set of documents and a scoring method /// The `Query` trait defines a set of documents and a scoring method
@@ -63,26 +61,6 @@ pub trait Query: QueryClone + downcast::Any + fmt::Debug {
/// Extract all of the terms associated to the query and insert them in the /// Extract all of the terms associated to the query and insert them in the
/// term set given in arguments. /// term set given in arguments.
fn query_terms(&self, _term_set: &mut BTreeSet<Term>) {} fn query_terms(&self, _term_set: &mut BTreeSet<Term>) {}
/// Search works as follows :
///
/// First the weight object associated to the query is created.
///
/// Then, the query loops over the segments and for each segment :
/// - setup the collector and informs it that the segment being processed has changed.
/// - creates a `Scorer` object associated for this segment
/// - iterate throw the matched documents and push them to the collector.
///
fn search(&self, searcher: &Searcher, collector: &mut Collector) -> Result<()> {
let scoring_enabled = collector.requires_scoring();
let weight = self.weight(searcher, scoring_enabled)?;
for (segment_ord, segment_reader) in searcher.segment_readers().iter().enumerate() {
collector.set_segment(segment_ord as SegmentLocalId, segment_reader)?;
let mut scorer = weight.scorer(segment_reader)?;
scorer.collect(collector, segment_reader.delete_bitset());
}
Ok(())
}
} }
pub trait QueryClone { pub trait QueryClone {
@@ -98,6 +76,26 @@ where
} }
} }
impl Query for Box<Query> {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result<Box<Weight>> {
self.as_ref().weight(searcher, scoring_enabled)
}
fn count(&self, searcher: &Searcher) -> Result<usize> {
self.as_ref().count(searcher)
}
fn query_terms(&self, term_set: &mut BTreeSet<Term<Vec<u8>>>) {
self.as_ref().query_terms(term_set);
}
}
impl QueryClone for Box<Query> {
fn box_clone(&self) -> Box<Query> {
self.as_ref().box_clone()
}
}
#[allow(missing_docs)] #[allow(missing_docs)]
mod downcast_impl { mod downcast_impl {
downcast!(super::Query); downcast!(super::Query);
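
Two things happen in the hunk above: the default `Query::search` method, which looped over segments and fed a mutable `Collector`, is removed (that loop now lives behind `Searcher::search`), and `Query`/`QueryClone` gain impls for `Box<Query>` so boxed queries from the parser can be passed by reference. A sketch of the call site this enables, assuming only APIs shown elsewhere in this changeset (the indexed text is made up):

```rust
#[macro_use]
extern crate tantivy;

use tantivy::collector::{Count, TopDocs};
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, Result};

fn main() { example().unwrap(); }

fn example() -> Result<()> {
    let mut schema_builder = Schema::builder();
    let body = schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    {
        let mut index_writer = index.writer_with_num_threads(1, 40_000_000)?;
        index_writer.add_document(doc!(body => "the old man and the sea"));
        index_writer.add_document(doc!(body => "the old barn"));
        index_writer.commit()?;
    }
    index.load_searchers()?;
    let searcher = index.searcher();

    // `parse_query` hands back a `Box<Query>`; `impl Query for Box<Query>` lets it
    // be passed by reference, and the loop over segments that used to live in the
    // removed `Query::search` default method is now driven by `Searcher::search`.
    let query_parser = QueryParser::for_index(&index, vec![body]);
    let query = query_parser.parse_query("old").unwrap();
    let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(10), Count))?;
    assert_eq!(count, 2);
    assert_eq!(top_docs.len(), 2);
    Ok(())
}
```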


@@ -68,7 +68,8 @@ fn trim_ast(logical_ast: LogicalAST) -> Option<LogicalAST> {
.into_iter() .into_iter()
.flat_map(|(occur, child)| { .flat_map(|(occur, child)| {
trim_ast(child).map(|trimmed_child| (occur, trimmed_child)) trim_ast(child).map(|trimmed_child| (occur, trimmed_child))
}).collect::<Vec<_>>(); })
.collect::<Vec<_>>();
if trimmed_children.is_empty() { if trimmed_children.is_empty() {
None None
} else { } else {
@@ -128,6 +129,7 @@ fn trim_ast(logical_ast: LogicalAST) -> Option<LogicalAST> {
/// ///
/// * all docs query: A plain `*` will match all documents in the index. /// * all docs query: A plain `*` will match all documents in the index.
/// ///
#[derive(Clone)]
pub struct QueryParser { pub struct QueryParser {
schema: Schema, schema: Schema,
default_fields: Vec<Field>, default_fields: Vec<Field>,
@@ -421,7 +423,8 @@ impl QueryParser {
lower: self.resolve_bound(field, &lower)?, lower: self.resolve_bound(field, &lower)?,
upper: self.resolve_bound(field, &upper)?, upper: self.resolve_bound(field, &upper)?,
}))) })))
}).collect::<Result<Vec<_>, QueryParserError>>()?; })
.collect::<Result<Vec<_>, QueryParserError>>()?;
let result_ast = if clauses.len() == 1 { let result_ast = if clauses.len() == 1 {
clauses.pop().unwrap() clauses.pop().unwrap()
} else { } else {
@@ -484,12 +487,12 @@ mod test {
use query::Query; use query::Query;
use schema::Field; use schema::Field;
use schema::{IndexRecordOption, TextFieldIndexing, TextOptions}; use schema::{IndexRecordOption, TextFieldIndexing, TextOptions};
use schema::{SchemaBuilder, Term, INT_INDEXED, STORED, STRING, TEXT}; use schema::{Schema, Term, INT_INDEXED, STORED, STRING, TEXT};
use tokenizer::{LowerCaser, SimpleTokenizer, StopWordFilter, Tokenizer, TokenizerManager}; use tokenizer::{LowerCaser, SimpleTokenizer, StopWordFilter, Tokenizer, TokenizerManager};
use Index; use Index;
fn make_query_parser() -> QueryParser { fn make_query_parser() -> QueryParser {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field_indexing = TextFieldIndexing::default() let text_field_indexing = TextFieldIndexing::default()
.set_tokenizer("en_with_stop_words") .set_tokenizer("en_with_stop_words")
.set_index_option(IndexRecordOption::WithFreqsAndPositions); .set_index_option(IndexRecordOption::WithFreqsAndPositions);
@@ -597,25 +600,19 @@ mod test {
assert!(query_parser.parse_query("signed:2324").is_ok()); assert!(query_parser.parse_query("signed:2324").is_ok());
assert!(query_parser.parse_query("signed:\"22\"").is_ok()); assert!(query_parser.parse_query("signed:\"22\"").is_ok());
assert!(query_parser.parse_query("signed:\"-2234\"").is_ok()); assert!(query_parser.parse_query("signed:\"-2234\"").is_ok());
assert!( assert!(query_parser
query_parser .parse_query("signed:\"-9999999999999\"")
.parse_query("signed:\"-9999999999999\"") .is_ok());
.is_ok()
);
assert!(query_parser.parse_query("signed:\"a\"").is_err()); assert!(query_parser.parse_query("signed:\"a\"").is_err());
assert!(query_parser.parse_query("signed:\"2a\"").is_err()); assert!(query_parser.parse_query("signed:\"2a\"").is_err());
assert!( assert!(query_parser
query_parser .parse_query("signed:\"18446744073709551615\"")
.parse_query("signed:\"18446744073709551615\"") .is_err());
.is_err()
);
assert!(query_parser.parse_query("unsigned:\"2\"").is_ok()); assert!(query_parser.parse_query("unsigned:\"2\"").is_ok());
assert!(query_parser.parse_query("unsigned:\"-2\"").is_err()); assert!(query_parser.parse_query("unsigned:\"-2\"").is_err());
assert!( assert!(query_parser
query_parser .parse_query("unsigned:\"18446744073709551615\"")
.parse_query("unsigned:\"18446744073709551615\"") .is_ok());
.is_ok()
);
test_parse_query_to_logical_ast_helper( test_parse_query_to_logical_ast_helper(
"unsigned:2324", "unsigned:2324",
"Term([0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 9, 20])", "Term([0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 9, 20])",
@@ -720,7 +717,7 @@ mod test {
#[test] #[test]
pub fn test_unknown_tokenizer() { pub fn test_unknown_tokenizer() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field_indexing = TextFieldIndexing::default() let text_field_indexing = TextFieldIndexing::default()
.set_tokenizer("nonexistingtokenizer") .set_tokenizer("nonexistingtokenizer")
.set_index_option(IndexRecordOption::Basic); .set_index_option(IndexRecordOption::Basic);
@@ -738,7 +735,7 @@ mod test {
#[test] #[test]
pub fn test_query_parser_no_positions() { pub fn test_query_parser_no_positions() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field_indexing = TextFieldIndexing::default() let text_field_indexing = TextFieldIndexing::default()
.set_tokenizer("customtokenizer") .set_tokenizer("customtokenizer")
.set_index_option(IndexRecordOption::Basic); .set_index_option(IndexRecordOption::Basic);


@@ -40,14 +40,13 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// # #[macro_use] /// # #[macro_use]
/// # extern crate tantivy; /// # extern crate tantivy;
/// # use tantivy::Index; /// # use tantivy::Index;
/// # use tantivy::schema::{SchemaBuilder, INT_INDEXED}; /// # use tantivy::schema::{Schema, INT_INDEXED};
/// # use tantivy::collector::CountCollector; /// # use tantivy::collector::Count;
/// # use tantivy::query::Query;
/// # use tantivy::Result; /// # use tantivy::Result;
/// # use tantivy::query::RangeQuery; /// # use tantivy::query::RangeQuery;
/// # /// #
/// # fn run() -> Result<()> { /// # fn run() -> Result<()> {
/// # let mut schema_builder = SchemaBuilder::new(); /// # let mut schema_builder = Schema::builder();
/// # let year_field = schema_builder.add_u64_field("year", INT_INDEXED); /// # let year_field = schema_builder.add_u64_field("year", INT_INDEXED);
/// # let schema = schema_builder.build(); /// # let schema = schema_builder.build();
/// # /// #
@@ -67,10 +66,7 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// ///
/// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970); /// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
/// ///
/// let mut count_collector = CountCollector::default(); /// let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?;
/// docs_in_the_sixties.search(&searcher, &mut count_collector)?;
///
/// let num_60s_books = count_collector.count();
/// ///
/// # assert_eq!(num_60s_books, 2285); /// # assert_eq!(num_60s_books, 2285);
/// # Ok(()) /// # Ok(())
@@ -296,9 +292,8 @@ impl Weight for RangeWeight {
mod tests { mod tests {
use super::RangeQuery; use super::RangeQuery;
use collector::CountCollector; use collector::Count;
use query::Query; use schema::{Document, Field, Schema, INT_INDEXED};
use schema::{Document, Field, SchemaBuilder, INT_INDEXED};
use std::collections::Bound; use std::collections::Bound;
use Index; use Index;
use Result; use Result;
@@ -306,7 +301,7 @@ mod tests {
#[test] #[test]
fn test_range_query_simple() { fn test_range_query_simple() {
fn run() -> Result<()> { fn run() -> Result<()> {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let year_field = schema_builder.add_u64_field("year", INT_INDEXED); let year_field = schema_builder.add_u64_field("year", INT_INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -327,9 +322,8 @@ mod tests {
let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960u64..1970u64); let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960u64..1970u64);
// ... or `1960..=1969` if inclusive range is enabled. // ... or `1960..=1969` if inclusive range is enabled.
let mut count_collector = CountCollector::default(); let count = searcher.search(&docs_in_the_sixties, &Count)?;
docs_in_the_sixties.search(&searcher, &mut count_collector)?; assert_eq!(count, 2285);
assert_eq!(count_collector.count(), 2285);
Ok(()) Ok(())
} }
@@ -340,7 +334,7 @@ mod tests {
fn test_range_query() { fn test_range_query() {
let int_field: Field; let int_field: Field;
let schema = { let schema = {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
int_field = schema_builder.add_i64_field("intfield", INT_INDEXED); int_field = schema_builder.add_i64_field("intfield", INT_INDEXED);
schema_builder.build() schema_builder.build()
}; };
@@ -363,11 +357,8 @@ mod tests {
} }
index.load_searchers().unwrap(); index.load_searchers().unwrap();
let searcher = index.searcher(); let searcher = index.searcher();
let count_multiples = |range_query: RangeQuery| { let count_multiples =
let mut count_collector = CountCollector::default(); |range_query: RangeQuery| searcher.search(&range_query, &Count).unwrap();
range_query.search(&searcher, &mut count_collector).unwrap();
count_collector.count()
};
assert_eq!(count_multiples(RangeQuery::new_i64(int_field, 10..11)), 9); assert_eq!(count_multiples(RangeQuery::new_i64(int_field, 10..11)), 9);
assert_eq!( assert_eq!(
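
The `RangeQuery` docs and tests above switch to the one-shot `searcher.search(&query, &Count)` call. A compact runnable version of the same idea; the year values are invented, the calls mirror the example above:

```rust
#[macro_use]
extern crate tantivy;

use tantivy::collector::Count;
use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INT_INDEXED};
use tantivy::{Index, Result};

fn main() { example().unwrap(); }

fn example() -> Result<()> {
    let mut schema_builder = Schema::builder();
    let year = schema_builder.add_u64_field("year", INT_INDEXED);
    let index = Index::create_in_ram(schema_builder.build());

    {
        let mut index_writer = index.writer_with_num_threads(1, 40_000_000)?;
        for y in 1950u64..1980u64 {
            index_writer.add_document(doc!(year => y));
        }
        index_writer.commit()?;
    }
    index.load_searchers()?;
    let searcher = index.searcher();

    // Half-open range: 1960 inclusive to 1970 exclusive.
    let docs_in_the_sixties = RangeQuery::new_u64(year, 1960..1970);
    let num_60s_docs = searcher.search(&docs_in_the_sixties, &Count)?;
    assert_eq!(num_60s_docs, 10);
    Ok(())
}
```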


@@ -16,14 +16,14 @@ use Searcher;
/// ```rust /// ```rust
/// #[macro_use] /// #[macro_use]
/// extern crate tantivy; /// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result, Term}; /// use tantivy::{Index, Result, Term};
/// use tantivy::collector::{CountCollector, TopCollector, chain}; /// use tantivy::collector::Count;
/// use tantivy::query::RegexQuery; /// use tantivy::query::RegexQuery;
/// ///
/// # fn main() { example().unwrap(); } /// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new(); /// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT); /// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema); /// let index = Index::create_in_ram(schema);
@@ -47,19 +47,10 @@ use Searcher;
/// index.load_searchers()?; /// index.load_searchers()?;
/// let searcher = index.searcher(); /// let searcher = index.searcher();
/// ///
/// { /// let term = Term::from_field_text(title, "Diary");
/// let mut top_collector = TopCollector::with_limit(2); /// let query = RegexQuery::new("d[ai]{2}ry".to_string(), title);
/// let mut count_collector = CountCollector::default(); /// let count = searcher.search(&query, &Count)?;
/// { /// assert_eq!(count, 3);
/// let mut collectors = chain().push(&mut top_collector).push(&mut count_collector);
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::new("d[ai]{2}ry".to_string(), title);
/// searcher.search(&query, &mut collectors).unwrap();
/// }
/// assert_eq!(count_collector.count(), 3);
/// assert!(top_collector.at_capacity());
/// }
///
/// Ok(()) /// Ok(())
/// } /// }
/// ``` /// ```
@@ -95,15 +86,15 @@ impl Query for RegexQuery {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::RegexQuery; use super::RegexQuery;
use collector::TopCollector; use collector::TopDocs;
use schema::SchemaBuilder; use schema::Schema;
use schema::TEXT; use schema::TEXT;
use tests::assert_nearly_equals; use tests::assert_nearly_equals;
use Index; use Index;
#[test] #[test]
pub fn test_regex_query() { pub fn test_regex_query() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let country_field = schema_builder.add_text_field("country", TEXT); let country_field = schema_builder.add_text_field("country", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -120,20 +111,18 @@ mod test {
index.load_searchers().unwrap(); index.load_searchers().unwrap();
let searcher = index.searcher(); let searcher = index.searcher();
{ {
let mut collector = TopCollector::with_limit(2);
let regex_query = RegexQuery::new("jap[ao]n".to_string(), country_field); let regex_query = RegexQuery::new("jap[ao]n".to_string(), country_field);
searcher.search(&regex_query, &mut collector).unwrap(); let scored_docs = searcher
let scored_docs = collector.top_docs(); .search(&regex_query, &TopDocs::with_limit(2))
.unwrap();
assert_eq!(scored_docs.len(), 1, "Expected only 1 document"); assert_eq!(scored_docs.len(), 1, "Expected only 1 document");
let (score, _) = scored_docs[0]; let (score, _) = scored_docs[0];
assert_nearly_equals(1f32, score); assert_nearly_equals(1f32, score);
} }
{ let regex_query = RegexQuery::new("jap[A-Z]n".to_string(), country_field);
let mut collector = TopCollector::with_limit(2); let top_docs = searcher
let regex_query = RegexQuery::new("jap[A-Z]n".to_string(), country_field); .search(&regex_query, &TopDocs::with_limit(2))
searcher.search(&regex_query, &mut collector).unwrap(); .unwrap();
let scored_docs = collector.top_docs(); assert!(top_docs.is_empty(), "Expected ZERO document");
assert_eq!(scored_docs.len(), 0, "Expected ZERO document");
}
} }
} }
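
Same pattern for `RegexQuery`: the tests now get their hits straight from `searcher.search(&query, &TopDocs::with_limit(..))`. A runnable sketch with made-up documents:

```rust
#[macro_use]
extern crate tantivy;

use tantivy::collector::TopDocs;
use tantivy::query::RegexQuery;
use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, Result};

fn main() { example().unwrap(); }

fn example() -> Result<()> {
    let mut schema_builder = Schema::builder();
    let country = schema_builder.add_text_field("country", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    {
        let mut index_writer = index.writer_with_num_threads(1, 40_000_000)?;
        index_writer.add_document(doc!(country => "japan"));
        index_writer.add_document(doc!(country => "korea"));
        index_writer.commit()?;
    }
    index.load_searchers()?;
    let searcher = index.searcher();

    // The pattern is matched against the indexed (lower-cased) terms.
    let regex_query = RegexQuery::new("jap[ao]n".to_string(), country);
    let top_docs = searcher.search(&regex_query, &TopDocs::with_limit(2))?;
    assert_eq!(top_docs.len(), 1);
    Ok(())
}
```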


@@ -1,8 +1,6 @@
use collector::Collector;
use common::BitSet; use common::BitSet;
use docset::{DocSet, SkipResult}; use docset::{DocSet, SkipResult};
use downcast; use downcast;
use fastfield::DeleteBitSet;
use std::ops::DerefMut; use std::ops::DerefMut;
use DocId; use DocId;
use Score; use Score;
@@ -16,20 +14,11 @@ pub trait Scorer: downcast::Any + DocSet + 'static {
/// This method will perform a bit of computation and is not cached. /// This method will perform a bit of computation and is not cached.
fn score(&mut self) -> Score; fn score(&mut self) -> Score;
/// Consumes the complete `DocSet` and /// Iterates through all of the document matched by the DocSet
/// push the scored documents to the collector. /// `DocSet` and push the scored documents to the collector.
fn collect(&mut self, collector: &mut Collector, delete_bitset_opt: Option<&DeleteBitSet>) { fn for_each(&mut self, callback: &mut FnMut(DocId, Score)) {
if let Some(delete_bitset) = delete_bitset_opt { while self.advance() {
while self.advance() { callback(self.doc(), self.score());
let doc = self.doc();
if !delete_bitset.is_deleted(doc) {
collector.collect(doc, self.score());
}
}
} else {
while self.advance() {
collector.collect(self.doc(), self.score());
}
} }
} }
} }
@@ -44,9 +33,9 @@ impl Scorer for Box<Scorer> {
self.deref_mut().score() self.deref_mut().score()
} }
fn collect(&mut self, collector: &mut Collector, delete_bitset: Option<&DeleteBitSet>) { fn for_each(&mut self, callback: &mut FnMut(DocId, Score)) {
let scorer = self.deref_mut(); let scorer = self.deref_mut();
scorer.collect(collector, delete_bitset); scorer.for_each(callback);
} }
} }
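
`Scorer::collect`, which pushed hits into a `&mut Collector` and filtered deleted documents itself, is replaced by the leaner `for_each`, which just drives a `FnMut(DocId, Score)` callback; deletion filtering and collection now happen in the caller. The toy cursor type below implements the same default-method shape (`VecScorer` is a stand-in, not a tantivy type):

```rust
type DocId = u32;
type Score = f32;

/// Toy stand-in for the DocSet/Scorer pair: a cursor over pre-scored hits.
struct VecScorer {
    hits: Vec<(DocId, Score)>,
    cursor: usize,
}

impl VecScorer {
    fn new(hits: Vec<(DocId, Score)>) -> VecScorer {
        VecScorer { hits, cursor: 0 }
    }

    fn advance(&mut self) -> bool {
        self.cursor += 1;
        self.cursor <= self.hits.len()
    }

    fn doc(&self) -> DocId {
        self.hits[self.cursor - 1].0
    }

    fn score(&mut self) -> Score {
        self.hits[self.cursor - 1].1
    }

    /// Same shape as the new default method: drive a callback over every match.
    fn for_each(&mut self, callback: &mut FnMut(DocId, Score)) {
        while self.advance() {
            callback(self.doc(), self.score());
        }
    }
}

fn main() {
    let mut scorer = VecScorer::new(vec![(0, 0.5), (3, 1.2), (7, 0.1)]);
    let mut collected: Vec<(DocId, Score)> = Vec::new();
    scorer.for_each(&mut |doc, score| collected.push((doc, score)));
    assert_eq!(collected.len(), 3);
}
```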


@@ -9,17 +9,17 @@ pub use self::term_weight::TermWeight;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use collector::TopCollector; use collector::TopDocs;
use docset::DocSet; use docset::DocSet;
use query::{Query, QueryParser, Scorer, TermQuery}; use query::{Query, QueryParser, Scorer, TermQuery};
use schema::{IndexRecordOption, SchemaBuilder, STRING, TEXT}; use schema::{IndexRecordOption, Schema, STRING, TEXT};
use tests::assert_nearly_equals; use tests::assert_nearly_equals;
use Index; use Index;
use Term; use Term;
#[test] #[test]
pub fn test_term_query_no_freq() { pub fn test_term_query_no_freq() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", STRING); let text_field = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -49,7 +49,7 @@ mod tests {
#[test] #[test]
pub fn test_term_weight() { pub fn test_term_weight() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let left_field = schema_builder.add_text_field("left", TEXT); let left_field = schema_builder.add_text_field("left", TEXT);
let right_field = schema_builder.add_text_field("right", TEXT); let right_field = schema_builder.add_text_field("right", TEXT);
let large_field = schema_builder.add_text_field("large", TEXT); let large_field = schema_builder.add_text_field("large", TEXT);
@@ -68,37 +68,35 @@ mod tests {
index.load_searchers().unwrap(); index.load_searchers().unwrap();
let searcher = index.searcher(); let searcher = index.searcher();
{ {
let mut collector = TopCollector::with_limit(2);
let term = Term::from_field_text(left_field, "left2"); let term = Term::from_field_text(left_field, "left2");
let term_query = TermQuery::new(term, IndexRecordOption::WithFreqs); let term_query = TermQuery::new(term, IndexRecordOption::WithFreqs);
searcher.search(&term_query, &mut collector).unwrap(); let topdocs = searcher
let scored_docs = collector.top_docs(); .search(&term_query, &TopDocs::with_limit(2))
assert_eq!(scored_docs.len(), 1); .unwrap();
let (score, _) = scored_docs[0]; assert_eq!(topdocs.len(), 1);
let (score, _) = topdocs[0];
assert_nearly_equals(0.77802235, score); assert_nearly_equals(0.77802235, score);
} }
{ {
let mut collector = TopCollector::with_limit(2);
let term = Term::from_field_text(left_field, "left1"); let term = Term::from_field_text(left_field, "left1");
let term_query = TermQuery::new(term, IndexRecordOption::WithFreqs); let term_query = TermQuery::new(term, IndexRecordOption::WithFreqs);
searcher.search(&term_query, &mut collector).unwrap(); let top_docs = searcher
let scored_docs = collector.top_docs(); .search(&term_query, &TopDocs::with_limit(2))
assert_eq!(scored_docs.len(), 2); .unwrap();
let (score1, _) = scored_docs[0]; assert_eq!(top_docs.len(), 2);
let (score1, _) = top_docs[0];
assert_nearly_equals(0.27101856, score1); assert_nearly_equals(0.27101856, score1);
let (score2, _) = scored_docs[1]; let (score2, _) = top_docs[1];
assert_nearly_equals(0.13736556, score2); assert_nearly_equals(0.13736556, score2);
} }
{ {
let query_parser = QueryParser::for_index(&index, vec![]); let query_parser = QueryParser::for_index(&index, vec![]);
let query = query_parser.parse_query("left:left2 left:left1").unwrap(); let query = query_parser.parse_query("left:left2 left:left1").unwrap();
let mut collector = TopCollector::with_limit(2); let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
searcher.search(&*query, &mut collector).unwrap(); assert_eq!(top_docs.len(), 2);
let scored_docs = collector.top_docs(); let (score1, _) = top_docs[0];
assert_eq!(scored_docs.len(), 2);
let (score1, _) = scored_docs[0];
assert_nearly_equals(0.9153879, score1); assert_nearly_equals(0.9153879, score1);
let (score2, _) = scored_docs[1]; let (score2, _) = top_docs[1];
assert_nearly_equals(0.27101856, score2); assert_nearly_equals(0.27101856, score2);
} }
} }


@@ -21,14 +21,14 @@ use Term;
/// ```rust /// ```rust
/// #[macro_use] /// #[macro_use]
/// extern crate tantivy; /// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT, IndexRecordOption}; /// use tantivy::schema::{Schema, TEXT, IndexRecordOption};
/// use tantivy::{Index, Result, Term}; /// use tantivy::{Index, Result, Term};
/// use tantivy::collector::{CountCollector, TopCollector, chain}; /// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::TermQuery; /// use tantivy::query::TermQuery;
/// ///
/// # fn main() { example().unwrap(); } /// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new(); /// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT); /// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema); /// let index = Index::create_in_ram(schema);
@@ -52,20 +52,12 @@ use Term;
/// index.load_searchers()?; /// index.load_searchers()?;
/// let searcher = index.searcher(); /// let searcher = index.searcher();
/// ///
/// { /// let query = TermQuery::new(
/// let mut top_collector = TopCollector::with_limit(2); /// Term::from_field_text(title, "diary"),
/// let mut count_collector = CountCollector::default(); /// IndexRecordOption::Basic,
/// { /// );
/// let mut collectors = chain().push(&mut top_collector).push(&mut count_collector); /// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
/// let query = TermQuery::new( /// assert_eq!(count, 2);
/// Term::from_field_text(title, "diary"),
/// IndexRecordOption::Basic,
/// );
/// searcher.search(&query, &mut collectors).unwrap();
/// }
/// assert_eq!(count_collector.count(), 2);
/// assert!(top_collector.at_capacity());
/// }
/// ///
/// Ok(()) /// Ok(())
/// } /// }


@@ -55,7 +55,8 @@ where
None None
} }
}, },
).collect(); )
.collect();
Union { Union {
docsets: non_empty_docsets, docsets: non_empty_docsets,
bitsets: Box::new([TinySet::empty(); HORIZON_NUM_TINYBITSETS]), bitsets: Box::new([TinySet::empty(); HORIZON_NUM_TINYBITSETS]),
@@ -214,10 +215,7 @@ where
// The target is outside of the buffered horizon. // The target is outside of the buffered horizon.
// advance all docsets to a doc >= to the target. // advance all docsets to a doc >= to the target.
#[cfg_attr( #[cfg_attr(feature = "cargo-clippy", allow(clippy::clippy::collapsible_if))]
feature = "cargo-clippy",
allow(clippy::clippy::collapsible_if)
)]
unordered_drain_filter(&mut self.docsets, |docset| { unordered_drain_filter(&mut self.docsets, |docset| {
if docset.doc() < target { if docset.doc() < target {
if docset.skip_next(target) == SkipResult::End { if docset.skip_next(target) == SkipResult::End {


@@ -6,7 +6,7 @@ use Result;
/// for a given set of segments. /// for a given set of segments.
/// ///
/// See [`Query`](./trait.Query.html). /// See [`Query`](./trait.Query.html).
pub trait Weight { pub trait Weight: Send + Sync + 'static {
/// Returns the scorer for the given segment. /// Returns the scorer for the given segment.
/// See [`Query`](./trait.Query.html). /// See [`Query`](./trait.Query.html).
fn scorer(&self, reader: &SegmentReader) -> Result<Box<Scorer>>; fn scorer(&self, reader: &SegmentReader) -> Result<Box<Scorer>>;
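
`Weight` is now `Send + Sync + 'static`, which is what allows a boxed weight to be handed to the threads performing per-segment search. A tiny stand-alone illustration of what the supertrait bound buys (the `Weight` trait here is a simplified stand-in, not tantivy's):

```rust
use std::sync::Arc;
use std::thread;

// Stand-in for the new supertrait bound: every implementor is usable across threads.
trait Weight: Send + Sync + 'static {
    fn boost(&self) -> f32;
}

struct ConstWeight(f32);

impl Weight for ConstWeight {
    fn boost(&self) -> f32 {
        self.0
    }
}

fn main() {
    // Because of the supertrait, an Arc'd trait object is itself Send + Sync and
    // can be moved into worker threads without extra bounds at the call site.
    let weight: Arc<Weight> = Arc::new(ConstWeight(2.0));
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let weight = Arc::clone(&weight);
            thread::spawn(move || weight.boost())
        })
        .collect();
    for handle in handles {
        assert_eq!(handle.join().unwrap(), 2.0);
    }
}
```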


@@ -161,7 +161,7 @@ mod tests {
#[test] #[test]
fn test_doc() { fn test_doc() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("title", TEXT); let text_field = schema_builder.add_text_field("title", TEXT);
let mut doc = Document::default(); let mut doc = Document::default();
doc.add_text(text_field, "My title"); doc.add_text(text_field, "My title");


@@ -27,7 +27,7 @@ directory.
``` ```
use tantivy::schema::*; use tantivy::schema::*;
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let title_options = TextOptions::default() let title_options = TextOptions::default()
.set_stored() .set_stored()
.set_indexing_options(TextFieldIndexing::default() .set_indexing_options(TextFieldIndexing::default()
@@ -44,11 +44,11 @@ We can split the problem of generating a search result page into two phases :
the search results page. (`doc_ids[] -> Document[]`) the search results page. (`doc_ids[] -> Document[]`)
In the first phase, the ability to search for documents by the given field is determined by the In the first phase, the ability to search for documents by the given field is determined by the
[`TextIndexingOptions`](enum.TextIndexingOptions.html) of our [`TextOptions`] [`TextIndexingOptions`](enum.TextIndexingOptions.html) of our
(struct.TextOptions.html). [`TextOptions`](struct.TextOptions.html).
The effect of each possible setting is described more in detail [`TextIndexingOptions`] The effect of each possible setting is described more in detail
(enum.TextIndexingOptions.html). [`TextIndexingOptions`](enum.TextIndexingOptions.html).
On the other hand setting the field as stored or not determines whether the field should be returned On the other hand setting the field as stored or not determines whether the field should be returned
when [`searcher.doc(doc_address)`](../struct.Searcher.html#method.doc) is called. when [`searcher.doc(doc_address)`](../struct.Searcher.html#method.doc) is called.
@@ -62,7 +62,7 @@ The example can be rewritten :
``` ```
use tantivy::schema::*; use tantivy::schema::*;
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
schema_builder.add_text_field("title_options", TEXT | STORED); schema_builder.add_text_field("title_options", TEXT | STORED);
let schema = schema_builder.build(); let schema = schema_builder.build();
``` ```
@@ -75,7 +75,7 @@ let schema = schema_builder.build();
``` ```
use tantivy::schema::*; use tantivy::schema::*;
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let num_stars_options = IntOptions::default() let num_stars_options = IntOptions::default()
.set_stored() .set_stored()
.set_indexed(); .set_indexed();


@@ -23,13 +23,14 @@ use std::fmt;
/// ``` /// ```
/// use tantivy::schema::*; /// use tantivy::schema::*;
/// ///
/// let mut schema_builder = SchemaBuilder::default(); /// let mut schema_builder = Schema::builder();
/// let id_field = schema_builder.add_text_field("id", STRING); /// let id_field = schema_builder.add_text_field("id", STRING);
/// let title_field = schema_builder.add_text_field("title", TEXT); /// let title_field = schema_builder.add_text_field("title", TEXT);
/// let body_field = schema_builder.add_text_field("body", TEXT); /// let body_field = schema_builder.add_text_field("body", TEXT);
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// ///
/// ``` /// ```
#[derive(Default)]
pub struct SchemaBuilder { pub struct SchemaBuilder {
fields: Vec<FieldEntry>, fields: Vec<FieldEntry>,
fields_map: HashMap<String, Field>, fields_map: HashMap<String, Field>,
@@ -120,15 +121,6 @@ impl SchemaBuilder {
} }
} }
impl Default for SchemaBuilder {
fn default() -> SchemaBuilder {
SchemaBuilder {
fields: Vec::new(),
fields_map: HashMap::new(),
}
}
}
struct InnerSchema { struct InnerSchema {
fields: Vec<FieldEntry>, fields: Vec<FieldEntry>,
fields_map: HashMap<String, Field>, // transient fields_map: HashMap<String, Field>, // transient
@@ -142,7 +134,6 @@ impl PartialEq for InnerSchema {
impl Eq for InnerSchema {} impl Eq for InnerSchema {}
/// Tantivy has a very strict schema. /// Tantivy has a very strict schema.
/// You need to specify in advance, whether a field is indexed or not, /// You need to specify in advance, whether a field is indexed or not,
/// stored or not, and RAM-based or not. /// stored or not, and RAM-based or not.
@@ -156,7 +147,7 @@ impl Eq for InnerSchema {}
/// ``` /// ```
/// use tantivy::schema::*; /// use tantivy::schema::*;
/// ///
/// let mut schema_builder = SchemaBuilder::default(); /// let mut schema_builder = Schema::builder();
/// let id_field = schema_builder.add_text_field("id", STRING); /// let id_field = schema_builder.add_text_field("id", STRING);
/// let title_field = schema_builder.add_text_field("title", TEXT); /// let title_field = schema_builder.add_text_field("title", TEXT);
/// let body_field = schema_builder.add_text_field("body", TEXT); /// let body_field = schema_builder.add_text_field("body", TEXT);
@@ -182,6 +173,11 @@ impl Schema {
&self.0.fields &self.0.fields
} }
/// Creates a new builder.
pub fn builder() -> SchemaBuilder {
SchemaBuilder::default()
}
/// Returns the field options associated with a given name. /// Returns the field options associated with a given name.
/// ///
/// # Panics /// # Panics
@@ -236,12 +232,14 @@ impl Schema {
let field_entry = self.get_field_entry(field); let field_entry = self.get_field_entry(field);
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
match *json_value { match *json_value {
JsonValue::Array(ref json_items) => for json_item in json_items { JsonValue::Array(ref json_items) => {
let value = field_type for json_item in json_items {
.value_from_json(json_item) let value = field_type.value_from_json(json_item).map_err(|e| {
.map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?; DocParsingError::ValueError(field_name.clone(), e)
doc.add(FieldValue::new(field, value)); })?;
}, doc.add(FieldValue::new(field, value));
}
}
_ => { _ => {
let value = field_type let value = field_type
.value_from_json(json_value) .value_from_json(json_value)
@@ -327,7 +325,7 @@ mod tests {
#[test] #[test]
pub fn is_indexed_test() { pub fn is_indexed_test() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let field_str = schema_builder.add_text_field("field_str", STRING); let field_str = schema_builder.add_text_field("field_str", STRING);
let schema = schema_builder.build(); let schema = schema_builder.build();
assert!(schema.get_field_entry(field_str).is_indexed()); assert!(schema.get_field_entry(field_str).is_indexed());
@@ -335,7 +333,7 @@ mod tests {
#[test] #[test]
pub fn test_schema_serialization() { pub fn test_schema_serialization() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let count_options = IntOptions::default() let count_options = IntOptions::default()
.set_stored() .set_stored()
.set_fast(Cardinality::SingleValue); .set_fast(Cardinality::SingleValue);
@@ -404,7 +402,7 @@ mod tests {
#[test] #[test]
pub fn test_document_to_json() { pub fn test_document_to_json() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let count_options = IntOptions::default() let count_options = IntOptions::default()
.set_stored() .set_stored()
.set_fast(Cardinality::SingleValue); .set_fast(Cardinality::SingleValue);
@@ -425,7 +423,7 @@ mod tests {
#[test] #[test]
pub fn test_parse_document() { pub fn test_parse_document() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let count_options = IntOptions::default() let count_options = IntOptions::default()
.set_stored() .set_stored()
.set_fast(Cardinality::SingleValue); .set_fast(Cardinality::SingleValue);
@@ -450,7 +448,8 @@ mod tests {
"count": 4, "count": 4,
"popularity": 10 "popularity": 10
}"#, }"#,
).unwrap(); )
.unwrap();
assert_eq!(doc.get_first(title_field).unwrap().text(), Some("my title")); assert_eq!(doc.get_first(title_field).unwrap().text(), Some("my title"));
assert_eq!( assert_eq!(
doc.get_first(author_field).unwrap().text(), doc.get_first(author_field).unwrap().text(),

View File

@@ -201,7 +201,7 @@ mod tests {
#[test] #[test]
pub fn test_term() { pub fn test_term() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
schema_builder.add_text_field("text", STRING); schema_builder.add_text_field("text", STRING);
let title_field = schema_builder.add_text_field("title", STRING); let title_field = schema_builder.add_text_field("title", STRING);
let count_field = schema_builder.add_text_field("count", STRING); let count_field = schema_builder.add_text_field("count", STRING);

View File

@@ -141,7 +141,7 @@ mod tests {
assert!(field_options.get_indexing_options().is_some()); assert!(field_options.get_indexing_options().is_some());
} }
{ {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
schema_builder.add_text_field("body", TEXT); schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let field = schema.get_field("body").unwrap(); let field = schema.get_field("body").unwrap();

View File

@@ -22,6 +22,11 @@ impl HighlightSection {
fn new(start: usize, stop: usize) -> HighlightSection { fn new(start: usize, stop: usize) -> HighlightSection {
HighlightSection { start, stop } HighlightSection { start, stop }
} }
/// Returns the bounds of the `HighlightSection`.
pub fn bounds(&self) -> (usize, usize) {
(self.start, self.stop)
}
} }
#[derive(Debug)] #[derive(Debug)]
@@ -65,6 +70,8 @@ impl FragmentCandidate {
} }
} }
/// `Snippet`
/// Contains a fragment of a document, and some highlighted parts inside it.
#[derive(Debug)] #[derive(Debug)]
pub struct Snippet { pub struct Snippet {
fragments: String, fragments: String,
@@ -75,6 +82,7 @@ const HIGHLIGHTEN_PREFIX: &str = "<b>";
const HIGHLIGHTEN_POSTFIX: &str = "</b>"; const HIGHLIGHTEN_POSTFIX: &str = "</b>";
impl Snippet { impl Snippet {
/// Create a new, empty, `Snippet`
pub fn empty() -> Snippet { pub fn empty() -> Snippet {
Snippet { Snippet {
fragments: String::new(), fragments: String::new(),
@@ -99,6 +107,16 @@ impl Snippet {
)); ));
html html
} }
/// Returns a fragment from the `Snippet`.
pub fn fragments(&self) -> &str {
&self.fragments
}
/// Returns a list of highlighted positions from the `Snippet`.
pub fn highlighted(&self) -> &[HighlightSection] {
&self.highlighted
}
} }
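The new `bounds()`, `fragments()` and `highlighted()` accessors make custom rendering possible without going through `to_html()`. A sketch of a custom highlighter built only on those accessors; it assumes `Snippet` is reachable from the crate root the way `SnippetGenerator` is:

```rust
use tantivy::Snippet;

/// Wraps each highlighted section in asterisks instead of the default <b> tags.
fn highlight_with_stars(snippet: &Snippet) -> String {
    let text = snippet.fragments();
    let mut output = String::new();
    let mut cursor = 0;
    for section in snippet.highlighted() {
        let (start, stop) = section.bounds();
        output.push_str(&text[cursor..start]); // unhighlighted prefix
        output.push('*');
        output.push_str(&text[start..stop]); // highlighted term
        output.push('*');
        cursor = stop;
    }
    output.push_str(&text[cursor..]); // trailing text
    output
}
```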
/// Returns a non-empty list of "good" fragments. /// Returns a non-empty list of "good" fragments.
@@ -174,7 +192,8 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
item.start - fragment.start_offset, item.start - fragment.start_offset,
item.stop - fragment.start_offset, item.stop - fragment.start_offset,
) )
}).collect(); })
.collect();
Snippet { Snippet {
fragments: fragment_text.to_string(), fragments: fragment_text.to_string(),
highlighted, highlighted,
@@ -197,12 +216,12 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
/// # #[macro_use] /// # #[macro_use]
/// # extern crate tantivy; /// # extern crate tantivy;
/// # use tantivy::Index; /// # use tantivy::Index;
/// # use tantivy::schema::{SchemaBuilder, TEXT}; /// # use tantivy::schema::{Schema, TEXT};
/// # use tantivy::query::QueryParser; /// # use tantivy::query::QueryParser;
/// use tantivy::SnippetGenerator; /// use tantivy::SnippetGenerator;
/// ///
/// # fn main() -> tantivy::Result<()> { /// # fn main() -> tantivy::Result<()> {
/// # let mut schema_builder = SchemaBuilder::default(); /// # let mut schema_builder = Schema::builder();
/// # let text_field = schema_builder.add_text_field("text", TEXT); /// # let text_field = schema_builder.add_text_field("text", TEXT);
/// # let schema = schema_builder.build(); /// # let schema = schema_builder.build();
/// # let index = Index::create_in_ram(schema); /// # let index = Index::create_in_ram(schema);
@@ -224,7 +243,7 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
/// let query = query_parser.parse_query("haleurs flamands").unwrap(); /// let query = query_parser.parse_query("haleurs flamands").unwrap();
/// # index.load_searchers()?; /// # index.load_searchers()?;
/// # let searcher = index.searcher(); /// # let searcher = index.searcher();
/// let mut snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field)?; /// let mut snippet_generator = SnippetGenerator::create(&searcher, &*query, text_field)?;
/// snippet_generator.set_max_num_chars(100); /// snippet_generator.set_max_num_chars(100);
/// let snippet = snippet_generator.snippet_from_doc(&doc); /// let snippet = snippet_generator.snippet_from_doc(&doc);
/// let snippet_html: String = snippet.to_html(); /// let snippet_html: String = snippet.to_html();
@@ -241,7 +260,7 @@ pub struct SnippetGenerator {
impl SnippetGenerator { impl SnippetGenerator {
/// Creates a new snippet generator /// Creates a new snippet generator
pub fn new(searcher: &Searcher, query: &Query, field: Field) -> Result<SnippetGenerator> { pub fn create(searcher: &Searcher, query: &Query, field: Field) -> Result<SnippetGenerator> {
let mut terms = BTreeSet::new(); let mut terms = BTreeSet::new();
query.query_terms(&mut terms); query.query_terms(&mut terms);
let terms_text: BTreeMap<String, f32> = terms let terms_text: BTreeMap<String, f32> = terms
@@ -306,7 +325,7 @@ impl SnippetGenerator {
mod tests { mod tests {
use super::{search_fragments, select_best_fragment_combination}; use super::{search_fragments, select_best_fragment_combination};
use query::QueryParser; use query::QueryParser;
use schema::{IndexRecordOption, SchemaBuilder, TextFieldIndexing, TextOptions, TEXT}; use schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions, TEXT};
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::iter::Iterator; use std::iter::Iterator;
use tokenizer::{box_tokenizer, SimpleTokenizer}; use tokenizer::{box_tokenizer, SimpleTokenizer};
@@ -328,8 +347,6 @@ to the project are from community members.[15]
Rust won first place for "most loved programming language" in the Stack Overflow Developer Rust won first place for "most loved programming language" in the Stack Overflow Developer
Survey in 2016, 2017, and 2018."#; Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet() { fn test_snippet() {
let boxed_tokenizer = box_tokenizer(SimpleTokenizer); let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
@@ -345,13 +362,18 @@ Survey in 2016, 2017, and 2018."#;
assert_eq!(first.stop_offset, 89); assert_eq!(first.stop_offset, 89);
} }
let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT); let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
assert_eq!(snippet.fragments, "Rust is a systems programming language sponsored by \
    Mozilla which\ndescribes it as a \"safe");
assert_eq!(snippet.to_html(), "<b>Rust</b> is a systems programming <b>language</b> \
    sponsored by Mozilla which\ndescribes it as a &quot;safe")
assert_eq!(
    snippet.fragments,
    "Rust is a systems programming language sponsored by \
     Mozilla which\ndescribes it as a \"safe"
);
assert_eq!(
    snippet.to_html(),
    "<b>Rust</b> is a systems programming <b>language</b> \
     sponsored by Mozilla which\ndescribes it as a &quot;safe"
)
} }
#[test] #[test]
fn test_snippet_scored_fragment() { fn test_snippet_scored_fragment() {
let boxed_tokenizer = box_tokenizer(SimpleTokenizer); let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
@@ -385,10 +407,8 @@ Survey in 2016, 2017, and 2018."#;
let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT); let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
assert_eq!(snippet.to_html(), "programming <b>language</b>") assert_eq!(snippet.to_html(), "programming <b>language</b>")
} }
} }
#[test] #[test]
fn test_snippet_in_second_fragment() { fn test_snippet_in_second_fragment() {
let boxed_tokenizer = box_tokenizer(SimpleTokenizer); let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
@@ -495,10 +515,9 @@ Survey in 2016, 2017, and 2018."#;
assert_eq!(snippet.to_html(), ""); assert_eq!(snippet.to_html(), "");
} }
#[test] #[test]
fn test_snippet_generator_term_score() { fn test_snippet_generator_term_score() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -515,29 +534,42 @@ Survey in 2016, 2017, and 2018."#;
let query_parser = QueryParser::for_index(&index, vec![text_field]); let query_parser = QueryParser::for_index(&index, vec![text_field]);
{ {
let query = query_parser.parse_query("e").unwrap(); let query = query_parser.parse_query("e").unwrap();
let snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field).unwrap();
let snippet_generator =
    SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
assert!(snippet_generator.terms_text().is_empty()); assert!(snippet_generator.terms_text().is_empty());
} }
{ {
let query = query_parser.parse_query("a").unwrap(); let query = query_parser.parse_query("a").unwrap();
let snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field).unwrap();
assert_eq!(&btreemap!("a".to_string() => 0.25f32), snippet_generator.terms_text());
let snippet_generator =
    SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
assert_eq!(
    &btreemap!("a".to_string() => 0.25f32),
    snippet_generator.terms_text()
);
} }
{ {
let query = query_parser.parse_query("a b").unwrap(); let query = query_parser.parse_query("a b").unwrap();
let snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field).unwrap();
assert_eq!(&btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5), snippet_generator.terms_text());
let snippet_generator =
    SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
assert_eq!(
    &btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5),
    snippet_generator.terms_text()
);
} }
{ {
let query = query_parser.parse_query("a b c").unwrap(); let query = query_parser.parse_query("a b c").unwrap();
let snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field).unwrap();
assert_eq!(&btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5), snippet_generator.terms_text());
let snippet_generator =
    SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
assert_eq!(
    &btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5),
    snippet_generator.terms_text()
);
} }
} }
#[test] #[test]
fn test_snippet_generator() { fn test_snippet_generator() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_options = TextOptions::default().set_indexing_options( let text_options = TextOptions::default().set_indexing_options(
TextFieldIndexing::default() TextFieldIndexing::default()
.set_tokenizer("en_stem") .set_tokenizer("en_stem")
@@ -559,7 +591,8 @@ Survey in 2016, 2017, and 2018."#;
let searcher = index.searcher(); let searcher = index.searcher();
let query_parser = QueryParser::for_index(&index, vec![text_field]); let query_parser = QueryParser::for_index(&index, vec![text_field]);
let query = query_parser.parse_query("rust design").unwrap(); let query = query_parser.parse_query("rust design").unwrap();
let mut snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field).unwrap();
let mut snippet_generator =
    SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
{ {
let snippet = snippet_generator.snippet(TEST_TEXT); let snippet = snippet_generator.snippet(TEST_TEXT);
assert_eq!(snippet.to_html(), "imperative-procedural paradigms. <b>Rust</b> is syntactically similar to C++[according to whom?],\nbut its <b>designers</b> intend it to provide better memory safety"); assert_eq!(snippet.to_html(), "imperative-procedural paradigms. <b>Rust</b> is syntactically similar to C++[according to whom?],\nbut its <b>designers</b> intend it to provide better memory safety");

View File

@@ -80,6 +80,7 @@ pub struct SegmentSpaceUsage {
} }
impl SegmentSpaceUsage { impl SegmentSpaceUsage {
#[allow(clippy::too_many_arguments)]
pub(crate) fn new( pub(crate) fn new(
num_docs: u32, num_docs: u32,
termdict: PerFieldSpaceUsage, termdict: PerFieldSpaceUsage,
@@ -117,8 +118,8 @@ impl SegmentSpaceUsage {
/// Clones the underlying data. /// Clones the underlying data.
/// Use the components directly if this is somehow in performance critical code. /// Use the components directly if this is somehow in performance critical code.
pub fn component(&self, component: SegmentComponent) -> ComponentSpaceUsage { pub fn component(&self, component: SegmentComponent) -> ComponentSpaceUsage {
use SegmentComponent::*;
use self::ComponentSpaceUsage::*; use self::ComponentSpaceUsage::*;
use SegmentComponent::*;
match component { match component {
POSTINGS => PerField(self.postings().clone()), POSTINGS => PerField(self.postings().clone()),
POSITIONS => PerField(self.positions().clone()), POSITIONS => PerField(self.positions().clone()),
@@ -221,7 +222,7 @@ impl StoreSpaceUsage {
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PerFieldSpaceUsage { pub struct PerFieldSpaceUsage {
fields: HashMap<Field, FieldUsage>, fields: HashMap<Field, FieldUsage>,
total: ByteCount total: ByteCount,
} }
impl PerFieldSpaceUsage { impl PerFieldSpaceUsage {
@@ -265,7 +266,7 @@ impl FieldUsage {
} }
pub(crate) fn add_field_idx(&mut self, idx: usize, size: ByteCount) { pub(crate) fn add_field_idx(&mut self, idx: usize, size: ByteCount) {
if self.sub_num_bytes.len() < idx + 1{ if self.sub_num_bytes.len() < idx + 1 {
self.sub_num_bytes.resize(idx + 1, None); self.sub_num_bytes.resize(idx + 1, None);
} }
assert!(self.sub_num_bytes[idx].is_none()); assert!(self.sub_num_bytes[idx].is_none());
@@ -292,17 +293,17 @@ impl FieldUsage {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use core::Index; use core::Index;
use schema::SchemaBuilder;
use schema::{FAST, INT_INDEXED, TEXT};
use schema::Field;
use space_usage::ByteCount;
use space_usage::PerFieldSpaceUsage;
use schema::STORED;
use schema::Field;
use schema::Schema;
use schema::STORED;
use schema::{FAST, INT_INDEXED, TEXT};
use space_usage::ByteCount;
use space_usage::PerFieldSpaceUsage;
use Term; use Term;
#[test] #[test]
fn test_empty() { fn test_empty() {
let schema = SchemaBuilder::new().build(); let schema = Schema::builder().build();
let index = Index::create_in_ram(schema.clone()); let index = Index::create_in_ram(schema.clone());
index.load_searchers().unwrap(); index.load_searchers().unwrap();
@@ -311,18 +312,26 @@ mod test {
assert_eq!(0, searcher_space_usage.total()); assert_eq!(0, searcher_space_usage.total());
} }
fn expect_single_field(field_space: &PerFieldSpaceUsage, field: &Field, min_size: ByteCount, max_size: ByteCount) {
    assert!(field_space.total() >= min_size);
    assert!(field_space.total() <= max_size);
    assert_eq!(
        vec![(field, field_space.total())],
        field_space.fields().map(|(x,y)| (x, y.total())).collect::<Vec<_>>()
    );
fn expect_single_field(
    field_space: &PerFieldSpaceUsage,
    field: &Field,
    min_size: ByteCount,
    max_size: ByteCount,
) {
    assert!(field_space.total() >= min_size);
    assert!(field_space.total() <= max_size);
    assert_eq!(
        vec![(field, field_space.total())],
        field_space
            .fields()
            .map(|(x, y)| (x, y.total()))
            .collect::<Vec<_>>()
    );
} }
#[test] #[test]
fn test_fast_indexed() { fn test_fast_indexed() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let name = schema_builder.add_u64_field("name", FAST | INT_INDEXED); let name = schema_builder.add_u64_field("name", FAST | INT_INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone()); let index = Index::create_in_ram(schema.clone());
@@ -354,13 +363,13 @@ mod test {
expect_single_field(segment.fast_fields(), &name, 1, 512); expect_single_field(segment.fast_fields(), &name, 1, 512);
expect_single_field(segment.fieldnorms(), &name, 1, 512); expect_single_field(segment.fieldnorms(), &name, 1, 512);
// TODO: understand why the following fails // TODO: understand why the following fails
// assert_eq!(0, segment.store().total()); // assert_eq!(0, segment.store().total());
assert_eq!(0, segment.deletes()); assert_eq!(0, segment.deletes());
} }
#[test] #[test]
fn test_text() { fn test_text() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let name = schema_builder.add_text_field("name", TEXT); let name = schema_builder.add_text_field("name", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone()); let index = Index::create_in_ram(schema.clone());
@@ -369,7 +378,9 @@ mod test {
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(name => "hi")); index_writer.add_document(doc!(name => "hi"));
index_writer.add_document(doc!(name => "this is a test")); index_writer.add_document(doc!(name => "this is a test"));
index_writer.add_document(doc!(name => "some more documents with some word overlap with the other test"));
index_writer.add_document(
    doc!(name => "some more documents with some word overlap with the other test"),
);
index_writer.add_document(doc!(name => "hello hi goodbye")); index_writer.add_document(doc!(name => "hello hi goodbye"));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
@@ -392,13 +403,13 @@ mod test {
assert_eq!(0, segment.fast_fields().total()); assert_eq!(0, segment.fast_fields().total());
expect_single_field(segment.fieldnorms(), &name, 1, 512); expect_single_field(segment.fieldnorms(), &name, 1, 512);
// TODO: understand why the following fails // TODO: understand why the following fails
// assert_eq!(0, segment.store().total()); // assert_eq!(0, segment.store().total());
assert_eq!(0, segment.deletes()); assert_eq!(0, segment.deletes());
} }
#[test] #[test]
fn test_store() { fn test_store() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let name = schema_builder.add_text_field("name", STORED); let name = schema_builder.add_text_field("name", STORED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone()); let index = Index::create_in_ram(schema.clone());
@@ -407,7 +418,9 @@ mod test {
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(name => "hi")); index_writer.add_document(doc!(name => "hi"));
index_writer.add_document(doc!(name => "this is a test")); index_writer.add_document(doc!(name => "this is a test"));
index_writer.add_document(doc!(name => "some more documents with some word overlap with the other test"));
index_writer.add_document(
    doc!(name => "some more documents with some word overlap with the other test"),
);
index_writer.add_document(doc!(name => "hello hi goodbye")); index_writer.add_document(doc!(name => "hello hi goodbye"));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
@@ -436,7 +449,7 @@ mod test {
#[test] #[test]
fn test_deletes() { fn test_deletes() {
let mut schema_builder = SchemaBuilder::new(); let mut schema_builder = Schema::builder();
let name = schema_builder.add_u64_field("name", INT_INDEXED); let name = schema_builder.add_u64_field("name", INT_INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone()); let index = Index::create_in_ram(schema.clone());
@@ -478,7 +491,7 @@ mod test {
assert_eq!(0, segment.fast_fields().total()); assert_eq!(0, segment.fast_fields().total());
expect_single_field(segment.fieldnorms(), &name, 1, 512); expect_single_field(segment.fieldnorms(), &name, 1, 512);
// TODO: understand why the following fails // TODO: understand why the following fails
// assert_eq!(0, segment.store().total()); // assert_eq!(0, segment.store().total());
assert!(segment.deletes() > 0); assert!(segment.deletes() > 0);
} }
} }
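The tests above walk the space-usage API segment by segment. A minimal sketch of the same flow outside a test, assuming `Searcher::space_usage()` is the entry point those tests rely on:

```rust
extern crate tantivy;
use tantivy::schema::Schema;
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let schema = Schema::builder().build();
    let index = Index::create_in_ram(schema);
    index.load_searchers()?;

    // Aggregated byte counts for everything the searcher currently holds.
    let usage = index.searcher().space_usage();
    println!("total index size: {} bytes", usage.total());
    Ok(())
}
```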

View File

@@ -56,12 +56,12 @@ pub mod tests {
use directory::{Directory, RAMDirectory, WritePtr}; use directory::{Directory, RAMDirectory, WritePtr};
use schema::Document; use schema::Document;
use schema::FieldValue; use schema::FieldValue;
use schema::Schema;
use schema::TextOptions; use schema::TextOptions;
use schema::{Schema, SchemaBuilder};
use std::path::Path; use std::path::Path;
pub fn write_lorem_ipsum_store(writer: WritePtr, num_docs: usize) -> Schema { pub fn write_lorem_ipsum_store(writer: WritePtr, num_docs: usize) -> Schema {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let field_body = schema_builder.add_text_field("body", TextOptions::default().set_stored()); let field_body = schema_builder.add_text_field("body", TextOptions::default().set_stored());
let field_title = let field_title =
schema_builder.add_text_field("title", TextOptions::default().set_stored()); schema_builder.add_text_field("title", TextOptions::default().set_stored());

View File

@@ -95,10 +95,7 @@ impl StoreReader {
} }
} }
#[cfg_attr(
    feature = "cargo-clippy",
    allow(clippy::needless_pass_by_value)
)]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))]
fn split_source(data: ReadOnlySource) -> (ReadOnlySource, ReadOnlySource, DocId) { fn split_source(data: ReadOnlySource) -> (ReadOnlySource, ReadOnlySource, DocId) {
let data_len = data.len(); let data_len = data.len();
let footer_offset = data_len - size_of::<u64>() - size_of::<u32>(); let footer_offset = data_len - size_of::<u64>() - size_of::<u32>();

View File

@@ -53,7 +53,8 @@ impl<'a> TermMerger<'a> {
.map(|(ord, streamer)| HeapItem { .map(|(ord, streamer)| HeapItem {
streamer, streamer,
segment_ord: ord, segment_ord: ord,
}).collect(), })
.collect(),
} }
} }
@@ -122,10 +123,7 @@ impl<'a> TermMerger<'a> {
} }
/// Iterates through terms /// Iterates through terms
#[cfg_attr(
    feature = "cargo-clippy",
    allow(clippy::should_implement_trait)
)]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::should_implement_trait))]
pub fn next(&mut self) -> Option<Term<&[u8]>> { pub fn next(&mut self) -> Option<Term<&[u8]>> {
if self.advance() { if self.advance() {
Some(Term::wrap(self.current_streamers[0].streamer.key())) Some(Term::wrap(self.current_streamers[0].streamer.key()))

View File

@@ -35,7 +35,7 @@ mod tests {
use core::Index; use core::Index;
use directory::{Directory, RAMDirectory, ReadOnlySource}; use directory::{Directory, RAMDirectory, ReadOnlySource};
use postings::TermInfo; use postings::TermInfo;
use schema::{Document, FieldType, SchemaBuilder, TEXT}; use schema::{Document, FieldType, Schema, TEXT};
use std::path::PathBuf; use std::path::PathBuf;
use std::str; use std::str;
@@ -66,7 +66,7 @@ mod tests {
let write = directory.open_write(&path).unwrap(); let write = directory.open_write(&path).unwrap();
let field_type = FieldType::Str(TEXT); let field_type = FieldType::Str(TEXT);
let mut term_dictionary_builder = let mut term_dictionary_builder =
TermDictionaryBuilder::new(write, &field_type).unwrap(); TermDictionaryBuilder::create(write, &field_type).unwrap();
for term in COUNTRIES.iter() { for term in COUNTRIES.iter() {
term_dictionary_builder term_dictionary_builder
.insert(term.as_bytes(), &make_term_info(0u64)) .insert(term.as_bytes(), &make_term_info(0u64))
@@ -92,7 +92,7 @@ mod tests {
let write = directory.open_write(&path).unwrap(); let write = directory.open_write(&path).unwrap();
let field_type = FieldType::Str(TEXT); let field_type = FieldType::Str(TEXT);
let mut term_dictionary_builder = let mut term_dictionary_builder =
TermDictionaryBuilder::new(write, &field_type).unwrap(); TermDictionaryBuilder::create(write, &field_type).unwrap();
term_dictionary_builder term_dictionary_builder
.insert("abc".as_bytes(), &make_term_info(34u64)) .insert("abc".as_bytes(), &make_term_info(34u64))
.unwrap(); .unwrap();
@@ -129,7 +129,7 @@ mod tests {
#[test] #[test]
fn test_term_iterator() { fn test_term_iterator() {
let mut schema_builder = SchemaBuilder::default(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
{ {
@@ -180,7 +180,7 @@ mod tests {
let field_type = FieldType::Str(TEXT); let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = let mut term_dictionary_builder =
TermDictionaryBuilder::new(vec![], &field_type).unwrap(); TermDictionaryBuilder::create(vec![], &field_type).unwrap();
for &(ref id, ref i) in &ids { for &(ref id, ref i) in &ids {
term_dictionary_builder term_dictionary_builder
.insert(id.as_bytes(), &make_term_info(*i as u64)) .insert(id.as_bytes(), &make_term_info(*i as u64))
@@ -210,7 +210,7 @@ mod tests {
let field_type = FieldType::Str(TEXT); let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = let mut term_dictionary_builder =
TermDictionaryBuilder::new(vec![], &field_type).unwrap(); TermDictionaryBuilder::create(vec![], &field_type).unwrap();
// term requires more than 16bits // term requires more than 16bits
term_dictionary_builder term_dictionary_builder
.insert("abcdefghijklmnopqrstuvwxy", &make_term_info(1)) .insert("abcdefghijklmnopqrstuvwxy", &make_term_info(1))
@@ -245,7 +245,7 @@ mod tests {
let field_type = FieldType::Str(TEXT); let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = let mut term_dictionary_builder =
TermDictionaryBuilder::new(vec![], &field_type).unwrap(); TermDictionaryBuilder::create(vec![], &field_type).unwrap();
for &(ref id, ref i) in &ids { for &(ref id, ref i) in &ids {
term_dictionary_builder term_dictionary_builder
.insert(id.as_bytes(), &make_term_info(*i as u64)) .insert(id.as_bytes(), &make_term_info(*i as u64))
@@ -314,7 +314,7 @@ mod tests {
let field_type = FieldType::Str(TEXT); let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = let mut term_dictionary_builder =
TermDictionaryBuilder::new(vec![], &field_type).unwrap(); TermDictionaryBuilder::create(vec![], &field_type).unwrap();
term_dictionary_builder term_dictionary_builder
.insert(&[], &make_term_info(1 as u64)) .insert(&[], &make_term_info(1 as u64))
.unwrap(); .unwrap();
@@ -338,7 +338,7 @@ mod tests {
let field_type = FieldType::Str(TEXT); let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = let mut term_dictionary_builder =
TermDictionaryBuilder::new(vec![], &field_type).unwrap(); TermDictionaryBuilder::create(vec![], &field_type).unwrap();
for i in 0u8..10u8 { for i in 0u8..10u8 {
let number_arr = [i; 1]; let number_arr = [i; 1];
term_dictionary_builder term_dictionary_builder
@@ -408,7 +408,7 @@ mod tests {
let write = directory.open_write(&path).unwrap(); let write = directory.open_write(&path).unwrap();
let field_type = FieldType::Str(TEXT); let field_type = FieldType::Str(TEXT);
let mut term_dictionary_builder = let mut term_dictionary_builder =
TermDictionaryBuilder::new(write, &field_type).unwrap(); TermDictionaryBuilder::create(write, &field_type).unwrap();
for term in COUNTRIES.iter() { for term in COUNTRIES.iter() {
term_dictionary_builder term_dictionary_builder
.insert(term.as_bytes(), &make_term_info(0u64)) .insert(term.as_bytes(), &make_term_info(0u64))

View File

@@ -132,10 +132,7 @@ where
} }
/// Return the next `(key, value)` pair. /// Return the next `(key, value)` pair.
#[cfg_attr(
    feature = "cargo-clippy",
    allow(clippy::should_implement_trait)
)]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::should_implement_trait))]
pub fn next(&mut self) -> Option<(&[u8], &TermInfo)> { pub fn next(&mut self) -> Option<(&[u8], &TermInfo)> {
if self.advance() { if self.advance() {
Some((self.key(), self.value())) Some((self.key(), self.value()))

View File

@@ -29,7 +29,7 @@ where
W: Write, W: Write,
{ {
/// Creates a new `TermDictionaryBuilder` /// Creates a new `TermDictionaryBuilder`
pub fn new(w: W, _field_type: &FieldType) -> io::Result<Self> { pub fn create(w: W, _field_type: &FieldType) -> io::Result<Self> {
let fst_builder = fst::MapBuilder::new(w).map_err(convert_fst_error)?; let fst_builder = fst::MapBuilder::new(w).map_err(convert_fst_error)?;
Ok(TermDictionaryBuilder { Ok(TermDictionaryBuilder {
fst_builder, fst_builder,
@@ -132,7 +132,7 @@ impl TermDictionary {
/// Creates an empty term dictionary which contains no terms. /// Creates an empty term dictionary which contains no terms.
pub fn empty(field_type: &FieldType) -> Self { pub fn empty(field_type: &FieldType) -> Self {
let term_dictionary_data: Vec<u8> = let term_dictionary_data: Vec<u8> =
TermDictionaryBuilder::new(Vec::<u8>::new(), &field_type) TermDictionaryBuilder::create(Vec::<u8>::new(), &field_type)
.expect("Creating a TermDictionaryBuilder in a Vec<u8> should never fail") .expect("Creating a TermDictionaryBuilder in a Vec<u8> should never fail")
.finish() .finish()
.expect("Writing in a Vec<u8> should never fail"); .expect("Writing in a Vec<u8> should never fail");

View File

@@ -9,7 +9,7 @@
//! use tantivy::schema::*; //! use tantivy::schema::*;
//! //!
//! # fn main() { //! # fn main() {
//! let mut schema_builder = SchemaBuilder::new(); //! let mut schema_builder = Schema::builder();
//! //!
//! let text_options = TextOptions::default() //! let text_options = TextOptions::default()
//! .set_indexing_options( //! .set_indexing_options(
@@ -82,12 +82,12 @@
//! //!
//! ``` //! ```
//! # extern crate tantivy; //! # extern crate tantivy;
//! # use tantivy::schema::SchemaBuilder; //! # use tantivy::schema::Schema;
//! # use tantivy::tokenizer::*; //! # use tantivy::tokenizer::*;
//! # use tantivy::Index; //! # use tantivy::Index;
//! # fn main() { //! # fn main() {
//! # let custom_en_tokenizer = SimpleTokenizer; //! # let custom_en_tokenizer = SimpleTokenizer;
//! # let schema = SchemaBuilder::new().build(); //! # let schema = Schema::builder().build();
//! let index = Index::create_in_ram(schema); //! let index = Index::create_in_ram(schema);
//! index.tokenizers() //! index.tokenizers()
//! .register("custom_en", custom_en_tokenizer); //! .register("custom_en", custom_en_tokenizer);
@@ -101,12 +101,12 @@
//! //!
//! ``` //! ```
//! extern crate tantivy; //! extern crate tantivy;
//! use tantivy::schema::{SchemaBuilder, IndexRecordOption, TextOptions, TextFieldIndexing}; //! use tantivy::schema::{Schema, IndexRecordOption, TextOptions, TextFieldIndexing};
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! use tantivy::Index; //! use tantivy::Index;
//! //!
//! # fn main() { //! # fn main() {
//! let mut schema_builder = SchemaBuilder::new(); //! let mut schema_builder = Schema::builder();
//! let text_field_indexing = TextFieldIndexing::default() //! let text_field_indexing = TextFieldIndexing::default()
//! .set_tokenizer("custom_en") //! .set_tokenizer("custom_en")
//! .set_index_option(IndexRecordOption::WithFreqsAndPositions); //! .set_index_option(IndexRecordOption::WithFreqsAndPositions);
@@ -157,13 +157,11 @@ pub use self::tokenizer::BoxedTokenizer;
pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer}; pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer};
pub use self::tokenizer_manager::TokenizerManager; pub use self::tokenizer_manager::TokenizerManager;
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use super::Token; use super::Token;
use super::TokenizerManager; use super::TokenizerManager;
/// This is a function that can be used in tests and doc tests /// This is a function that can be used in tests and doc tests
/// to assert a token's correctness. /// to assert a token's correctness.
pub fn assert_token(token: &Token, position: usize, text: &str, from: usize, to: usize) { pub fn assert_token(token: &Token, position: usize, text: &str, from: usize, to: usize) {
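The module documentation above splits the custom-tokenizer workflow across two hunks: registering a tokenizer under a name, and pointing a field's indexing options at that name. A sketch joining the two, using only the calls that appear in those doc examples:

```rust
extern crate tantivy;
use tantivy::schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions};
use tantivy::tokenizer::SimpleTokenizer;
use tantivy::Index;

fn main() {
    let mut schema_builder = Schema::builder();
    let text_field_indexing = TextFieldIndexing::default()
        .set_tokenizer("custom_en")
        .set_index_option(IndexRecordOption::WithFreqsAndPositions);
    let text_options = TextOptions::default()
        .set_indexing_options(text_field_indexing)
        .set_stored();
    schema_builder.add_text_field("title", text_options);
    let index = Index::create_in_ram(schema_builder.build());

    // The field's tokenizer is resolved by the name registered here.
    index.tokenizers().register("custom_en", SimpleTokenizer);
}
```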

View File

@@ -108,7 +108,7 @@ impl NgramTokenizer {
/// Create a `NGramTokenizer` which generates tokens for all inner ngrams. /// Create a `NGramTokenizer` which generates tokens for all inner ngrams.
/// ///
/// This is as opposed to only prefix ngrams. /// This is as opposed to only prefix ngrams.
pub fn all_ngrams(min_gram: usize, max_gram:usize) -> NgramTokenizer { pub fn all_ngrams(min_gram: usize, max_gram: usize) -> NgramTokenizer {
Self::new(min_gram, max_gram, false) Self::new(min_gram, max_gram, false)
} }
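`all_ngrams` emits every inner ngram between `min_gram` and `max_gram` characters, not just the prefixes. A sketch of what that looks like through the usual `token_stream` interface; the `advance()`/`token()` loop is the standard `TokenStream` pattern, and the exact output order is assumed from the offset iterator described further down:

```rust
use tantivy::tokenizer::{NgramTokenizer, TokenStream, Tokenizer};

fn collect_ngrams(text: &str) -> Vec<String> {
    let tokenizer = NgramTokenizer::all_ngrams(2, 3);
    let mut stream = tokenizer.token_stream(text);
    let mut tokens = Vec::new();
    while stream.advance() {
        tokens.push(stream.token().text.clone());
    }
    tokens
}

// collect_ngrams("abcd") should yield ["ab", "abc", "bc", "bcd", "cd"].
```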
@@ -137,9 +137,10 @@ impl<'a> Tokenizer<'a> for NgramTokenizer {
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl { fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
NgramTokenStream { NgramTokenStream {
ngram_charidx_iterator: StutteringIterator::new( ngram_charidx_iterator: StutteringIterator::new(
CodepointFrontiers::for_str(text), CodepointFrontiers::for_str(text),
self.min_gram, self.min_gram,
self.max_gram), self.max_gram,
),
prefix_only: self.prefix_only, prefix_only: self.prefix_only,
text, text,
token: Token::default(), token: Token::default(),
@@ -172,7 +173,6 @@ impl<'a> TokenStream for NgramTokenStream<'a> {
} }
} }
/// This iterator takes an underlying Iterator /// This iterator takes an underlying Iterator
/// and emits all of the pairs `(a,b)` such that /// and emits all of the pairs `(a,b)` such that
/// a and b are items emitted by the iterator at /// a and b are items emitted by the iterator at
@@ -190,11 +190,13 @@ struct StutteringIterator<T> {
memory: Vec<usize>, memory: Vec<usize>,
cursor: usize, cursor: usize,
gram_len: usize gram_len: usize,
} }
impl<T> StutteringIterator<T> impl<T> StutteringIterator<T>
where T: Iterator<Item=usize> {
where
    T: Iterator<Item = usize>,
{
pub fn new(mut underlying: T, min_gram: usize, max_gram: usize) -> StutteringIterator<T> { pub fn new(mut underlying: T, min_gram: usize, max_gram: usize) -> StutteringIterator<T> {
assert!(min_gram > 0); assert!(min_gram > 0);
let memory: Vec<usize> = (&mut underlying).take(max_gram + 1).collect(); let memory: Vec<usize> = (&mut underlying).take(max_gram + 1).collect();
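The comment above describes the pair-emission behaviour abstractly. A standalone sketch of the same idea with plain nested loops (the real `StutteringIterator` is streaming and keeps only a small window of offsets in `memory`, so this is illustrative only):

```rust
// `frontiers` stands in for the underlying iterator of codepoint offsets.
// Each pair (start, stop) delimits an ngram of min_gram..=max_gram codepoints.
fn ngram_offsets(frontiers: &[usize], min_gram: usize, max_gram: usize) -> Vec<(usize, usize)> {
    let mut pairs = Vec::new();
    for i in 0..frontiers.len() {
        for len in min_gram..=max_gram {
            if i + len < frontiers.len() {
                pairs.push((frontiers[i], frontiers[i + len]));
            }
        }
    }
    pairs
}

// For "abcd" the frontiers are [0, 1, 2, 3, 4], and with (2, 3) this yields
// [(0, 2), (0, 3), (1, 3), (1, 4), (2, 4)], i.e. "ab", "abc", "bc", "bcd", "cd".
```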
@@ -222,7 +224,9 @@ impl<T> StutteringIterator<T>
} }
impl<T> Iterator for StutteringIterator<T> impl<T> Iterator for StutteringIterator<T>
where T: Iterator<Item=usize> {
where
    T: Iterator<Item = usize>,
{
type Item = (usize, usize); type Item = (usize, usize);
fn next(&mut self) -> Option<(usize, usize)> { fn next(&mut self) -> Option<(usize, usize)> {
@@ -230,7 +234,7 @@ impl<T> Iterator for StutteringIterator<T>
// we have exhausted all options // we have exhausted all options
// starting at `self.memory[self.cursor]`. // starting at `self.memory[self.cursor]`.
// //
// Time to advance. // Time to advance.
self.gram_len = self.min_gram; self.gram_len = self.min_gram;
if let Some(next_val) = self.underlying.next() { if let Some(next_val) = self.underlying.next() {
self.memory[self.cursor] = next_val; self.memory[self.cursor] = next_val;
@@ -252,22 +256,20 @@ impl<T> Iterator for StutteringIterator<T>
} }
} }
/// Emits all of the offsets where a codepoint starts /// Emits all of the offsets where a codepoint starts
/// or a codepoint ends. /// or a codepoint ends.
/// ///
/// By convention, we emit [0] for the empty string. /// By convention, we emit [0] for the empty string.
struct CodepointFrontiers<'a> { struct CodepointFrontiers<'a> {
s: &'a str, s: &'a str,
next_el: Option<usize> next_el: Option<usize>,
} }
impl<'a> CodepointFrontiers<'a> { impl<'a> CodepointFrontiers<'a> {
fn for_str(s: &'a str) -> Self { fn for_str(s: &'a str) -> Self {
CodepointFrontiers { CodepointFrontiers {
s, s,
next_el: Some(0) next_el: Some(0),
} }
} }
} }
@@ -276,26 +278,20 @@ impl<'a> Iterator for CodepointFrontiers<'a> {
type Item = usize; type Item = usize;
fn next(&mut self) -> Option<usize> { fn next(&mut self) -> Option<usize> {
self.next_el
    .map(|offset| {
        if self.s.is_empty() {
            self.next_el = None;
        } else {
            let first_codepoint_width = utf8_codepoint_width(self.s.as_bytes()[0]);
            self.s = &self.s[first_codepoint_width..];
            self.next_el = Some(offset + first_codepoint_width);
        }
        offset
    })
self.next_el.map(|offset| {
    if self.s.is_empty() {
        self.next_el = None;
    } else {
        let first_codepoint_width = utf8_codepoint_width(self.s.as_bytes()[0]);
        self.s = &self.s[first_codepoint_width..];
        self.next_el = Some(offset + first_codepoint_width);
    }
    offset
})
} }
} }
const CODEPOINT_UTF8_WIDTH: [u8; 16] = [
    1, 1, 1, 1,
    1, 1, 1, 1,
    2, 2, 2, 2,
    2, 2, 3, 4,
];
const CODEPOINT_UTF8_WIDTH: [u8; 16] = [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 4];
// Number of bytes to encode a codepoint in UTF-8 given // Number of bytes to encode a codepoint in UTF-8 given
// the first byte. // the first byte.
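The hunk only shows the width table and its comment; the function body that indexes it lies outside the diff. One plausible reading, indexing by the first byte's high nibble, is sketched below as an assumption rather than the actual implementation:

```rust
const CODEPOINT_UTF8_WIDTH: [u8; 16] = [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 4];

// Assumed indexing: the top four bits of a UTF-8 leading byte determine the
// codepoint's encoded width (0xxxxxxx -> 1, 110xxxxx -> 2, 1110xxxx -> 3, 11110xxx -> 4).
fn utf8_codepoint_width_sketch(b: u8) -> usize {
    CODEPOINT_UTF8_WIDTH[(b >> 4) as usize] as usize
}

// utf8_codepoint_width_sketch(b'a') == 1; the first byte of 'あ' (0xE3) maps to 3.
```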
@@ -309,13 +305,13 @@ fn utf8_codepoint_width(b: u8) -> usize {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use tokenizer::tokenizer::{TokenStream, Tokenizer};
use super::NgramTokenizer;
use tokenizer::Token;
use tokenizer::tests::assert_token;
use super::CodepointFrontiers;
use super::StutteringIterator;
use super::utf8_codepoint_width;
use super::utf8_codepoint_width;
use super::CodepointFrontiers;
use super::NgramTokenizer;
use super::StutteringIterator;
use tokenizer::tests::assert_token;
use tokenizer::tokenizer::{TokenStream, Tokenizer};
use tokenizer::Token;
fn test_helper<T: TokenStream>(mut tokenizer: T) -> Vec<Token> { fn test_helper<T: TokenStream>(mut tokenizer: T) -> Vec<Token> {
let mut tokens: Vec<Token> = vec![]; let mut tokens: Vec<Token> = vec![];
@@ -323,7 +319,6 @@ mod tests {
tokens tokens
} }
#[test] #[test]
fn test_utf8_codepoint_width() { fn test_utf8_codepoint_width() {
// 0xxx // 0xxx
@@ -344,17 +339,16 @@ mod tests {
} }
} }
#[test] #[test]
fn test_codepoint_frontiers() { fn test_codepoint_frontiers() {
assert_eq!(CodepointFrontiers::for_str("").collect::<Vec<_>>(), vec![0]); assert_eq!(CodepointFrontiers::for_str("").collect::<Vec<_>>(), vec![0]);
assert_eq!( assert_eq!(
CodepointFrontiers::for_str("abcd").collect::<Vec<_>>(), CodepointFrontiers::for_str("abcd").collect::<Vec<_>>(),
vec![0,1,2,3,4] vec![0, 1, 2, 3, 4]
); );
assert_eq!( assert_eq!(
CodepointFrontiers::for_str("aあ").collect::<Vec<_>>(), CodepointFrontiers::for_str("aあ").collect::<Vec<_>>(),
vec![0,1,4] vec![0, 1, 4]
); );
} }
@@ -425,7 +419,6 @@ mod tests {
assert!(tokens.is_empty()); assert!(tokens.is_empty());
} }
#[test] #[test]
#[should_panic(expected = "min_gram must be greater than 0")] #[should_panic(expected = "min_gram must be greater than 0")]
fn test_ngram_min_max_interval_empty() { fn test_ngram_min_max_interval_empty() {
@@ -438,7 +431,6 @@ mod tests {
NgramTokenizer::all_ngrams(2, 1); NgramTokenizer::all_ngrams(2, 1);
} }
#[test] #[test]
fn test_stutterring_iterator_empty() { fn test_stutterring_iterator_empty() {
let rg: Vec<usize> = vec![0]; let rg: Vec<usize> = vec![0];
@@ -470,4 +462,4 @@ mod tests {
assert_eq!(it.next(), None); assert_eq!(it.next(), None);
} }
} }