Compare commits


14 Commits

Author SHA1 Message Date
Paul Masurel
f3099a83eb Blop 2018-12-24 11:41:18 +09:00
Paul Masurel
f745bb9d2a blop 2018-12-24 11:28:08 +09:00
Paul Masurel
d9417acbc6 done 2018-12-11 09:01:45 +09:00
Paul Masurel
38540c3826 small step 2018-12-09 15:26:19 +09:00
fdb-hiroshima
21a24672d8 Add accessors for Snippet and HighlightSection (#448)
* Add accessors for Snippet and HighlightSection

And add an example of custom highlighter

* Remove inline(always) and unnecessary empty lines
2018-12-02 18:00:16 +09:00
dependabot[bot]
a3f1fbaae6 Update scoped-pool requirement from 0.1 to 1.0 (#447)
Updates the requirements on [scoped-pool](https://github.com/reem/rust-scoped-pool) to permit the latest version.
- [Release notes](https://github.com/reem/rust-scoped-pool/releases)
- [Commits](https://github.com/reem/rust-scoped-pool/commits/1.0.0)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-12-01 13:54:59 +09:00
Paul Masurel
a6e767c877 Cargo fmt 2018-11-30 22:52:45 +09:00
Paul Masurel
6af0488dbe Executor made sorted 2018-11-30 22:52:26 +09:00
Paul Masurel
07d87e154b Collector refactoring and multithreaded search (#437)
* Split Collector into an overall Collector and a per-segment SegmentCollector. Precursor to cross-segment parallelism, and as a side benefit cleans up any per-segment fields from being Option<T> to just T.

* Attempt to add MultiCollector back

* working. Chained collector is broken though

* Fix chained collector

* Fix test

* Make Weight Send+Sync for parallelization purposes

* Expose parameters of RangeQuery for external usage

* Removed &mut self

* fixing tests

* Restored TestCollectors

* blop

* multicollector working

* chained collector working

* test broken

* fixing unit test

* blop

* blop

* Blop

* simplifying APi

* blop

* better syntax

* Simplifying top_collector

* refactoring

* blop

* Sync with master

* Added multithread search

* Collector refactoring

* Schema::builder

* CR and rustdoc

* CR comments

* blop

* Added an executor

* Sorted the segment readers in the searcher

* Update searcher.rs

* Fixed unit testst

* changed the place where we have the sort-segment-by-count heuristic

* using crossbeam::channel

* inlining

* Comments about panics propagating

* Added unit test for executor panicking

* Readded default

* Removed Default impl

* Added unit test for executor
2018-11-30 22:46:59 +09:00
Paul Masurel
8b0b0133dd Importing crossbeam_channel from crossbeam reexport. 2018-11-19 09:19:28 +09:00
dependabot[bot]
7b9752f897 Update crossbeam-channel requirement from 0.2 to 0.3 (#436)
* Update crossbeam-channel requirement from 0.2 to 0.3

Updates the requirements on [crossbeam-channel](https://github.com/crossbeam-rs/crossbeam-channel) to permit the latest version.
- [Release notes](https://github.com/crossbeam-rs/crossbeam-channel/releases)
- [Changelog](https://github.com/crossbeam-rs/crossbeam-channel/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crossbeam-rs/crossbeam-channel/commits/v0.3.0)

Signed-off-by: dependabot[bot] <support@dependabot.com>

* fixing build
2018-11-16 14:26:59 +09:00
dependabot[bot]
c92f41aea8 Update rand requirement from 0.5 to 0.6 (#440)
* Update rand requirement from 0.5 to 0.6

Updates the requirements on [rand](https://github.com/rust-random/rand) to permit the latest version.
- [Release notes](https://github.com/rust-random/rand/releases)
- [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-random/rand/commits)

Signed-off-by: dependabot[bot] <support@dependabot.com>

* Updating rand.
2018-11-16 12:38:01 +09:00
Do Duy
dea16f1d9d Derive Clone for QueryParser (#442) 2018-11-15 18:45:40 +09:00
dependabot[bot]
236cfbec08 Update crossbeam requirement from 0.4 to 0.5 (#438)
Updates the requirements on [crossbeam](https://github.com/crossbeam-rs/crossbeam) to permit the latest version.
- [Release notes](https://github.com/crossbeam-rs/crossbeam/releases)
- [Changelog](https://github.com/crossbeam-rs/crossbeam/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crossbeam-rs/crossbeam/commits/crossbeam-0.5.0)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-11-15 06:16:22 +09:00
73 changed files with 2734 additions and 1642 deletions

View File

@@ -1,5 +1,13 @@
Tantivy 0.8.1
=====================
*No change in the index format*
- API Breaking change in the collector API. (@jwolfe, @fulmicoton)
- Multithreaded search (@jwolfe, @fulmicoton)
Tantivy 0.7.1
=====================
*No change in the index format*
- Bugfix: NGramTokenizer panics on non ascii chars
- Added a space usage API
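The API change summarized above is visible throughout the diffs below: `Searcher::search` now takes the collector by shared reference and returns the collector's result (its "fruit") instead of mutating the collector in place. A minimal sketch of the new call shape, assembled from the examples and doctests in this diff (tantivy 0.8-dev API):

```rust
#[macro_use]
extern crate tantivy;

use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let mut index_writer = index.writer(3_000_000)?;
    index_writer.add_document(doc!(title => "The Diary of Muadib"));
    index_writer.commit()?;
    index.load_searchers()?;
    let searcher = index.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
    // The collector is passed by `&`, and the fruit is returned:
    let count: usize = searcher.search(&query, &Count)?;
    assert_eq!(count, 1);
    Ok(())
}
```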

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.7.1"
version = "0.8.0-dev"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -33,8 +33,7 @@ itertools = "0.7"
levenshtein_automata = {version="0.1", features=["fst_automaton"]}
bit-set = "0.5"
uuid = { version = "0.7", features = ["v4", "serde"] }
crossbeam = "0.4"
crossbeam-channel = "0.2"
crossbeam = "0.5"
futures = "0.1"
futures-cpupool = "0.1"
owning_ref = "0.4"
@@ -49,12 +48,14 @@ owned-read = "0.4"
failure = "0.1"
htmlescape = "0.3.1"
fail = "0.2"
scoped-pool = "1.0"
aho-corasick = "0.6"
[target.'cfg(windows)'.dependencies]
winapi = "0.2"
[dev-dependencies]
rand = "0.5"
rand = "0.6"
maplit = "1"
[profile.release]
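The separate `crossbeam-channel` dependency disappears here because crossbeam 0.5 re-exports the channel crate (see commits 8b0b0133dd and "using crossbeam::channel" above). A sketch of the corresponding import change, assuming the 0.5 re-export path:

```rust
// before: a separate crate
// extern crate crossbeam_channel;
// use crossbeam_channel::unbounded;

// after: through the crossbeam 0.5 re-export
extern crate crossbeam;
use crossbeam::channel::unbounded;

fn main() {
    let (sender, receiver) = unbounded::<u32>();
    let _ = sender.send(1);
    println!("{:?}", receiver.recv());
}
```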

View File

@@ -16,10 +16,11 @@ extern crate tempdir;
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopCollector;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::Index;
use tempdir::TempDir;
fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the
@@ -34,7 +35,7 @@ fn main() -> tantivy::Result<()> {
// be indexed".
// first we need to define a schema ...
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
// Our first field is title.
// We want full-text search for it, and we also want
@@ -212,15 +213,10 @@ fn main() -> tantivy::Result<()> {
//
// We are not interested in all of the documents but
// only in the top 10. Keeping track of our top 10 best documents
// is the role of the TopCollector.
let mut top_collector = TopCollector::with_limit(10);
// is the role of the TopDocs.
// We can now perform our query.
searcher.search(&*query, &mut top_collector)?;
// Our top collector now contains the 10
// most relevant doc ids...
let doc_addresses = top_collector.docs();
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
// The actual documents still need to be
// retrieved from Tantivy's store.
@@ -229,12 +225,10 @@ fn main() -> tantivy::Result<()> {
// the document returned will only contain
// a title.
for doc_address in doc_addresses {
for (_score, doc_address) in top_docs {
let retrieved_doc = searcher.doc(doc_address)?;
println!("{}", schema.to_json(&retrieved_doc));
}
Ok(())
}
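As the hunks above show, the migration is mechanical: `TopCollector::with_limit(10)`, a mutable `search` call, and `top_collector.docs()` collapse into a single `searcher.search(&query, &TopDocs::with_limit(10))?`, whose result is a `Vec<(Score, DocAddress)>` ready to iterate.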

View File

@@ -0,0 +1,187 @@
// # Custom collector example
//
// This example shows how you can implement your own
// collector. As an example, we will write a collector
// that computes the standard deviation of a given fast field.
//
// Of course, you can have a look at tantivy's built-in collectors,
// such as the `Count` collector, for more examples.
extern crate tempdir;
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INT_INDEXED, TEXT};
use tantivy::Index;
use tantivy::SegmentReader;
#[derive(Default)]
struct Stats {
count: usize,
sum: f64,
squared_sum: f64,
}
impl Stats {
pub fn count(&self) -> usize {
self.count
}
pub fn mean(&self) -> f64 {
self.sum / (self.count as f64)
}
fn square_mean(&self) -> f64 {
self.squared_sum / (self.count as f64)
}
pub fn standard_deviation(&self) -> f64 {
let mean = self.mean();
(self.square_mean() - mean * mean).sqrt()
}
fn non_zero_count(self) -> Option<Stats> {
if self.count == 0 {
None
} else {
Some(self)
}
}
}
struct StatsCollector {
field: Field,
}
impl StatsCollector {
fn with_field(field: Field) -> StatsCollector {
StatsCollector { field }
}
}
impl Collector for StatsCollector {
// That's the type of our result.
// Our standard deviation will be a float.
type Fruit = Option<Stats>;
type Child = StatsSegmentCollector;
fn for_segment(
&self,
_segment_local_id: u32,
segment: &SegmentReader,
) -> tantivy::Result<StatsSegmentCollector> {
let fast_field_reader = segment.fast_field_reader(self.field)?;
Ok(StatsSegmentCollector {
fast_field_reader,
stats: Stats::default(),
})
}
fn requires_scoring(&self) -> bool {
// this collector does not care about score.
false
}
fn merge_fruits(&self, segment_stats: Vec<Option<Stats>>) -> tantivy::Result<Option<Stats>> {
let mut stats = Stats::default();
for segment_stats_opt in segment_stats {
if let Some(segment_stats) = segment_stats_opt {
stats.count += segment_stats.count;
stats.sum += segment_stats.sum;
stats.squared_sum += segment_stats.squared_sum;
}
}
Ok(stats.non_zero_count())
}
}
struct StatsSegmentCollector {
fast_field_reader: FastFieldReader<u64>,
stats: Stats,
}
impl SegmentCollector for StatsSegmentCollector {
type Fruit = Option<Stats>;
fn collect(&mut self, doc: u32, _score: f32) {
let value = self.fast_field_reader.get(doc) as f64;
self.stats.count += 1;
self.stats.sum += value;
self.stats.squared_sum += value * value;
}
fn harvest(self) -> <Self as SegmentCollector>::Fruit {
self.stats.non_zero_count()
}
}
fn main() -> tantivy::Result<()> {
// # Defining the schema
//
// The Tantivy index requires a very strict schema.
// The schema declares which fields are in the index,
// and for each field, its type and "the way it should
// be indexed".
// first we need to define a schema ...
let mut schema_builder = Schema::builder();
// We'll assume a fictional index containing
// products, each with a name, a description, and a price.
let product_name = schema_builder.add_text_field("name", TEXT);
let product_description = schema_builder.add_text_field("description", TEXT);
let price = schema_builder.add_u64_field("price", INT_INDEXED | FAST);
let schema = schema_builder.build();
// # Indexing documents
//
// Let's index a bunch of fake documents for the sake of
// this example.
let index = Index::create_in_ram(schema.clone());
let mut index_writer = index.writer(50_000_000)?;
index_writer.add_document(doc!(
product_name => "Super Broom 2000",
product_description => "While it is ok for short distance travel, this broom \
was designed for quidditch. It will up your game.",
price => 30_200u64
));
index_writer.add_document(doc!(
product_name => "Turbulobroom",
product_description => "You might have heard of this broom before : it is the sponsor of the Wales team.\
You'll enjoy its sharp turns, and rapid acceleration",
price => 29_240u64
));
index_writer.add_document(doc!(
product_name => "Broomio",
product_description => "Great value for the price. This broom is a market favorite",
price => 21_240u64
));
index_writer.add_document(doc!(
product_name => "Whack a Mole",
product_description => "Prime quality bat.",
price => 5_200u64
));
index_writer.commit()?;
index.load_searchers()?;
let searcher = index.searcher();
let query_parser = QueryParser::for_index(&index, vec![product_name, product_description]);
// We want to compute statistics over the products matching "broom".
let query = query_parser.parse_query("broom")?;
if let Some(stats) = searcher.search(&query, &StatsCollector::with_field(price))? {
println!("count: {}", stats.count());
println!("mean: {}", stats.mean());
println!("standard deviation: {}", stats.standard_deviation());
}
Ok(())
}
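The collector above works because count, sum, and squared sum are each decomposable into per-segment sums: `merge_fruits` simply adds them up, and the standard deviation falls out at the end as sqrt(E[X²] − E[X]²), exactly the formula in `Stats::standard_deviation`. A quick, self-contained spot check of that identity on the example prices (plain Rust, independent of tantivy):

```rust
fn main() {
    // The three "broom" prices from the example above.
    let xs = [30_200f64, 29_240f64, 21_240f64];
    let count = xs.len() as f64;
    let sum: f64 = xs.iter().sum();
    let squared_sum: f64 = xs.iter().map(|x| x * x).sum();
    // Same formula as Stats::standard_deviation().
    let mean = sum / count;
    let std_dev = (squared_sum / count - mean * mean).sqrt();
    println!("mean = {:.1}, standard deviation = {:.1}", mean, std_dev);
}
```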

View File

@@ -5,7 +5,7 @@
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopCollector;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::NgramTokenizer;
@@ -20,7 +20,7 @@ fn main() -> tantivy::Result<()> {
// be indexed".
// first we need to define a schema ...
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
// Our first field is title.
// In this example we want to use NGram searching
@@ -104,11 +104,9 @@ fn main() -> tantivy::Result<()> {
// here we want to get a hit on the 'ken' in Frankenstein
let query = query_parser.parse_query("ken")?;
let mut top_collector = TopCollector::with_limit(10);
searcher.search(&*query, &mut top_collector)?;
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
let doc_addresses = top_collector.docs();
for doc_address in doc_addresses {
for (_, doc_address) in top_docs {
let retrieved_doc = searcher.doc(doc_address)?;
println!("{}", schema.to_json(&retrieved_doc));
}

View File

@@ -10,7 +10,7 @@
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopCollector;
use tantivy::collector::TopDocs;
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::Index;
@@ -27,10 +27,9 @@ fn extract_doc_given_isbn(index: &Index, isbn_term: &Term) -> tantivy::Result<Op
// The second argument tells tantivy that we don't care about decoding positions,
// or term frequencies.
let term_query = TermQuery::new(isbn_term.clone(), IndexRecordOption::Basic);
let mut top_collector = TopCollector::with_limit(1);
searcher.search(&term_query, &mut top_collector)?;
let top_docs = searcher.search(&term_query, &TopDocs::with_limit(1))?;
if let Some(doc_address) = top_collector.docs().first() {
if let Some((_score, doc_address)) = top_docs.first() {
let doc = searcher.doc(*doc_address)?;
Ok(Some(doc))
} else {
@@ -44,7 +43,7 @@ fn main() -> tantivy::Result<()> {
//
// Check out the *basic_search* example if this makes
// little sense to you.
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
// Tantivy does not really have a notion of primary id.
// This may change in the future.

View File

@@ -25,7 +25,7 @@ fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the
// sake of this example
let index_path = TempDir::new("tantivy_facet_example_dir")?;
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("name", TEXT | STORED);
@@ -62,11 +62,10 @@ fn main() -> tantivy::Result<()> {
let mut facet_collector = FacetCollector::for_field(tags);
facet_collector.add_facet("/pools");
searcher.search(&AllQuery, &mut facet_collector).unwrap();
let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
let counts = facet_collector.harvest();
// This lists all of the facet counts
let facets: Vec<(&Facet, u64)> = counts.get("/pools").collect();
let facets: Vec<(&Facet, u64)> = facet_counts.get("/pools").collect();
assert_eq!(
facets,
vec![

View File

@@ -18,7 +18,7 @@ use tantivy::{DocId, DocSet, Postings};
fn main() -> tantivy::Result<()> {
// We first create a schema for the sake of the
// example. Check the `basic_search` example for more information.
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
// For this example, we need to make sure to index positions for our title
// field. `TEXT` precisely does this.

View File

@@ -10,11 +10,11 @@ extern crate tempdir;
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopCollector;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::Index;
use tantivy::SnippetGenerator;
use tantivy::{Snippet, SnippetGenerator};
use tempdir::TempDir;
fn main() -> tantivy::Result<()> {
@@ -23,7 +23,7 @@ fn main() -> tantivy::Result<()> {
let index_path = TempDir::new("tantivy_example_dir")?;
// # Defining the schema
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT | STORED);
let body = schema_builder.add_text_field("body", TEXT | STORED);
let schema = schema_builder.build();
@@ -54,18 +54,34 @@ fn main() -> tantivy::Result<()> {
let query_parser = QueryParser::for_index(&index, vec![title, body]);
let query = query_parser.parse_query("sycamore spring")?;
let mut top_collector = TopCollector::with_limit(10);
searcher.search(&*query, &mut top_collector)?;
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
let snippet_generator = SnippetGenerator::new(&searcher, &*query, body)?;
let doc_addresses = top_collector.docs();
for doc_address in doc_addresses {
for (score, doc_address) in top_docs {
let doc = searcher.doc(doc_address)?;
let snippet = snippet_generator.snippet_from_doc(&doc);
println!("Document score {}:", score);
println!("title: {}", doc.get_first(title).unwrap().text().unwrap());
println!("snippet: {}", snippet.to_html());
println!("custom highlighting: {}", highlight(snippet));
}
Ok(())
}
fn highlight(snippet: Snippet) -> String {
let mut result = String::new();
let mut start_from = 0;
for (start, end) in snippet.highlighted().iter().map(|h| h.bounds()) {
result.push_str(&snippet.fragments()[start_from..start]);
result.push_str(" --> ");
result.push_str(&snippet.fragments()[start..end]);
result.push_str(" <-- ");
start_from = end;
}
result.push_str(&snippet.fragments()[start_from..]);
result
}
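To illustrate the new `Snippet` accessors driving the custom `highlight` function above: for a snippet whose fragment were, say, "the quick brown fox" with "quick" as the single highlighted range, it would return "the  --> quick <--  brown fox" (the doubled spaces appear because the delimiters carry their own padding).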

View File

@@ -15,7 +15,7 @@ extern crate tempdir;
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopCollector;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::*;
@@ -23,7 +23,7 @@ use tantivy::Index;
fn main() -> tantivy::Result<()> {
// this example assumes you understand the content in `basic_search`
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
// This configures your custom options for how tantivy will
// store and process your content in the index; The key
@@ -105,15 +105,11 @@ fn main() -> tantivy::Result<()> {
// stop words are applied on the query as well.
// The following will be equivalent to `title:frankenstein`
let query = query_parser.parse_query("title:\"the Frankenstein\"")?;
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
let mut top_collector = TopCollector::with_limit(10);
searcher.search(&*query, &mut top_collector)?;
let doc_addresses = top_collector.docs();
for doc_address in doc_addresses {
for (score, doc_address) in top_docs {
let retrieved_doc = searcher.doc(doc_address)?;
println!("\n==\nDocument score {}:", score);
println!("{}", schema.to_json(&retrieved_doc));
}

View File

@@ -9,7 +9,7 @@ fn main() -> tantivy::Result<()> {
// Check out the basic example if this is confusing to you.
//
// first we need to define a schema ...
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("title", TEXT | STORED);
schema_builder.add_text_field("body", TEXT);
schema_builder.add_u64_field("year", INT_INDEXED);

View File

@@ -1,142 +0,0 @@
use collector::Collector;
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
/// Collector that does nothing.
/// This is used in the chain Collector and will hopefully
/// be optimized away by the compiler.
pub struct DoNothingCollector;
impl Collector for DoNothingCollector {
#[inline]
fn set_segment(&mut self, _: SegmentLocalId, _: &SegmentReader) -> Result<()> {
Ok(())
}
#[inline]
fn collect(&mut self, _doc: DocId, _score: Score) {}
#[inline]
fn requires_scoring(&self) -> bool {
false
}
}
/// Zero-cost abstraction used to collect on multiple collectors.
/// This contraption is only usable if the type of your collectors
/// are known at compile time.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::{CountCollector, TopCollector, chain};
/// use tantivy::query::QueryParser;
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
///
/// index.load_searchers()?;
/// let searcher = index.searcher();
///
/// {
/// let mut top_collector = TopCollector::with_limit(2);
/// let mut count_collector = CountCollector::default();
/// {
/// let mut collectors = chain().push(&mut top_collector).push(&mut count_collector);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// searcher.search(&*query, &mut collectors).unwrap();
/// }
/// assert_eq!(count_collector.count(), 2);
/// assert!(top_collector.at_capacity());
/// }
///
/// Ok(())
/// }
/// ```
pub struct ChainedCollector<Left: Collector, Right: Collector> {
left: Left,
right: Right,
}
impl<Left: Collector, Right: Collector> ChainedCollector<Left, Right> {
/// Adds a collector
pub fn push<C: Collector>(self, new_collector: &mut C) -> ChainedCollector<Self, &mut C> {
ChainedCollector {
left: self,
right: new_collector,
}
}
}
impl<Left: Collector, Right: Collector> Collector for ChainedCollector<Left, Right> {
fn set_segment(
&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader,
) -> Result<()> {
self.left.set_segment(segment_local_id, segment)?;
self.right.set_segment(segment_local_id, segment)?;
Ok(())
}
fn collect(&mut self, doc: DocId, score: Score) {
self.left.collect(doc, score);
self.right.collect(doc, score);
}
fn requires_scoring(&self) -> bool {
self.left.requires_scoring() || self.right.requires_scoring()
}
}
/// Creates a `ChainedCollector`
pub fn chain() -> ChainedCollector<DoNothingCollector, DoNothingCollector> {
ChainedCollector {
left: DoNothingCollector,
right: DoNothingCollector,
}
}
#[cfg(test)]
mod tests {
use super::*;
use collector::{Collector, CountCollector, TopCollector};
#[test]
fn test_chained_collector() {
let mut top_collector = TopCollector::with_limit(2);
let mut count_collector = CountCollector::default();
{
let mut collectors = chain().push(&mut top_collector).push(&mut count_collector);
collectors.collect(1, 0.2);
collectors.collect(2, 0.1);
collectors.collect(3, 0.5);
}
assert_eq!(count_collector.count(), 3);
assert!(top_collector.at_capacity());
}
}
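This whole file is deleted: the tuple implementations added to `collector/mod.rs` further down supersede `chain()`. A sketch of the deleted doctest rewritten against the new API, with `TopDocs` and `Count` standing in for `TopCollector` and `CountCollector` (same index, same "diary" query):

```rust
#[macro_use]
extern crate tantivy;

use tantivy::collector::{Count, TopDocs};
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    {
        let mut index_writer = index.writer(3_000_000)?;
        index_writer.add_document(doc!(title => "The Name of the Wind"));
        index_writer.add_document(doc!(title => "The Diary of Muadib"));
        index_writer.add_document(doc!(title => "A Dairy Cow"));
        index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
        index_writer.commit()?;
    }
    index.load_searchers()?;
    let searcher = index.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
    // One pass, two fruits: no chain() needed.
    let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count))?;
    assert_eq!(count, 2);
    assert_eq!(top_docs.len(), 2);
    Ok(())
}
```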

View File

@@ -1,4 +1,5 @@
use super::Collector;
use collector::SegmentCollector;
use DocId;
use Result;
use Score;
@@ -11,14 +12,14 @@ use SegmentReader;
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT};
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::CountCollector;
/// use tantivy::collector::Count;
/// use tantivy::query::QueryParser;
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new();
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
@@ -43,59 +44,86 @@ use SegmentReader;
/// let searcher = index.searcher();
///
/// {
/// let mut count_collector = CountCollector::default();
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// searcher.search(&*query, &mut count_collector).unwrap();
/// let count = searcher.search(&query, &Count).unwrap();
///
/// assert_eq!(count_collector.count(), 2);
/// assert_eq!(count, 2);
/// }
///
/// Ok(())
/// }
/// ```
#[derive(Default)]
pub struct CountCollector {
count: usize,
}
pub struct Count;
impl CountCollector {
/// Returns the count of documents that were
/// collected.
pub fn count(&self) -> usize {
self.count
}
}
impl Collector for Count {
type Fruit = usize;
impl Collector for CountCollector {
fn set_segment(&mut self, _: SegmentLocalId, _: &SegmentReader) -> Result<()> {
Ok(())
}
type Child = SegmentCountCollector;
fn collect(&mut self, _: DocId, _: Score) {
self.count += 1;
fn for_segment(&self, _: SegmentLocalId, _: &SegmentReader) -> Result<SegmentCountCollector> {
Ok(SegmentCountCollector::default())
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(&self, segment_counts: Vec<usize>) -> Result<usize> {
Ok(segment_counts.into_iter().sum())
}
}
#[derive(Default)]
pub struct SegmentCountCollector {
count: usize,
}
impl SegmentCollector for SegmentCountCollector {
type Fruit = usize;
fn collect(&mut self, _: DocId, _: Score) {
self.count += 1;
}
fn harvest(self) -> usize {
self.count
}
}
#[cfg(test)]
mod tests {
use collector::{Collector, CountCollector};
use super::{Count, SegmentCountCollector};
use collector::Collector;
use collector::SegmentCollector;
#[test]
fn test_count_collector() {
let mut count_collector = CountCollector::default();
assert_eq!(count_collector.count(), 0);
count_collector.collect(0u32, 1f32);
assert_eq!(count_collector.count(), 1);
assert_eq!(count_collector.count(), 1);
count_collector.collect(1u32, 1f32);
assert_eq!(count_collector.count(), 2);
assert!(!count_collector.requires_scoring());
fn test_count_collect_does_not_requires_scoring() {
assert!(!Count.requires_scoring());
}
#[test]
fn test_segment_count_collector() {
{
let count_collector = SegmentCountCollector::default();
assert_eq!(count_collector.harvest(), 0);
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1f32);
assert_eq!(count_collector.harvest(), 1);
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1f32);
assert_eq!(count_collector.harvest(), 1);
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1f32);
count_collector.collect(1u32, 1f32);
assert_eq!(count_collector.harvest(), 2);
}
}
}
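Counting is the simplest illustration of the new split: each `SegmentCountCollector` tallies its own segment, `harvest` hands back a plain `usize`, and `merge_fruits` sums them. That independence of the per-segment collectors is precisely what makes the work safe to run on separate threads.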

View File

@@ -1,20 +1,17 @@
use collector::Collector;
use collector::SegmentCollector;
use docset::SkipResult;
use fastfield::FacetReader;
use schema::Facet;
use schema::Field;
use std::cell::UnsafeCell;
use std::cmp::Ordering;
use std::collections::btree_map;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use std::collections::BinaryHeap;
use std::collections::Bound;
use std::iter::Peekable;
use std::mem;
use std::{u64, usize};
use termdict::TermMerger;
use std::cmp::Ordering;
use DocId;
use Result;
use Score;
@@ -46,12 +43,6 @@ impl<'a> Ord for Hit<'a> {
}
}
struct SegmentFacetCounter {
pub facet_reader: FacetReader,
pub facet_ords: Vec<u64>,
pub facet_counts: Vec<u64>,
}
fn facet_depth(facet_bytes: &[u8]) -> usize {
if facet_bytes.is_empty() {
0
@@ -91,14 +82,14 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Facet, SchemaBuilder, TEXT};
/// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::FacetCollector;
/// use tantivy::query::AllQuery;
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new();
/// let mut schema_builder = Schema::builder();
///
/// // Facets have their own specific type.
/// // It is not a bad practice to put all of your
@@ -141,13 +132,10 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/lang");
/// facet_collector.add_facet("/category");
/// searcher.search(&AllQuery, &mut facet_collector).unwrap();
///
/// // this object contains count aggregate for all of the facets.
/// let counts = facet_collector.harvest();
/// let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
///
/// // This lists all of the facet counts
/// let facets: Vec<(&Facet, u64)> = counts
/// let facets: Vec<(&Facet, u64)> = facet_counts
/// .get("/category")
/// .collect();
/// assert_eq!(facets, vec![
@@ -159,13 +147,10 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction");
/// searcher.search(&AllQuery, &mut facet_collector).unwrap();
///
/// // this object contains count aggregate for all of the facets.
/// let counts = facet_collector.harvest();
/// let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
///
/// // This lists all of the facet counts
/// let facets: Vec<(&Facet, u64)> = counts
/// let facets: Vec<(&Facet, u64)> = facet_counts
/// .get("/category/fiction")
/// .collect();
/// assert_eq!(facets, vec![
@@ -178,13 +163,10 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction");
/// searcher.search(&AllQuery, &mut facet_collector).unwrap();
///
/// // this object contains count aggregate for all of the facets.
/// let counts = facet_collector.harvest();
/// let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
///
/// // This lists all of the facet counts
/// let facets: Vec<(&Facet, u64)> = counts.top_k("/category/fiction", 1);
/// let facets: Vec<(&Facet, u64)> = facet_counts.top_k("/category/fiction", 1);
/// assert_eq!(facets, vec![
/// (&Facet::from("/category/fiction/fantasy"), 2)
/// ]);
@@ -194,21 +176,21 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// }
/// ```
pub struct FacetCollector {
facet_ords: Vec<u64>,
field: Field,
ff_reader: Option<UnsafeCell<FacetReader>>,
segment_counters: Vec<SegmentFacetCounter>,
// facet_ord -> collapse facet_id
current_segment_collapse_mapping: Vec<usize>,
// collapse facet_id -> count
current_segment_counts: Vec<u64>,
// collapse facet_id -> facet_ord
current_collapse_facet_ords: Vec<u64>,
facets: BTreeSet<Facet>,
}
pub struct FacetSegmentCollector {
reader: FacetReader,
facet_ords_buf: Vec<u64>,
// facet_ord -> collapse facet_id
collapse_mapping: Vec<usize>,
// collapse facet_id -> count
counts: Vec<u64>,
// collapse facet_id -> facet_ord
collapse_facet_ords: Vec<u64>,
}
fn skip<'a, I: Iterator<Item = &'a Facet>>(
target: &[u8],
collapse_it: &mut Peekable<I>,
@@ -240,15 +222,8 @@ impl FacetCollector {
/// is of the proper type.
pub fn for_field(field: Field) -> FacetCollector {
FacetCollector {
facet_ords: Vec::with_capacity(255),
segment_counters: Vec::new(),
field,
ff_reader: None,
facets: BTreeSet::new(),
current_segment_collapse_mapping: Vec::new(),
current_collapse_facet_ords: Vec::new(),
current_segment_counts: Vec::new(),
facets: BTreeSet::default(),
}
}
@@ -278,143 +253,100 @@ impl FacetCollector {
}
self.facets.insert(facet);
}
fn set_collapse_mapping(&mut self, facet_reader: &FacetReader) {
self.current_segment_collapse_mapping.clear();
self.current_collapse_facet_ords.clear();
self.current_segment_counts.clear();
let mut collapse_facet_it = self.facets.iter().peekable();
self.current_collapse_facet_ords.push(0);
let mut facet_streamer = facet_reader.facet_dict().range().into_stream();
if !facet_streamer.advance() {
return;
}
'outer: loop {
// at the beginning of this loop, facet_streamer
// is positioned on a term that has not been processed yet.
let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
match skip_result {
SkipResult::Reached => {
// we reach a facet we decided to collapse.
let collapse_depth = facet_depth(facet_streamer.key());
let mut collapsed_id = 0;
self.current_segment_collapse_mapping.push(0);
while facet_streamer.advance() {
let depth = facet_depth(facet_streamer.key());
if depth <= collapse_depth {
continue 'outer;
}
if depth == collapse_depth + 1 {
collapsed_id = self.current_collapse_facet_ords.len();
self.current_collapse_facet_ords
.push(facet_streamer.term_ord());
self.current_segment_collapse_mapping.push(collapsed_id);
} else {
self.current_segment_collapse_mapping.push(collapsed_id);
}
}
break;
}
SkipResult::End | SkipResult::OverStep => {
self.current_segment_collapse_mapping.push(0);
if !facet_streamer.advance() {
break;
}
}
}
}
}
fn finalize_segment(&mut self) {
if self.ff_reader.is_some() {
self.segment_counters.push(SegmentFacetCounter {
facet_reader: self.ff_reader.take().unwrap().into_inner(),
facet_ords: mem::replace(&mut self.current_collapse_facet_ords, Vec::new()),
facet_counts: mem::replace(&mut self.current_segment_counts, Vec::new()),
});
}
}
/// Returns the results of the collection.
///
/// This method does not just return the counters,
/// it also translates the facet ordinals of the last segment.
pub fn harvest(mut self) -> FacetCounts {
self.finalize_segment();
let collapsed_facet_ords: Vec<&[u64]> = self
.segment_counters
.iter()
.map(|segment_counter| &segment_counter.facet_ords[..])
.collect();
let collapsed_facet_counts: Vec<&[u64]> = self
.segment_counters
.iter()
.map(|segment_counter| &segment_counter.facet_counts[..])
.collect();
let facet_streams = self
.segment_counters
.iter()
.map(|seg_counts| seg_counts.facet_reader.facet_dict().range().into_stream())
.collect::<Vec<_>>();
let mut facet_merger = TermMerger::new(facet_streams);
let mut facet_counts = BTreeMap::new();
while facet_merger.advance() {
let count = facet_merger
.current_kvs()
.iter()
.map(|it| {
let seg_ord = it.segment_ord;
let term_ord = it.streamer.term_ord();
collapsed_facet_ords[seg_ord]
.binary_search(&term_ord)
.map(|collapsed_term_id| {
if collapsed_term_id == 0 {
0
} else {
collapsed_facet_counts[seg_ord][collapsed_term_id]
}
}).unwrap_or(0)
}).sum();
if count > 0u64 {
let bytes: Vec<u8> = facet_merger.key().to_owned();
// may create a corrupted facet if the term dictionary is corrupted
let facet = unsafe { Facet::from_encoded(bytes) };
facet_counts.insert(facet, count);
}
}
FacetCounts { facet_counts }
}
}
impl Collector for FacetCollector {
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.finalize_segment();
type Fruit = FacetCounts;
type Child = FacetSegmentCollector;
fn for_segment(
&self,
_: SegmentLocalId,
reader: &SegmentReader,
) -> Result<FacetSegmentCollector> {
let facet_reader = reader.facet_reader(self.field)?;
self.set_collapse_mapping(&facet_reader);
self.current_segment_counts
.resize(self.current_collapse_facet_ords.len(), 0);
self.ff_reader = Some(UnsafeCell::new(facet_reader));
Ok(())
let mut collapse_mapping = Vec::new();
let mut counts = Vec::new();
let mut collapse_facet_ords = Vec::new();
let mut collapse_facet_it = self.facets.iter().peekable();
collapse_facet_ords.push(0);
{
let mut facet_streamer = facet_reader.facet_dict().range().into_stream();
if facet_streamer.advance() {
'outer: loop {
// at the beginning of this loop, facet_streamer
// is positioned on a term that has not been processed yet.
let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
match skip_result {
SkipResult::Reached => {
// we reach a facet we decided to collapse.
let collapse_depth = facet_depth(facet_streamer.key());
let mut collapsed_id = 0;
collapse_mapping.push(0);
while facet_streamer.advance() {
let depth = facet_depth(facet_streamer.key());
if depth <= collapse_depth {
continue 'outer;
}
if depth == collapse_depth + 1 {
collapsed_id = collapse_facet_ords.len();
collapse_facet_ords.push(facet_streamer.term_ord());
collapse_mapping.push(collapsed_id);
} else {
collapse_mapping.push(collapsed_id);
}
}
break;
}
SkipResult::End | SkipResult::OverStep => {
collapse_mapping.push(0);
if !facet_streamer.advance() {
break;
}
}
}
}
}
}
counts.resize(collapse_facet_ords.len(), 0);
Ok(FacetSegmentCollector {
reader: facet_reader,
facet_ords_buf: Vec::with_capacity(255),
collapse_mapping,
counts,
collapse_facet_ords,
})
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(&self, segments_facet_counts: Vec<FacetCounts>) -> Result<FacetCounts> {
let mut facet_counts: BTreeMap<Facet, u64> = BTreeMap::new();
for segment_facet_counts in segments_facet_counts {
for (facet, count) in segment_facet_counts.facet_counts {
*(facet_counts.entry(facet).or_insert(0)) += count;
}
}
Ok(FacetCounts { facet_counts })
}
}
impl SegmentCollector for FacetSegmentCollector {
type Fruit = FacetCounts;
fn collect(&mut self, doc: DocId, _: Score) {
let facet_reader: &mut FacetReader = unsafe {
&mut *self
.ff_reader
.as_ref()
.expect("collect() was called before set_segment. This should never happen.")
.get()
};
facet_reader.facet_ords(doc, &mut self.facet_ords);
self.reader.facet_ords(doc, &mut self.facet_ords_buf);
let mut previous_collapsed_ord: usize = usize::MAX;
for &facet_ord in &self.facet_ords {
let collapsed_ord = self.current_segment_collapse_mapping[facet_ord as usize];
self.current_segment_counts[collapsed_ord] += if collapsed_ord == previous_collapsed_ord
{
for &facet_ord in &self.facet_ords_buf {
let collapsed_ord = self.collapse_mapping[facet_ord as usize];
self.counts[collapsed_ord] += if collapsed_ord == previous_collapsed_ord {
0
} else {
1
@@ -423,8 +355,23 @@ impl Collector for FacetCollector {
}
}
fn requires_scoring(&self) -> bool {
false
/// Returns the results of the collection.
///
/// This method does not just return the counters,
/// it also translates the facet ordinals of the last segment.
fn harvest(self) -> FacetCounts {
let mut facet_counts = BTreeMap::new();
let facet_dict = self.reader.facet_dict();
for (collapsed_facet_ord, count) in self.counts.iter().cloned().enumerate() {
if count == 0 {
continue;
}
let mut facet = vec![];
let facet_ord = self.collapse_facet_ords[collapsed_facet_ord];
facet_dict.ord_to_term(facet_ord as u64, &mut facet);
facet_counts.insert(unsafe { Facet::from_encoded(facet) }, count);
}
FacetCounts { facet_counts }
}
}
@@ -505,14 +452,14 @@ mod tests {
use core::Index;
use query::AllQuery;
use rand::distributions::Uniform;
use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng};
use schema::Field;
use schema::{Document, Facet, SchemaBuilder};
use schema::{Document, Facet, Field, Schema};
use std::iter;
#[test]
fn test_facet_collector_drilldown() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -536,12 +483,10 @@ mod tests {
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet(Facet::from("/top1"));
searcher.search(&AllQuery, &mut facet_collector).unwrap();
let counts = searcher.search(&AllQuery, &facet_collector).unwrap();
let counts: FacetCounts = facet_collector.harvest();
{
let facets: Vec<(String, u64)> = counts
.get("/top1")
@@ -575,7 +520,7 @@ mod tests {
#[test]
fn test_doc_unsorted_multifacet() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facets");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -592,8 +537,7 @@ mod tests {
assert_eq!(searcher.num_docs(), 1);
let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet("/subjects");
searcher.search(&AllQuery, &mut facet_collector).unwrap();
let counts = facet_collector.harvest();
let counts = searcher.search(&AllQuery, &facet_collector).unwrap();
let facets: Vec<(&Facet, u64)> = counts.get("/subjects").collect();
assert_eq!(facets[0].1, 1);
}
@@ -607,7 +551,7 @@ mod tests {
#[test]
fn test_facet_collector_topk() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -626,7 +570,7 @@ mod tests {
);
doc
}).collect();
thread_rng().shuffle(&mut docs[..]);
docs[..].shuffle(&mut thread_rng());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for doc in docs {
@@ -639,9 +583,8 @@ mod tests {
let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet("/facet");
searcher.search(&AllQuery, &mut facet_collector).unwrap();
let counts: FacetCounts = searcher.search(&AllQuery, &facet_collector).unwrap();
let counts: FacetCounts = facet_collector.harvest();
{
let facets: Vec<(&Facet, u64)> = counts.top_k("/facet", 3);
assert_eq!(
@@ -664,13 +607,13 @@ mod bench {
use query::AllQuery;
use rand::{thread_rng, Rng};
use schema::Facet;
use schema::SchemaBuilder;
use schema::Schema;
use test::Bencher;
use Index;
#[bench]
fn bench_facet_collector(b: &mut Bencher) {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -694,8 +637,8 @@ mod bench {
b.iter(|| {
let searcher = index.searcher();
let mut facet_collector = FacetCollector::for_field(facet_field);
searcher.search(&AllQuery, &mut facet_collector).unwrap();
let facet_collector = FacetCollector::for_field(facet_field);
searcher.search(&AllQuery, &facet_collector).unwrap();
});
}
}
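The shape of this refactor mirrors the count collector: all of the mutable per-segment state (`collapse_mapping`, `counts`, `collapse_facet_ords`) moves into `FacetSegmentCollector`, built up front in `for_segment`, and `harvest` resolves the collapsed ordinals back into `Facet` keys through the segment's own facet dictionary. Because each segment's fruit is already a `BTreeMap<Facet, u64>`, `merge_fruits` no longer needs the old `TermMerger` machinery; it just sums the counts per facet across segments.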

View File

@@ -79,7 +79,7 @@ mod tests {
// make sure we have facet counters correctly filled
fn test_facet_collector_results() {
let mut schema_builder = schema::SchemaBuilder::new();
let mut schema_builder = schema::Schema::builder();
let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
let text_field = schema_builder.add_text_field("text", STRING);

View File

@@ -1,7 +1,91 @@
/*!
Defines how the documents matching a search query should be processed.
# Collectors
Collectors define the information you want to extract from the documents matching the queries.
In tantivy jargon, we call this information your search "fruit".
Your fruit could for instance be:
- [the count of matching documents](./struct.Count.html)
- [the top 10 documents, by relevancy or by a fast field](./struct.TopDocs.html)
- [facet counts](./struct.FacetCollector.html)
At one point in your code, you will trigger the actual search operation by calling
[the `search(...)` method of your `Searcher` object](../struct.Searcher.html#method.search).
This call will look like this.
```verbatim
let fruit = searcher.search(&query, &collector)?;
```
Here the type of fruit is actually determined as an associated type of the collector (`Collector::Fruit`).
# Combining several collectors
A rich search experience often requires running several collectors on your search query.
For instance,
- selecting the top-K products matching your query
- counting the matching documents
- computing several facets
- computing statistics about the matching product prices
A simple and efficient way to do that is to pass your collectors as one tuple.
The resulting `Fruit` will then be a typed tuple with each collector's original fruits
in their respective position.
```rust
# extern crate tantivy;
# use tantivy::schema::*;
# use tantivy::*;
# use tantivy::query::*;
use tantivy::collector::{Count, TopDocs};
#
# fn main() -> tantivy::Result<()> {
# let mut schema_builder = Schema::builder();
# let title = schema_builder.add_text_field("title", TEXT);
# let schema = schema_builder.build();
# let index = Index::create_in_ram(schema);
# let mut index_writer = index.writer(3_000_000)?;
# index_writer.add_document(doc!(
# title => "The Name of the Wind",
# ));
# index_writer.add_document(doc!(
# title => "The Diary of Muadib",
# ));
# index_writer.commit().unwrap();
# index.load_searchers()?;
# let searcher = index.searcher();
# let query_parser = QueryParser::for_index(&index, vec![title]);
# let query = query_parser.parse_query("diary")?;
let (doc_count, top_docs): (usize, Vec<(Score, DocAddress)>) =
searcher.search(&query, &(Count, TopDocs::with_limit(2)))?;
# Ok(())
# }
```
The `Collector` trait is implemented for up to 4 collectors.
If you have more than 4 collectors, you can either group them into
tuples of tuples `(a,(b,(c,d)))`, or rely on `MultiCollector`.
# Combining several collectors dynamically
Combining collectors into a tuple is a zero-cost abstraction: everything
happens as if you had manually implemented a single collector
combining all of their features.
Unfortunately, this approach requires you to know your collector types at compile time.
If, on the other hand, the collectors depend on some query parameter,
you can rely on `MultiCollector`.
# Implementing your own collectors.
See the `custom_collector` example.
*/
use downcast;
use DocId;
use Result;
use Score;
@@ -9,7 +93,7 @@ use SegmentLocalId;
use SegmentReader;
mod count_collector;
pub use self::count_collector::CountCollector;
pub use self::count_collector::Count;
mod multi_collector;
pub use self::multi_collector::MultiCollector;
@@ -17,237 +101,267 @@ pub use self::multi_collector::MultiCollector;
mod top_collector;
mod top_score_collector;
pub use self::top_score_collector::TopScoreCollector;
#[deprecated]
pub use self::top_score_collector::TopScoreCollector as TopCollector;
pub use self::top_score_collector::TopDocs;
mod top_field_collector;
pub use self::top_field_collector::TopFieldCollector;
pub use self::top_field_collector::TopDocsByField;
mod facet_collector;
pub use self::facet_collector::FacetCollector;
mod chained_collector;
pub use self::chained_collector::{chain, ChainedCollector};
/// `Fruit` is the type for the result of our collection.
/// e.g. `usize` for the `Count` collector.
pub trait Fruit: Send + downcast::Any {}
impl<T> Fruit for T where T: Send + downcast::Any {}
/// Collectors are in charge of collecting and retaining relevant
/// information from the documents found and scored by the query.
///
///
/// For instance,
///
/// - keeping track of the top 10 best documents
/// - computing a breakdown over a fast field
/// - computing the number of documents matching the query
///
/// Queries are in charge of pushing the `DocSet` to the collector.
/// Our search index is in fact a collection of segments, so
/// a `Collector` is actually more of a factory that instantiates
/// a `SegmentCollector` for each segment.
///
/// As they work on multiple segments, they first inform
/// the collector of a change in a segment and then
/// call the `collect` method to push the document to the collector.
///
/// Temporally, our collector will receive calls
/// - `.set_segment(0, segment_reader_0)`
/// - `.collect(doc0_of_segment_0)`
/// - `.collect(...)`
/// - `.collect(last_doc_of_segment_0)`
/// - `.set_segment(1, segment_reader_1)`
/// - `.collect(doc0_of_segment_1)`
/// - `.collect(...)`
/// - `.collect(last_doc_of_segment_1)`
/// - `...`
/// - `.collect(last_doc_of_last_segment)`
/// The collection logic itself is in the `SegmentCollector`.
///
/// Segments are not guaranteed to be visited in any specific order.
pub trait Collector {
pub trait Collector: Sync {
/// `Fruit` is the type for the result of our collection.
/// e.g. `usize` for the `Count` collector.
type Fruit: Fruit;
/// Type of the `SegmentCollector` associated to this collector.
type Child: SegmentCollector<Fruit = Self::Fruit>;
/// `set_segment` is called before beginning to enumerate
/// on this segment.
fn set_segment(
&mut self,
fn for_segment(
&self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader,
) -> Result<()>;
/// The query pushes the scored document to the collector via this method.
fn collect(&mut self, doc: DocId, score: Score);
) -> Result<Self::Child>;
/// Returns true iff the collector requires to compute scores for documents.
fn requires_scoring(&self) -> bool;
/// Combines the fruit associated to the collection of each segments
/// into one fruit.
fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> Result<Self::Fruit>;
}
impl<'a, C: Collector> Collector for &'a mut C {
fn set_segment(
&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader,
) -> Result<()> {
(*self).set_segment(segment_local_id, segment)
}
/// The `SegmentCollector` is the trait in charge of defining the
/// collect operation at the scale of the segment.
///
/// `.collect(doc, score)` will be called for every document
/// matching the query.
pub trait SegmentCollector: 'static {
/// `Fruit` is the type for the result of our collection.
/// e.g. `usize` for the `Count` collector.
type Fruit: Fruit;
/// The query pushes the scored document to the collector via this method.
fn collect(&mut self, doc: DocId, score: Score) {
C::collect(self, doc, score)
fn collect(&mut self, doc: DocId, score: Score);
/// Extract the fruit of the collection from the `SegmentCollector`.
fn harvest(self) -> Self::Fruit;
}
// -----------------------------------------------
// Tuple implementations.
impl<Left, Right> Collector for (Left, Right)
where
Left: Collector,
Right: Collector,
{
type Fruit = (Left::Fruit, Right::Fruit);
type Child = (Left::Child, Right::Child);
fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
let left = self.0.for_segment(segment_local_id, segment)?;
let right = self.1.for_segment(segment_local_id, segment)?;
Ok((left, right))
}
fn requires_scoring(&self) -> bool {
C::requires_scoring(self)
self.0.requires_scoring() || self.1.requires_scoring()
}
fn merge_fruits(
&self,
children: Vec<(Left::Fruit, Right::Fruit)>,
) -> Result<(Left::Fruit, Right::Fruit)> {
let mut left_fruits = vec![];
let mut right_fruits = vec![];
for (left_fruit, right_fruit) in children {
left_fruits.push(left_fruit);
right_fruits.push(right_fruit);
}
Ok((
self.0.merge_fruits(left_fruits)?,
self.1.merge_fruits(right_fruits)?,
))
}
}
impl<Left, Right> SegmentCollector for (Left, Right)
where
Left: SegmentCollector,
Right: SegmentCollector,
{
type Fruit = (Left::Fruit, Right::Fruit);
fn collect(&mut self, doc: DocId, score: Score) {
self.0.collect(doc, score);
self.1.collect(doc, score);
}
fn harvest(self) -> <Self as SegmentCollector>::Fruit {
(self.0.harvest(), self.1.harvest())
}
}
// 3-Tuple
impl<One, Two, Three> Collector for (One, Two, Three)
where
One: Collector,
Two: Collector,
Three: Collector,
{
type Fruit = (One::Fruit, Two::Fruit, Three::Fruit);
type Child = (One::Child, Two::Child, Three::Child);
fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
let one = self.0.for_segment(segment_local_id, segment)?;
let two = self.1.for_segment(segment_local_id, segment)?;
let three = self.2.for_segment(segment_local_id, segment)?;
Ok((one, two, three))
}
fn requires_scoring(&self) -> bool {
self.0.requires_scoring() || self.1.requires_scoring() || self.2.requires_scoring()
}
fn merge_fruits(&self, children: Vec<Self::Fruit>) -> Result<Self::Fruit> {
let mut one_fruits = vec![];
let mut two_fruits = vec![];
let mut three_fruits = vec![];
for (one_fruit, two_fruit, three_fruit) in children {
one_fruits.push(one_fruit);
two_fruits.push(two_fruit);
three_fruits.push(three_fruit);
}
Ok((
self.0.merge_fruits(one_fruits)?,
self.1.merge_fruits(two_fruits)?,
self.2.merge_fruits(three_fruits)?,
))
}
}
impl<One, Two, Three> SegmentCollector for (One, Two, Three)
where
One: SegmentCollector,
Two: SegmentCollector,
Three: SegmentCollector,
{
type Fruit = (One::Fruit, Two::Fruit, Three::Fruit);
fn collect(&mut self, doc: DocId, score: Score) {
self.0.collect(doc, score);
self.1.collect(doc, score);
self.2.collect(doc, score);
}
fn harvest(self) -> <Self as SegmentCollector>::Fruit {
(self.0.harvest(), self.1.harvest(), self.2.harvest())
}
}
// 4-Tuple
impl<One, Two, Three, Four> Collector for (One, Two, Three, Four)
where
One: Collector,
Two: Collector,
Three: Collector,
Four: Collector,
{
type Fruit = (One::Fruit, Two::Fruit, Three::Fruit, Four::Fruit);
type Child = (One::Child, Two::Child, Three::Child, Four::Child);
fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
let one = self.0.for_segment(segment_local_id, segment)?;
let two = self.1.for_segment(segment_local_id, segment)?;
let three = self.2.for_segment(segment_local_id, segment)?;
let four = self.3.for_segment(segment_local_id, segment)?;
Ok((one, two, three, four))
}
fn requires_scoring(&self) -> bool {
self.0.requires_scoring()
|| self.1.requires_scoring()
|| self.2.requires_scoring()
|| self.3.requires_scoring()
}
fn merge_fruits(&self, children: Vec<Self::Fruit>) -> Result<Self::Fruit> {
let mut one_fruits = vec![];
let mut two_fruits = vec![];
let mut three_fruits = vec![];
let mut four_fruits = vec![];
for (one_fruit, two_fruit, three_fruit, four_fruit) in children {
one_fruits.push(one_fruit);
two_fruits.push(two_fruit);
three_fruits.push(three_fruit);
four_fruits.push(four_fruit);
}
Ok((
self.0.merge_fruits(one_fruits)?,
self.1.merge_fruits(two_fruits)?,
self.2.merge_fruits(three_fruits)?,
self.3.merge_fruits(four_fruits)?,
))
}
}
impl<One, Two, Three, Four> SegmentCollector for (One, Two, Three, Four)
where
One: SegmentCollector,
Two: SegmentCollector,
Three: SegmentCollector,
Four: SegmentCollector,
{
type Fruit = (One::Fruit, Two::Fruit, Three::Fruit, Four::Fruit);
fn collect(&mut self, doc: DocId, score: Score) {
self.0.collect(doc, score);
self.1.collect(doc, score);
self.2.collect(doc, score);
self.3.collect(doc, score);
}
fn harvest(self) -> <Self as SegmentCollector>::Fruit {
(
self.0.harvest(),
self.1.harvest(),
self.2.harvest(),
self.3.harvest(),
)
}
}
#[allow(missing_docs)]
mod downcast_impl {
downcast!(super::Fruit);
}
#[cfg(test)]
pub mod tests {
use super::*;
use core::SegmentReader;
use fastfield::BytesFastFieldReader;
use fastfield::FastFieldReader;
use schema::Field;
use DocId;
use Score;
use SegmentLocalId;
/// Stores all of the doc ids.
/// This collector is only used for tests.
/// It is unusable in practice, as it does not store
/// the segment ordinals
pub struct TestCollector {
offset: DocId,
segment_max_doc: DocId,
docs: Vec<DocId>,
scores: Vec<Score>,
}
impl TestCollector {
/// Returns the exhaustive list of collected documents.
pub fn docs(self) -> Vec<DocId> {
self.docs
}
pub fn scores(self) -> Vec<Score> {
self.scores
}
}
impl Default for TestCollector {
fn default() -> TestCollector {
TestCollector {
offset: 0,
segment_max_doc: 0,
docs: Vec::new(),
scores: Vec::new(),
}
}
}
impl Collector for TestCollector {
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.offset += self.segment_max_doc;
self.segment_max_doc = reader.max_doc();
Ok(())
}
fn collect(&mut self, doc: DocId, score: Score) {
self.docs.push(doc + self.offset);
self.scores.push(score);
}
fn requires_scoring(&self) -> bool {
true
}
}
/// Collects in order all of the fast fields for all of the
/// docs in the `DocSet`
///
/// This collector is mainly useful for tests.
pub struct FastFieldTestCollector {
vals: Vec<u64>,
field: Field,
ff_reader: Option<FastFieldReader<u64>>,
}
impl FastFieldTestCollector {
pub fn for_field(field: Field) -> FastFieldTestCollector {
FastFieldTestCollector {
vals: Vec::new(),
field,
ff_reader: None,
}
}
pub fn vals(self) -> Vec<u64> {
self.vals
}
}
impl Collector for FastFieldTestCollector {
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.ff_reader = Some(reader.fast_field_reader(self.field)?);
Ok(())
}
fn collect(&mut self, doc: DocId, _score: Score) {
let val = self.ff_reader.as_ref().unwrap().get(doc);
self.vals.push(val);
}
fn requires_scoring(&self) -> bool {
false
}
}
/// Collects in order all of the fast field bytes for all of the
/// docs in the `DocSet`
///
/// This collector is mainly useful for tests.
pub struct BytesFastFieldTestCollector {
vals: Vec<u8>,
field: Field,
ff_reader: Option<BytesFastFieldReader>,
}
impl BytesFastFieldTestCollector {
pub fn for_field(field: Field) -> BytesFastFieldTestCollector {
BytesFastFieldTestCollector {
vals: Vec::new(),
field,
ff_reader: None,
}
}
pub fn vals(self) -> Vec<u8> {
self.vals
}
}
impl Collector for BytesFastFieldTestCollector {
fn set_segment(&mut self, _segment_local_id: u32, segment: &SegmentReader) -> Result<()> {
self.ff_reader = Some(segment.bytes_fast_field_reader(self.field)?);
Ok(())
}
fn collect(&mut self, doc: u32, _score: f32) {
let val = self.ff_reader.as_ref().unwrap().get_val(doc);
self.vals.extend(val);
}
fn requires_scoring(&self) -> bool {
false
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use collector::{Collector, CountCollector};
use test::Bencher;
#[bench]
fn build_collector(b: &mut Bencher) {
b.iter(|| {
let mut count_collector = CountCollector::default();
let docs: Vec<u32> = (0..1_000_000).collect();
for doc in docs {
count_collector.collect(doc, 1f32);
}
count_collector.count()
});
}
}
pub mod tests;

View File

@@ -1,9 +1,96 @@
use super::Collector;
use super::SegmentCollector;
use collector::Fruit;
use downcast::Downcast;
use std::marker::PhantomData;
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
use TantivyError;
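/// Fruit of a `MultiCollector`: one boxed, type-erased fruit per sub-collector,
/// stored in the order in which the collectors were added.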
pub struct MultiFruit {
sub_fruits: Vec<Option<Box<Fruit>>>,
}
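/// Wraps a typed `Collector` behind a uniform interface: its fruit is boxed and
/// type-erased as `Box<Fruit>`, so heterogeneous collectors can share one `Vec`.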
pub struct CollectorWrapper<TCollector: Collector>(TCollector);
impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
type Fruit = Box<Fruit>;
type Child = Box<BoxableSegmentCollector>;
fn for_segment(
&self,
segment_local_id: u32,
reader: &SegmentReader,
) -> Result<Box<BoxableSegmentCollector>> {
let child = self.0.for_segment(segment_local_id, reader)?;
Ok(Box::new(SegmentCollectorWrapper(child)))
}
fn requires_scoring(&self) -> bool {
self.0.requires_scoring()
}
fn merge_fruits(&self, children: Vec<<Self as Collector>::Fruit>) -> Result<Box<Fruit>> {
let typed_fruit: Vec<TCollector::Fruit> = children
.into_iter()
.map(|untyped_fruit| {
Downcast::<TCollector::Fruit>::downcast(untyped_fruit)
.map(|boxed_but_typed| *boxed_but_typed)
.map_err(|e| {
let err_msg = format!("Failed to cast child collector fruit. {:?}", e);
TantivyError::InvalidArgument(err_msg)
})
}).collect::<Result<_>>()?;
let merged_fruit = self.0.merge_fruits(typed_fruit)?;
Ok(Box::new(merged_fruit))
}
}
impl SegmentCollector for Box<BoxableSegmentCollector> {
type Fruit = Box<Fruit>;
fn collect(&mut self, doc: u32, score: f32) {
self.as_mut().collect(doc, score);
}
fn harvest(self) -> Box<Fruit> {
BoxableSegmentCollector::harvest_from_box(self)
}
}
pub trait BoxableSegmentCollector {
fn collect(&mut self, doc: u32, score: f32);
fn harvest_from_box(self: Box<Self>) -> Box<Fruit>;
}
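// `BoxableSegmentCollector` is an object-safe counterpart of `SegmentCollector`:
// `harvest_from_box` takes `self: Box<Self>`, so harvesting by value remains
// possible through a trait object.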
pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegmentCollector);
impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
for SegmentCollectorWrapper<TSegmentCollector>
{
fn collect(&mut self, doc: u32, score: f32) {
self.0.collect(doc, score);
}
fn harvest_from_box(self: Box<Self>) -> Box<Fruit> {
Box::new(self.0.harvest())
}
}
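/// Typed handle returned by `MultiCollector::add_collector`.
///
/// It remembers the position and fruit type of one sub-collector, so that the
/// matching fruit can later be extracted from the `MultiFruit`.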
pub struct FruitHandle<TFruit: Fruit> {
pos: usize,
_phantom: PhantomData<TFruit>,
}
impl<TFruit: Fruit> FruitHandle<TFruit> {
pub fn extract(self, fruits: &mut MultiFruit) -> TFruit {
let boxed_fruit = fruits.sub_fruits[self.pos].take().expect("fruit was already extracted");
*Downcast::<TFruit>::downcast(boxed_fruit).expect("failed to downcast fruit to its original type")
}
}
/// `MultiCollector` makes it possible to collect with more than one collector.
/// It should only be used for use cases where the Collector type is unknown
@@ -13,14 +100,14 @@ use SegmentReader;
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT};
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::{CountCollector, TopCollector, MultiCollector};
/// use tantivy::collector::{Count, TopDocs, MultiCollector};
/// use tantivy::query::QueryParser;
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new();
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
@@ -44,55 +131,114 @@ use SegmentReader;
/// index.load_searchers()?;
/// let searcher = index.searcher();
///
/// {
/// let mut top_collector = TopCollector::with_limit(2);
/// let mut count_collector = CountCollector::default();
/// {
/// let mut collectors =
/// MultiCollector::from(vec![&mut top_collector, &mut count_collector]);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// searcher.search(&*query, &mut collectors).unwrap();
/// }
/// assert_eq!(count_collector.count(), 2);
/// assert!(top_collector.at_capacity());
/// }
/// let mut collectors = MultiCollector::new();
/// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
/// let count_handle = collectors.add_collector(Count);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let mut multi_fruit = searcher.search(&query, &collectors)?;
///
/// let count = count_handle.extract(&mut multi_fruit);
/// let top_docs = top_docs_handle.extract(&mut multi_fruit);
///
/// # assert_eq!(count, 2);
/// # assert_eq!(top_docs.len(), 2);
///
/// Ok(())
/// }
/// ```
pub struct MultiCollector<'a> {
collectors: Vec<&'a mut Collector>,
collector_wrappers:
Vec<Box<Collector<Child = Box<BoxableSegmentCollector>, Fruit = Box<Fruit>> + 'a>>,
}
impl<'a> MultiCollector<'a> {
/// Constructor
pub fn from(collectors: Vec<&'a mut Collector>) -> MultiCollector {
MultiCollector { collectors }
/// Create a new `MultiCollector`
pub fn new() -> MultiCollector<'a> {
MultiCollector {
collector_wrappers: Vec::new(),
}
}
/// Add a new collector to our `MultiCollector`.
pub fn add_collector<'b: 'a, TCollector: Collector + 'b>(
&mut self,
collector: TCollector,
) -> FruitHandle<TCollector::Fruit> {
let pos = self.collector_wrappers.len();
self.collector_wrappers
.push(Box::new(CollectorWrapper(collector)));
FruitHandle {
pos,
_phantom: PhantomData,
}
}
}
impl<'a> Collector for MultiCollector<'a> {
fn set_segment(
&mut self,
type Fruit = MultiFruit;
type Child = MultiCollectorChild;
fn for_segment(
&self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader,
) -> Result<()> {
for collector in &mut self.collectors {
collector.set_segment(segment_local_id, segment)?;
}
Ok(())
) -> Result<MultiCollectorChild> {
let children = self
.collector_wrappers
.iter()
.map(|collector_wrapper| collector_wrapper.for_segment(segment_local_id, segment))
.collect::<Result<Vec<_>>>()?;
Ok(MultiCollectorChild { children })
}
fn requires_scoring(&self) -> bool {
self.collector_wrappers.iter().any(|c| c.requires_scoring())
}
fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> Result<MultiFruit> {
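// Transpose the fruits: from one `MultiFruit` per segment to one list of
// segment fruits per sub-collector, so that each wrapped collector can
// merge its own fruits.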
let mut segment_fruits_list: Vec<Vec<Box<Fruit>>> = (0..self.collector_wrappers.len())
.map(|_| Vec::with_capacity(segments_multifruits.len()))
.collect::<Vec<_>>();
for segment_multifruit in segments_multifruits {
for (idx, segment_fruit_opt) in segment_multifruit.sub_fruits.into_iter().enumerate() {
if let Some(segment_fruit) = segment_fruit_opt {
segment_fruits_list[idx].push(segment_fruit);
}
}
}
let sub_fruits = self
.collector_wrappers
.iter()
.zip(segment_fruits_list)
.map(|(child_collector, segment_fruits)| {
Ok(Some(child_collector.merge_fruits(segment_fruits)?))
}).collect::<Result<_>>()?;
Ok(MultiFruit { sub_fruits })
}
}
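/// Segment collector associated with `MultiCollector`.
/// It simply forwards each collected document to every child segment collector.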
pub struct MultiCollectorChild {
children: Vec<Box<BoxableSegmentCollector>>,
}
impl SegmentCollector for MultiCollectorChild {
type Fruit = MultiFruit;
fn collect(&mut self, doc: DocId, score: Score) {
for collector in &mut self.collectors {
collector.collect(doc, score);
for child in &mut self.children {
child.collect(doc, score);
}
}
fn requires_scoring(&self) -> bool {
self.collectors
.iter()
.any(|collector| collector.requires_scoring())
fn harvest(self) -> MultiFruit {
MultiFruit {
sub_fruits: self
.children
.into_iter()
.map(|child| Some(child.harvest()))
.collect(),
}
}
}
@@ -100,20 +246,42 @@ impl<'a> Collector for MultiCollector<'a> {
mod tests {
use super::*;
use collector::{Collector, CountCollector, TopScoreCollector};
use collector::{Count, TopDocs};
use query::TermQuery;
use schema::IndexRecordOption;
use schema::{Schema, TEXT};
use Index;
use Term;
#[test]
fn test_multi_collector() {
let mut top_collector = TopScoreCollector::with_limit(2);
let mut count_collector = CountCollector::default();
let mut schema_builder = Schema::builder();
let text = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut collectors =
MultiCollector::from(vec![&mut top_collector, &mut count_collector]);
collectors.collect(1, 0.2);
collectors.collect(2, 0.1);
collectors.collect(3, 0.5);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text=>"abc"));
index_writer.add_document(doc!(text=>"abc abc abc"));
index_writer.add_document(doc!(text=>"abc abc"));
index_writer.commit().unwrap();
index_writer.add_document(doc!(text=>""));
index_writer.add_document(doc!(text=>"abc abc abc abc"));
index_writer.add_document(doc!(text=>"abc"));
index_writer.commit().unwrap();
}
assert_eq!(count_collector.count(), 3);
assert!(top_collector.at_capacity());
index.load_searchers().unwrap();
let searcher = index.searcher();
let term = Term::from_field_text(text, "abc");
let query = TermQuery::new(term, IndexRecordOption::Basic);
let mut collectors = MultiCollector::new();
let topdocs_handler = collectors.add_collector(TopDocs::with_limit(2));
let count_handler = collectors.add_collector(Count);
let mut multifruits = searcher.search(&query, &mut collectors).unwrap();
assert_eq!(count_handler.extract(&mut multifruits), 5);
assert_eq!(topdocs_handler.extract(&mut multifruits).len(), 2);
}
}

src/collector/tests.rs Normal file
View File

@@ -0,0 +1,201 @@
use super::*;
use core::SegmentReader;
use fastfield::BytesFastFieldReader;
use fastfield::FastFieldReader;
use schema::Field;
use DocAddress;
use DocId;
use Score;
use SegmentLocalId;
/// Stores all of the doc ids.
/// This collector is only used for tests.
/// It is unusable in practice, as it does not store
/// the segment ordinals.
pub struct TestCollector;
pub struct TestSegmentCollector {
segment_id: SegmentLocalId,
fruit: TestFruit,
}
#[derive(Default)]
pub struct TestFruit {
docs: Vec<DocAddress>,
scores: Vec<Score>,
}
impl TestFruit {
/// Returns the exhaustive list of matching documents.
pub fn docs(&self) -> &[DocAddress] {
&self.docs[..]
}
pub fn scores(&self) -> &[Score] {
&self.scores[..]
}
}
impl Collector for TestCollector {
type Fruit = TestFruit;
type Child = TestSegmentCollector;
fn for_segment(
&self,
segment_id: SegmentLocalId,
_reader: &SegmentReader,
) -> Result<TestSegmentCollector> {
Ok(TestSegmentCollector {
segment_id,
fruit: TestFruit::default(),
})
}
fn requires_scoring(&self) -> bool {
true
}
fn merge_fruits(&self, mut children: Vec<TestFruit>) -> Result<TestFruit> {
children.sort_by_key(|fruit| {
if fruit.docs().is_empty() {
0
} else {
fruit.docs()[0].segment_ord()
}
});
let mut docs = vec![];
let mut scores = vec![];
for child in children {
docs.extend(child.docs());
scores.extend(child.scores);
}
Ok(TestFruit { docs, scores })
}
}
impl SegmentCollector for TestSegmentCollector {
type Fruit = TestFruit;
fn collect(&mut self, doc: DocId, score: Score) {
self.fruit.docs.push(DocAddress(self.segment_id, doc));
self.fruit.scores.push(score);
}
fn harvest(self) -> <Self as SegmentCollector>::Fruit {
self.fruit
}
}
/// Collects in order all of the fast fields for all of the
/// docs in the `DocSet`.
///
/// This collector is mainly useful for tests.
pub struct FastFieldTestCollector {
field: Field,
}
pub struct FastFieldSegmentCollector {
vals: Vec<u64>,
reader: FastFieldReader<u64>,
}
impl FastFieldTestCollector {
pub fn for_field(field: Field) -> FastFieldTestCollector {
FastFieldTestCollector { field }
}
}
impl Collector for FastFieldTestCollector {
type Fruit = Vec<u64>;
type Child = FastFieldSegmentCollector;
fn for_segment(
&self,
_: SegmentLocalId,
reader: &SegmentReader,
) -> Result<FastFieldSegmentCollector> {
Ok(FastFieldSegmentCollector {
vals: Vec::new(),
reader: reader.fast_field_reader(self.field)?,
})
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(&self, children: Vec<Vec<u64>>) -> Result<Vec<u64>> {
Ok(children.into_iter().flat_map(|v| v.into_iter()).collect())
}
}
impl SegmentCollector for FastFieldSegmentCollector {
type Fruit = Vec<u64>;
fn collect(&mut self, doc: DocId, _score: Score) {
let val = self.reader.get(doc);
self.vals.push(val);
}
fn harvest(self) -> Vec<u64> {
self.vals
}
}
/// Collects in order all of the fast field bytes for all of the
/// docs in the `DocSet`
///
/// This collector is mainly useful for tests.
pub struct BytesFastFieldTestCollector {
field: Field,
}
pub struct BytesFastFieldSegmentCollector {
vals: Vec<u8>,
reader: BytesFastFieldReader,
}
impl BytesFastFieldTestCollector {
pub fn for_field(field: Field) -> BytesFastFieldTestCollector {
BytesFastFieldTestCollector { field }
}
}
impl Collector for BytesFastFieldTestCollector {
type Fruit = Vec<u8>;
type Child = BytesFastFieldSegmentCollector;
fn for_segment(
&self,
_segment_local_id: u32,
segment: &SegmentReader,
) -> Result<BytesFastFieldSegmentCollector> {
Ok(BytesFastFieldSegmentCollector {
vals: Vec::new(),
reader: segment.bytes_fast_field_reader(self.field)?,
})
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(&self, children: Vec<Vec<u8>>) -> Result<Vec<u8>> {
Ok(children.into_iter().flat_map(|c| c.into_iter()).collect())
}
}
impl SegmentCollector for BytesFastFieldSegmentCollector {
type Fruit = Vec<u8>;
fn collect(&mut self, doc: u32, _score: f32) {
let data = self.reader.get_val(doc);
self.vals.extend(data);
}
fn harvest(self) -> <Self as SegmentCollector>::Fruit {
self.vals
}
}

View File

@@ -1,56 +1,59 @@
use std::marker::PhantomData;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use DocAddress;
use DocId;
use Result;
use SegmentLocalId;
use SegmentReader;
/// Contains a feature (field, score, etc.) of a document along with the document address.
///
/// It has a custom implementation of `PartialOrd` that reverses the order. This is because the
/// default Rust heap is a max heap, whereas a min heap is needed.
#[derive(Clone, Copy)]
pub struct ComparableDoc<T> {
///
/// WARNING: equality is not what you would expect here.
/// Two elements are equal if their features are equal, regardless of whether
/// their `doc`s are equal. This should be perfectly fine for this usage, but let's make sure this
/// struct is never public.
struct ComparableDoc<T, D> {
feature: T,
doc_address: DocAddress,
doc: D,
}
impl<T: PartialOrd> PartialOrd for ComparableDoc<T> {
impl<T: PartialOrd, D> PartialOrd for ComparableDoc<T, D> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<T: PartialOrd> Ord for ComparableDoc<T> {
impl<T: PartialOrd, D> Ord for ComparableDoc<T, D> {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
other
.feature
.partial_cmp(&self.feature)
.unwrap_or_else(|| other.doc_address.cmp(&self.doc_address))
.unwrap_or(Ordering::Equal)
}
}
impl<T: PartialOrd> PartialEq for ComparableDoc<T> {
impl<T: PartialOrd, D> PartialEq for ComparableDoc<T, D> {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl<T: PartialOrd> Eq for ComparableDoc<T> {}
impl<T: PartialOrd, D> Eq for ComparableDoc<T, D> {}
/// The Top Collector keeps track of the K documents
/// sorted by type `T`.
///
/// The implementation is based on a `BinaryHeap`.
/// The theoretical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
pub struct TopCollector<T> {
pub(crate) struct TopCollector<T> {
limit: usize,
heap: BinaryHeap<ComparableDoc<T>>,
segment_id: u32,
_marker: PhantomData<T>,
}
impl<T: PartialOrd + Clone> TopCollector<T> {
impl<T> TopCollector<T>
where
T: PartialOrd + Clone,
{
/// Creates a top collector, with a number of documents equal to "limit".
///
/// # Panics
@@ -61,127 +64,157 @@ impl<T: PartialOrd + Clone> TopCollector<T> {
}
TopCollector {
limit,
heap: BinaryHeap::with_capacity(limit),
segment_id: 0,
_marker: PhantomData,
}
}
/// Returns K best documents sorted in decreasing order.
///
/// Calling this method triggers the sort.
/// The result of the sort is not cached.
pub fn docs(&self) -> Vec<DocAddress> {
self.top_docs()
.into_iter()
.map(|(_feature, doc)| doc)
.collect()
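/// Returns the number of documents this collector keeps,
/// i.e. the `limit` it was built with.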
pub fn limit(&self) -> usize {
self.limit
}
/// Returns K best FeatureDocuments sorted in decreasing order.
///
/// Calling this method triggers the sort.
/// The result of the sort is not cached.
pub fn top_docs(&self) -> Vec<(T, DocAddress)> {
let mut feature_docs: Vec<ComparableDoc<T>> = self.heap.iter().cloned().collect();
feature_docs.sort();
feature_docs
pub fn merge_fruits(
&self,
children: Vec<Vec<(T, DocAddress)>>,
) -> Result<Vec<(T, DocAddress)>> {
if self.limit == 0 {
return Ok(Vec::new());
}
let mut top_collector = BinaryHeap::new();
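// Merging pushes every child's (feature, doc) pairs through one more top-K
// heap; this stays cheap because each child contributes at most `limit` entries.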
for child_fruit in children {
for (feature, doc) in child_fruit {
if top_collector.len() < self.limit {
top_collector.push(ComparableDoc { feature, doc });
} else {
if let Some(mut head) = top_collector.peek_mut() {
if head.feature < feature {
*head = ComparableDoc { feature, doc };
}
}
}
}
}
Ok(top_collector
.into_sorted_vec()
.into_iter()
.map(
|ComparableDoc {
feature,
doc_address,
}| (feature, doc_address),
).collect()
.map(|cdoc| (cdoc.feature, cdoc.doc))
.collect())
}
pub(crate) fn for_segment(
&self,
segment_id: SegmentLocalId,
_: &SegmentReader,
) -> Result<TopSegmentCollector<T>> {
Ok(TopSegmentCollector::new(segment_id, self.limit))
}
}
/// The Top Collector keeps track of the K documents
/// sorted by type `T`.
///
/// The implementation is based on a `BinaryHeap`.
/// The theoretical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
pub(crate) struct TopSegmentCollector<T> {
limit: usize,
heap: BinaryHeap<ComparableDoc<T, DocId>>,
segment_id: u32,
}
impl<T: PartialOrd> TopSegmentCollector<T> {
fn new(segment_id: SegmentLocalId, limit: usize) -> TopSegmentCollector<T> {
TopSegmentCollector {
limit,
heap: BinaryHeap::with_capacity(limit),
segment_id,
}
}
}
impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
pub fn harvest(self) -> Vec<(T, DocAddress)> {
let segment_id = self.segment_id;
self.heap
.into_sorted_vec()
.into_iter()
.map(|comparable_doc| {
(
comparable_doc.feature,
DocAddress(segment_id, comparable_doc.doc),
)
}).collect()
}
/// Return true iff at least K documents have gone through
/// the collector.
#[inline]
pub fn at_capacity(&self) -> bool {
#[inline(always)]
pub(crate) fn at_capacity(&self) -> bool {
self.heap.len() >= self.limit
}
/// Sets the segment local ID for the collector
pub fn set_segment_id(&mut self, segment_id: SegmentLocalId) {
self.segment_id = segment_id;
}
/// Collects a document scored by the given feature.
///
/// It collects documents until it has reached the max capacity. Once it reaches capacity, it
/// will compare the lowest scoring item with the given one and keep whichever is greater.
#[inline(always)]
pub fn collect(&mut self, doc: DocId, feature: T) {
if self.at_capacity() {
// It's ok to unwrap as long as a limit of 0 is forbidden.
let limit_doc: ComparableDoc<T> = self
.heap
.peek()
.expect("Top collector with size 0 is forbidden")
.clone();
if limit_doc.feature < feature {
let mut mut_head = self
.heap
.peek_mut()
.expect("Top collector with size 0 is forbidden");
mut_head.feature = feature;
mut_head.doc_address = DocAddress(self.segment_id, doc);
if let Some(limit_feature) = self.heap.peek().map(|head| head.feature.clone()) {
if limit_feature < feature {
if let Some(mut head) = self.heap.peek_mut() {
head.feature = feature;
head.doc = doc;
}
}
}
} else {
let wrapped_doc = ComparableDoc {
feature,
doc_address: DocAddress(self.segment_id, doc),
};
self.heap.push(wrapped_doc);
// we have not reached capacity yet, so we can just push the
// element.
self.heap.push(ComparableDoc { feature, doc });
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use DocId;
use super::{TopCollector, TopSegmentCollector};
use DocAddress;
use Score;
#[test]
fn test_top_collector_not_at_capacity() {
let mut top_collector = TopCollector::with_limit(4);
let mut top_collector = TopSegmentCollector::new(0, 4);
top_collector.collect(1, 0.8);
top_collector.collect(3, 0.2);
top_collector.collect(5, 0.3);
assert!(!top_collector.at_capacity());
let score_docs: Vec<(Score, DocId)> = top_collector
.top_docs()
.into_iter()
.map(|(score, doc_address)| (score, doc_address.doc()))
.collect();
assert_eq!(score_docs, vec![(0.8, 1), (0.3, 5), (0.2, 3)]);
assert_eq!(
top_collector.harvest(),
vec![
(0.8, DocAddress(0, 1)),
(0.3, DocAddress(0, 5)),
(0.2, DocAddress(0, 3))
]
);
}
#[test]
fn test_top_collector_at_capacity() {
let mut top_collector = TopCollector::with_limit(4);
let mut top_collector = TopSegmentCollector::new(0, 4);
top_collector.collect(1, 0.8);
top_collector.collect(3, 0.2);
top_collector.collect(5, 0.3);
top_collector.collect(7, 0.9);
top_collector.collect(9, -0.2);
assert!(top_collector.at_capacity());
{
let score_docs: Vec<(Score, DocId)> = top_collector
.top_docs()
.into_iter()
.map(|(score, doc_address)| (score, doc_address.doc()))
.collect();
assert_eq!(score_docs, vec![(0.9, 7), (0.8, 1), (0.3, 5), (0.2, 3)]);
}
{
let docs: Vec<DocId> = top_collector
.docs()
.into_iter()
.map(|doc_address| doc_address.doc())
.collect();
assert_eq!(docs, vec![7, 1, 5, 3]);
}
assert_eq!(
top_collector.harvest(),
vec![
(0.9, DocAddress(0, 7)),
(0.8, DocAddress(0, 1)),
(0.3, DocAddress(0, 5)),
(0.2, DocAddress(0, 3))
]
);
}
#[test]
@@ -189,5 +222,4 @@ mod tests {
fn test_top_0() {
let _collector: TopCollector<Score> = TopCollector::with_limit(0);
}
}

View File

@@ -1,12 +1,13 @@
use super::Collector;
use collector::top_collector::TopCollector;
use collector::top_collector::TopSegmentCollector;
use collector::SegmentCollector;
use fastfield::FastFieldReader;
use fastfield::FastValue;
use schema::Field;
use DocAddress;
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
/// The Top Field Collector keeps track of the K documents
@@ -19,67 +20,57 @@ use SegmentReader;
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT, FAST};
/// use tantivy::{Index, Result, DocId};
/// use tantivy::collector::TopFieldCollector;
/// use tantivy::query::QueryParser;
/// # use tantivy::schema::{Schema, Field, FAST, TEXT};
/// # use tantivy::{Index, Result, DocAddress};
/// # use tantivy::query::{Query, QueryParser};
/// use tantivy::collector::TopDocs;
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let rating = schema_builder.add_u64_field("rating", FAST);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// rating => 92u64,
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// rating => 97u64,
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// rating => 63u64,
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// rating => 80u64,
/// ));
/// index_writer.commit().unwrap();
/// }
/// # fn main() {
/// # let mut schema_builder = Schema::builder();
/// # let title = schema_builder.add_text_field("title", TEXT);
/// # let rating = schema_builder.add_u64_field("rating", FAST);
/// # let schema = schema_builder.build();
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
/// # index_writer.add_document(doc!(
/// # title => "The Name of the Wind",
/// # rating => 92u64,
/// # ));
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// # index_writer.commit().unwrap();
/// # index.load_searchers().unwrap();
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary").unwrap();
/// # let top_docs = docs_sorted_by_rating(&index, &query, rating).unwrap();
/// # assert_eq!(top_docs,
/// # vec![(97u64, DocAddress(0u32, 1)),
/// # (80u64, DocAddress(0u32, 3))]);
/// # }
/// #
/// /// Searches the document matching the given query, and
/// /// collects the top documents, ordered by the `field`
/// /// given in argument.
/// ///
/// /// `field` is required to be a FAST field.
/// fn docs_sorted_by_rating(index: &Index, query: &Query, sort_by_field: Field)
/// -> Result<Vec<(u64, DocAddress)>> {
///
/// index.load_searchers()?;
/// let searcher = index.searcher();
/// // This is where we build our collector!
/// let top_docs_by_rating = TopDocs::with_limit(2).order_by_field(sort_by_field);
///
/// {
/// let mut top_collector = TopFieldCollector::with_limit(rating, 2);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// searcher.search(&*query, &mut top_collector).unwrap();
///
/// let score_docs: Vec<(u64, DocId)> = top_collector
/// .top_docs()
/// .into_iter()
/// .map(|(field, doc_address)| (field, doc_address.doc()))
/// .collect();
///
/// assert_eq!(score_docs, vec![(97u64, 1), (80, 3)]);
/// }
///
/// Ok(())
/// // ... and here are our documents. Note that this is a simple `Vec`.
/// // The `u64` in each pair is the value of our fast field for that document.
/// index.searcher()
/// .search(query, &top_docs_by_rating)
/// }
/// ```
pub struct TopFieldCollector<T: FastValue> {
field: Field,
pub struct TopDocsByField<T> {
collector: TopCollector<T>,
fast_field: Option<FastFieldReader<T>>,
field: Field,
}
impl<T: FastValue + PartialOrd + Clone> TopFieldCollector<T> {
impl<T: FastValue + PartialOrd + Clone> TopDocsByField<T> {
/// Creates a top field collector, with a number of documents equal to "limit".
///
/// The given field name must be a fast field, otherwise the collector will fail with an error while
@@ -87,68 +78,72 @@ impl<T: FastValue + PartialOrd + Clone> TopFieldCollector<T> {
///
/// # Panics
/// The method panics if limit is 0
pub fn with_limit(field: Field, limit: usize) -> Self {
TopFieldCollector {
field,
pub(crate) fn new(field: Field, limit: usize) -> TopDocsByField<T> {
TopDocsByField {
collector: TopCollector::with_limit(limit),
fast_field: None,
field,
}
}
/// Returns K best documents sorted the given field name in decreasing order.
///
/// Calling this method triggers the sort.
/// The result of the sort is not cached.
pub fn docs(&self) -> Vec<DocAddress> {
self.collector.docs()
}
/// Returns K best FieldDocuments sorted in decreasing order.
///
/// Calling this method triggers the sort.
/// The result of the sort is not cached.
pub fn top_docs(&self) -> Vec<(T, DocAddress)> {
self.collector.top_docs()
}
/// Return true iff at least K documents have gone through
/// the collector.
#[inline]
pub fn at_capacity(&self) -> bool {
self.collector.at_capacity()
}
}
impl<T: FastValue + PartialOrd + Clone> Collector for TopFieldCollector<T> {
fn set_segment(&mut self, segment_id: u32, segment: &SegmentReader) -> Result<()> {
self.collector.set_segment_id(segment_id);
self.fast_field = Some(segment.fast_field_reader(self.field)?);
Ok(())
}
impl<T: FastValue + PartialOrd + Send + Sync + 'static> Collector for TopDocsByField<T> {
type Fruit = Vec<(T, DocAddress)>;
fn collect(&mut self, doc: DocId, _score: Score) {
let field_value = self
.fast_field
.as_ref()
.expect("collect() was called before set_segment. This should never happen.")
.get(doc);
self.collector.collect(doc, field_value);
type Child = TopFieldSegmentCollector<T>;
fn for_segment(
&self,
segment_local_id: SegmentLocalId,
reader: &SegmentReader,
) -> Result<TopFieldSegmentCollector<T>> {
let collector = self.collector.for_segment(segment_local_id, reader)?;
let reader = reader.fast_field_reader(self.field)?;
Ok(TopFieldSegmentCollector { collector, reader })
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(
&self,
segment_fruits: Vec<Vec<(T, DocAddress)>>,
) -> Result<Vec<(T, DocAddress)>> {
self.collector.merge_fruits(segment_fruits)
}
}
pub struct TopFieldSegmentCollector<T: FastValue + PartialOrd> {
collector: TopSegmentCollector<T>,
reader: FastFieldReader<T>,
}
impl<T: FastValue + PartialOrd + Send + Sync + 'static> SegmentCollector
for TopFieldSegmentCollector<T>
{
type Fruit = Vec<(T, DocAddress)>;
fn collect(&mut self, doc: u32, _score: f32) {
let field_value = self.reader.get(doc);
self.collector.collect(doc, field_value);
}
fn harvest(self) -> Vec<(T, DocAddress)> {
self.collector.harvest()
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::TopDocsByField;
use collector::Collector;
use collector::TopDocs;
use query::Query;
use query::QueryParser;
use schema::Field;
use schema::IntOptions;
use schema::Schema;
use schema::{SchemaBuilder, FAST, TEXT};
use schema::{Schema, FAST, TEXT};
use DocAddress;
use Index;
use IndexWriter;
use TantivyError;
@@ -158,7 +153,7 @@ mod tests {
#[test]
fn test_top_collector_not_at_capacity() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build();
@@ -178,22 +173,22 @@ mod tests {
});
let searcher = index.searcher();
let mut top_collector = TopFieldCollector::with_limit(size, 4);
searcher.search(&*query, &mut top_collector).unwrap();
assert!(!top_collector.at_capacity());
let score_docs: Vec<(u64, DocId)> = top_collector
.top_docs()
.into_iter()
.map(|(field, doc_address)| (field, doc_address.doc()))
.collect();
assert_eq!(score_docs, vec![(64, 1), (16, 2), (12, 0)]);
let top_collector = TopDocs::with_limit(4).order_by_field(size);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
assert_eq!(
top_docs,
vec![
(64, DocAddress(0, 1)),
(16, DocAddress(0, 2)),
(12, DocAddress(0, 0))
]
);
}
#[test]
#[should_panic]
fn test_field_does_not_exist() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build();
@@ -204,14 +199,16 @@ mod tests {
));
});
let searcher = index.searcher();
let segment = searcher.segment_reader(0);
let mut top_collector: TopFieldCollector<u64> = TopFieldCollector::with_limit(Field(2), 4);
let _ = top_collector.set_segment(0, segment);
let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(Field(2));
let segment_reader = searcher.segment_reader(0u32);
top_collector
.for_segment(0, segment_reader)
.expect("should panic");
}
#[test]
fn test_field_not_fast_field() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, IntOptions::default());
let schema = schema_builder.build();
@@ -223,26 +220,16 @@ mod tests {
});
let searcher = index.searcher();
let segment = searcher.segment_reader(0);
let mut top_collector: TopFieldCollector<u64> = TopFieldCollector::with_limit(size, 4);
let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(size);
assert_matches!(
top_collector.set_segment(0, segment),
Err(TantivyError::FastFieldError(_))
top_collector
.for_segment(0, segment)
.map(|_| ())
.unwrap_err(),
TantivyError::FastFieldError(_)
);
}
#[test]
#[should_panic]
fn test_collect_before_set_segment() {
let mut top_collector: TopFieldCollector<u64> = TopFieldCollector::with_limit(Field(0), 4);
top_collector.collect(0, 0f32);
}
#[test]
#[should_panic]
fn test_top_0() {
let _: TopFieldCollector<u64> = TopFieldCollector::with_limit(Field(0), 0);
}
fn index(
query: &str,
query_field: Field,

View File

@@ -1,5 +1,10 @@
use super::Collector;
use collector::top_collector::TopCollector;
use collector::top_collector::TopSegmentCollector;
use collector::SegmentCollector;
use collector::TopDocsByField;
use fastfield::FastValue;
use schema::Field;
use DocAddress;
use DocId;
use Result;
@@ -17,14 +22,15 @@ use SegmentReader;
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT};
/// use tantivy::{Index, Result, DocId, Score};
/// use tantivy::collector::TopScoreCollector;
/// use tantivy::DocAddress;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::TopDocs;
/// use tantivy::query::QueryParser;
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new();
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
@@ -48,140 +54,147 @@ use SegmentReader;
/// index.load_searchers()?;
/// let searcher = index.searcher();
///
/// {
/// let mut top_collector = TopScoreCollector::with_limit(2);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// searcher.search(&*query, &mut top_collector).unwrap();
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2))?;
///
/// let score_docs: Vec<(Score, DocId)> = top_collector
/// .top_docs()
/// .into_iter()
/// .map(|(score, doc_address)| (score, doc_address.doc()))
/// .collect();
///
/// assert_eq!(score_docs, vec![(0.7261542, 1), (0.6099695, 3)]);
/// }
/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
///
/// Ok(())
/// }
/// ```
pub struct TopScoreCollector {
collector: TopCollector<Score>,
}
pub struct TopDocs(TopCollector<Score>);
impl TopScoreCollector {
impl TopDocs {
/// Creates a top score collector, with a number of documents equal to "limit".
///
/// # Panics
/// The method panics if limit is 0
pub fn with_limit(limit: usize) -> TopScoreCollector {
TopScoreCollector {
collector: TopCollector::with_limit(limit),
}
pub fn with_limit(limit: usize) -> TopDocs {
TopDocs(TopCollector::with_limit(limit))
}
/// Returns K best scored documents sorted in decreasing order.
/// Set top-K to rank documents by a given fast field.
///
/// Calling this method triggers the sort.
/// The result of the sort is not cached.
pub fn docs(&self) -> Vec<DocAddress> {
self.collector.docs()
}
/// Returns K best ScoredDocuments sorted in decreasing order.
///
/// Calling this method triggers the sort.
/// The result of the sort is not cached.
pub fn top_docs(&self) -> Vec<(Score, DocAddress)> {
self.collector.top_docs()
}
/// Returns K best ScoredDocuments sorted in decreasing order.
///
/// Calling this method triggers the sort.
/// The result of the sort is not cached.
#[deprecated]
pub fn score_docs(&self) -> Vec<(Score, DocAddress)> {
self.collector.top_docs()
}
/// Return true iff at least K documents have gone through
/// the collector.
#[inline]
pub fn at_capacity(&self) -> bool {
self.collector.at_capacity()
/// (By default, `TopDocs` collects the top-K documents sorted by
/// the similarity score.)
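///
/// A minimal sketch, assuming `rating` was declared as a fast `u64` field:
///
/// `let top_docs_by_rating = TopDocs::with_limit(10).order_by_field::<u64>(rating);`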
pub fn order_by_field<T: PartialOrd + FastValue + Clone>(
self,
field: Field,
) -> TopDocsByField<T> {
TopDocsByField::new(field, self.0.limit())
}
}
impl Collector for TopScoreCollector {
fn set_segment(&mut self, segment_id: SegmentLocalId, _: &SegmentReader) -> Result<()> {
self.collector.set_segment_id(segment_id);
Ok(())
}
impl Collector for TopDocs {
type Fruit = Vec<(Score, DocAddress)>;
fn collect(&mut self, doc: DocId, score: Score) {
self.collector.collect(doc, score);
type Child = TopScoreSegmentCollector;
fn for_segment(
&self,
segment_local_id: SegmentLocalId,
reader: &SegmentReader,
) -> Result<Self::Child> {
let collector = self.0.for_segment(segment_local_id, reader)?;
Ok(TopScoreSegmentCollector(collector))
}
fn requires_scoring(&self) -> bool {
true
}
fn merge_fruits(&self, child_fruits: Vec<Vec<(Score, DocAddress)>>) -> Result<Self::Fruit> {
self.0.merge_fruits(child_fruits)
}
}
/// Segment Collector associated to `TopDocs`.
pub struct TopScoreSegmentCollector(TopSegmentCollector<Score>);
impl SegmentCollector for TopScoreSegmentCollector {
type Fruit = Vec<(Score, DocAddress)>;
fn collect(&mut self, doc: DocId, score: Score) {
self.0.collect(doc, score)
}
fn harvest(self) -> Vec<(Score, DocAddress)> {
self.0.harvest()
}
}
#[cfg(test)]
mod tests {
use super::*;
use collector::Collector;
use DocId;
use super::TopDocs;
use query::QueryParser;
use schema::Schema;
use schema::TEXT;
use DocAddress;
use Index;
use Score;
fn make_index() -> Index {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"Hello happy tax payer."));
index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer"));
index_writer.add_document(doc!(text_field=>"I like Droopy"));
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
index
}
#[test]
fn test_top_collector_not_at_capacity() {
let mut top_collector = TopScoreCollector::with_limit(4);
top_collector.collect(1, 0.8);
top_collector.collect(3, 0.2);
top_collector.collect(5, 0.3);
assert!(!top_collector.at_capacity());
let score_docs: Vec<(Score, DocId)> = top_collector
.top_docs()
.into_iter()
.map(|(score, doc_address)| (score, doc_address.doc()))
.collect();
assert_eq!(score_docs, vec![(0.8, 1), (0.3, 5), (0.2, 3)]);
let index = make_index();
let field = index.schema().get_field("text").unwrap();
let query_parser = QueryParser::for_index(&index, vec![field]);
let text_query = query_parser.parse_query("droopy tax").unwrap();
let score_docs: Vec<(Score, DocAddress)> = index
.searcher()
.search(&text_query, &TopDocs::with_limit(4))
.unwrap();
assert_eq!(
score_docs,
vec![
(0.81221175, DocAddress(0u32, 1)),
(0.5376842, DocAddress(0u32, 2)),
(0.48527452, DocAddress(0, 0))
]
);
}
#[test]
fn test_top_collector_at_capacity() {
let mut top_collector = TopScoreCollector::with_limit(4);
top_collector.collect(1, 0.8);
top_collector.collect(3, 0.2);
top_collector.collect(5, 0.3);
top_collector.collect(7, 0.9);
top_collector.collect(9, -0.2);
assert!(top_collector.at_capacity());
{
let score_docs: Vec<(Score, DocId)> = top_collector
.top_docs()
.into_iter()
.map(|(score, doc_address)| (score, doc_address.doc()))
.collect();
assert_eq!(score_docs, vec![(0.9, 7), (0.8, 1), (0.3, 5), (0.2, 3)]);
}
{
let docs: Vec<DocId> = top_collector
.docs()
.into_iter()
.map(|doc_address| doc_address.doc())
.collect();
assert_eq!(docs, vec![7, 1, 5, 3]);
}
let index = make_index();
let field = index.schema().get_field("text").unwrap();
let query_parser = QueryParser::for_index(&index, vec![field]);
let text_query = query_parser.parse_query("droopy tax").unwrap();
let score_docs: Vec<(Score, DocAddress)> = index
.searcher()
.search(&text_query, &TopDocs::with_limit(2))
.unwrap();
assert_eq!(
score_docs,
vec![
(0.81221175, DocAddress(0u32, 1)),
(0.5376842, DocAddress(0u32, 2)),
]
);
}
#[test]
#[should_panic]
fn test_top_0() {
TopScoreCollector::with_limit(0);
TopDocs::with_limit(0);
}
}

View File

@@ -4,8 +4,8 @@ use common::VInt;
use directory::ReadOnlySource;
use directory::WritePtr;
use schema::Field;
use space_usage::PerFieldSpaceUsage;
use space_usage::FieldUsage;
use space_usage::PerFieldSpaceUsage;
use std::collections::HashMap;
use std::io::Write;
use std::io::{self, Read};
@@ -172,7 +172,8 @@ impl CompositeFile {
pub fn space_usage(&self) -> PerFieldSpaceUsage {
let mut fields = HashMap::new();
for (&field_addr, &(start, end)) in self.offsets_index.iter() {
fields.entry(field_addr.field)
fields
.entry(field_addr.field)
.or_insert_with(|| FieldUsage::empty(field_addr.field))
.add_field_idx(field_addr.idx, end - start);
}

src/core/executor.rs Normal file
View File

@@ -0,0 +1,134 @@
use crossbeam::channel;
use scoped_pool::{Pool, ThreadConfig};
use Result;
/// Search executor for running search requests, either single-threaded or on a thread pool.
///
/// We don't expose Rayon thread pool directly here for several reasons.
///
/// First, dependency hell: it is not a good idea to expose the
/// API of a dependency, knowing it might conflict with a different version
/// used by the client. Second, we may stop using rayon in the future.
pub enum Executor {
SingleThread,
ThreadPool(Pool),
}
impl Executor {
/// Creates an Executor that performs all tasks in the caller's thread.
pub fn single_thread() -> Executor {
Executor::SingleThread
}
/// Creates an Executor that dispatches the tasks in a thread pool.
pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Executor {
let thread_config = ThreadConfig::new().prefix(prefix);
let pool = Pool::with_thread_config(num_threads, thread_config);
Executor::ThreadPool(pool)
}
/// Performs a map in the thread pool.
///
/// Regardless of the executor (`SingleThread` or `ThreadPool`), panics in the task
/// will propagate to the caller.
pub fn map<
A: Send,
R: Send,
AIterator: Iterator<Item = A>,
F: Sized + Sync + Fn(A) -> Result<R>,
>(
&self,
f: F,
args: AIterator,
) -> Result<Vec<R>> {
match self {
Executor::SingleThread => args.map(f).collect::<Result<_>>(),
Executor::ThreadPool(pool) => {
let args_with_indices: Vec<(usize, A)> = args.enumerate().collect();
let num_fruits = args_with_indices.len();
let fruit_receiver = {
let (fruit_sender, fruit_receiver) = channel::unbounded();
pool.scoped(|scope| {
for arg_with_idx in args_with_indices {
scope.execute(|| {
let (idx, arg) = arg_with_idx;
let fruit = f(arg);
if let Err(err) = fruit_sender.send((idx, fruit)) {
error!("Failed to send search task. It probably means all search threads have panicked. {:?}", err);
}
});
}
});
fruit_receiver
// This ends the scope of fruit_sender.
// This is important as it makes it possible for the fruit_receiver iteration to
// terminate.
};
let mut results = Vec::with_capacity(num_fruits);
unsafe { results.set_len(num_fruits) };
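// Every one of the `num_fruits` slots is written exactly once by the loop
// below (each task sends a distinct index); the assert afterwards checks
// that no slot was left uninitialized.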
let mut num_items = 0;
for (pos, fruit_res) in fruit_receiver {
results[pos] = fruit_res?;
num_items += 1;
}
// this check ensures that we filled all of this
// uninitialized memory.
assert_eq!(num_items, results.len());
Ok(results)
}
}
}
}
#[cfg(test)]
mod tests {
use super::Executor;
#[test]
#[should_panic(expected = "panic should propagate")]
fn test_panic_propagates_single_thread() {
let _result: Vec<usize> = Executor::single_thread()
.map(
|_| {
panic!("panic should propagate");
},
vec![0].into_iter(),
).unwrap();
}
#[test]
#[should_panic] //< unfortunately the panic message is not propagated
fn test_panic_propagates_multi_thread() {
let _result: Vec<usize> = Executor::multi_thread(1, "search-test")
.map(
|_| {
panic!("panic should propagate");
},
vec![0].into_iter(),
).unwrap();
}
#[test]
fn test_map_singlethread() {
let result: Vec<usize> = Executor::single_thread()
.map(|i| Ok(i * 2), 0..1_000)
.unwrap();
assert_eq!(result.len(), 1_000);
for i in 0..1_000 {
assert_eq!(result[i], i * 2);
}
}
#[test]
fn test_map_multithread() {
let result: Vec<usize> = Executor::multi_thread(3, "search-test")
.map(|i| Ok(i * 2), 0..10)
.unwrap();
assert_eq!(result.len(), 10);
for i in 0..10 {
assert_eq!(result[i], i * 2);
}
}
}

View File

@@ -3,6 +3,7 @@ use super::pool::Pool;
use super::segment::create_segment;
use super::segment::Segment;
use core::searcher::Searcher;
use core::Executor;
use core::IndexMeta;
use core::SegmentId;
use core::SegmentMeta;
@@ -45,6 +46,7 @@ pub struct Index {
schema: Schema,
num_searchers: Arc<AtomicUsize>,
searcher_pool: Arc<Pool<Searcher>>,
executor: Arc<Executor>,
tokenizers: TokenizerManager,
}
@@ -54,6 +56,29 @@ impl Index {
dir.exists(&META_FILEPATH)
}
/// Accessor to the search executor.
///
/// This pool is used by default when calling `searcher.search(...)`
/// to perform search on the individual segments.
///
/// By default the executor is single-threaded, and simply runs searches in the calling thread.
pub fn search_executor(&self) -> &Executor {
self.executor.as_ref()
}
/// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads.
pub fn set_multithread_executor(&mut self, num_threads: usize) {
self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-"));
}
/// Replace the default single thread search executor pool
/// by a thread pool with one thread per available CPU core.
pub fn set_default_multithread_executor(&mut self) {
let default_num_threads = num_cpus::get();
self.set_multithread_executor(default_num_threads);
}
/// Creates a new index using the `RAMDirectory`.
///
/// The index will be allocated in anonymous memory.
@@ -85,7 +110,9 @@ impl Index {
if index.schema() == schema {
Ok(index)
} else {
Err(TantivyError::SchemaError("An index exists but the schema does not match.".to_string()))
Err(TantivyError::SchemaError(
"An index exists but the schema does not match.".to_string(),
))
}
} else {
Index::create(dir, schema)
@@ -131,6 +158,7 @@ impl Index {
num_searchers: Arc::new(AtomicUsize::new(n_cpus)),
searcher_pool: Arc::new(Pool::new()),
tokenizers: TokenizerManager::default(),
executor: Arc::new(Executor::single_thread()),
};
index.load_searchers()?;
Ok(index)
@@ -348,19 +376,20 @@ impl Clone for Index {
num_searchers: Arc::clone(&self.num_searchers),
searcher_pool: Arc::clone(&self.searcher_pool),
tokenizers: self.tokenizers.clone(),
executor: self.executor.clone(),
}
}
}
#[cfg(test)]
mod tests {
use schema::{Schema, SchemaBuilder, INT_INDEXED, TEXT};
use Index;
use directory::RAMDirectory;
use schema::{Schema, INT_INDEXED, TEXT};
use Index;
#[test]
fn test_indexer_for_field() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let num_likes_field = schema_builder.add_u64_field("num_likes", INT_INDEXED);
let body_field = schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build();
@@ -388,7 +417,6 @@ mod tests {
assert!(Index::exists(&directory));
}
#[test]
fn open_or_create_should_open() {
let directory = RAMDirectory::create();
@@ -402,7 +430,7 @@ mod tests {
let directory = RAMDirectory::create();
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
assert!(Index::exists(&directory));
assert!(Index::create(directory.clone(), SchemaBuilder::default().build()).is_ok());
assert!(Index::create(directory.clone(), Schema::builder().build()).is_ok());
}
#[test]
@@ -411,12 +439,15 @@ mod tests {
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
assert!(Index::exists(&directory));
assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
let err = Index::open_or_create(directory, SchemaBuilder::default().build());
assert_eq!(format!("{:?}", err.unwrap_err()), "SchemaError(\"An index exists but the schema does not match.\")");
let err = Index::open_or_create(directory, Schema::builder().build());
assert_eq!(
format!("{:?}", err.unwrap_err()),
"SchemaError(\"An index exists but the schema does not match.\")"
);
}
fn throw_away_schema() -> Schema {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let _ = schema_builder.add_u64_field("num_likes", INT_INDEXED);
schema_builder.build()
}

View File

@@ -46,13 +46,13 @@ impl fmt::Debug for IndexMeta {
mod tests {
use super::IndexMeta;
use schema::{SchemaBuilder, TEXT};
use schema::{Schema, TEXT};
use serde_json;
#[test]
fn test_serialize_metas() {
let schema = {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("text", TEXT);
schema_builder.build()
};

View File

@@ -1,3 +1,4 @@
mod executor;
pub mod index;
mod index_meta;
mod inverted_index_reader;
@@ -9,6 +10,7 @@ mod segment_id;
mod segment_meta;
mod segment_reader;
pub use self::executor::Executor;
pub use self::index::Index;
pub use self::index_meta::IndexMeta;
pub use self::inverted_index_reader::InvertedIndexReader;

View File

@@ -1,18 +1,43 @@
use collector::Collector;
use collector::SegmentCollector;
use core::Executor;
use core::InvertedIndexReader;
use core::SegmentReader;
use query::Query;
use query::Scorer;
use query::Weight;
use schema::Document;
use schema::Schema;
use schema::{Field, Term};
use space_usage::SearcherSpaceUsage;
use std::fmt;
use std::sync::Arc;
use store::StoreReader;
use termdict::TermMerger;
use DocAddress;
use Index;
use Result;
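// Collects a single segment: builds the weight's scorer for the segment, feeds
// every matching (non-deleted) document to a fresh `SegmentCollector`, and
// returns the harvested fruit.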
fn collect_segment<C: Collector>(
collector: &C,
weight: &Weight,
segment_ord: u32,
segment_reader: &SegmentReader,
) -> Result<C::Fruit> {
let mut scorer = weight.scorer(segment_reader)?;
let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?;
if let Some(delete_bitset) = segment_reader.delete_bitset() {
scorer.for_each(&mut |doc, score| {
if !delete_bitset.is_deleted(doc) {
segment_collector.collect(doc, score);
}
});
} else {
scorer.for_each(&mut |doc, score| segment_collector.collect(doc, score));
}
Ok(segment_collector.harvest())
}
/// Holds a list of `SegmentReader`s ready for search.
///
/// It guarantees that the `Segment` will not be removed before
@@ -22,6 +47,7 @@ pub struct Searcher {
schema: Schema,
index: Index,
segment_readers: Vec<SegmentReader>,
store_readers: Vec<StoreReader>,
}
impl Searcher {
@@ -31,10 +57,15 @@ impl Searcher {
index: Index,
segment_readers: Vec<SegmentReader>,
) -> Searcher {
let store_readers = segment_readers
.iter()
.map(|segment_reader| segment_reader.get_store_reader())
.collect();
Searcher {
schema,
index,
segment_readers,
store_readers,
}
}
@@ -49,8 +80,8 @@ impl Searcher {
/// the request to the right `Segment`.
pub fn doc(&self, doc_address: DocAddress) -> Result<Document> {
let DocAddress(segment_local_id, doc_id) = doc_address;
let segment_reader = &self.segment_readers[segment_local_id as usize];
segment_reader.doc(doc_id)
let store_reader = &self.store_readers[segment_local_id as usize];
store_reader.get(doc_id)
}
/// Access the schema associated to the index of this searcher.
@@ -86,9 +117,58 @@ impl Searcher {
&self.segment_readers[segment_ord as usize]
}
/// Runs a query on the segment readers wrapped by the searcher
pub fn search<C: Collector>(&self, query: &Query, collector: &mut C) -> Result<()> {
query.search(self, collector)
/// Runs a query on the segment readers wrapped by the searcher.
///
/// Search works as follows:
///
/// First, the weight object associated with the query is created.
///
/// Then, the query loops over the segments and, for each segment:
/// - creates a `SegmentCollector` for collecting documents associated with the segment
/// - creates a `Scorer` object for this segment
/// - iterates through the matched documents and pushes them to the segment collector.
///
/// Finally, the `Collector` merges the fruits harvested from each of its
/// `SegmentCollector`s into the final result returned to the caller.
pub fn search<C: Collector>(&self, query: &Query, collector: &C) -> Result<C::Fruit> {
let executor = self.index.search_executor();
self.search_with_executor(query, collector, executor)
}
/// Same as [`search(...)`](#method.search) but multithreaded.
///
/// The current implementation is rather naive:
/// multithreading works by splitting the search into as many tasks
/// as there are segments.
///
/// It cannot make search any faster if your index consists of
/// one large segment.
///
/// Also, keep in mind that spreading a single query over several
/// threads will not improve your throughput. It can actually
/// hurt it. It will, however, decrease the average response time.
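///
/// A minimal sketch, assuming a `searcher` and a parsed, boxed `query`
/// (names are illustrative):
///
/// `let executor = Executor::multi_thread(4, "tantivy-search-");`
/// `let top_docs = searcher.search_with_executor(&*query, &TopDocs::with_limit(10), &executor)?;`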
pub fn search_with_executor<C: Collector>(
&self,
query: &Query,
collector: &C,
executor: &Executor,
) -> Result<C::Fruit> {
let scoring_enabled = collector.requires_scoring();
let weight = query.weight(self, scoring_enabled)?;
let segment_readers = self.segment_readers();
let fruits = executor.map(
|(segment_ord, segment_reader)| {
collect_segment(
collector,
weight.as_ref(),
segment_ord as u32,
segment_reader,
)
},
segment_readers.iter().enumerate(),
)?;
collector.merge_fruits(fruits)
}
/// Return the field searcher associated to a `Field`.

View File

@@ -4,6 +4,7 @@ use core::InvertedIndexReader;
use core::Segment;
use core::SegmentComponent;
use core::SegmentId;
use directory::ReadOnlySource;
use error::TantivyError;
use fastfield::DeleteBitSet;
use fastfield::FacetReader;
@@ -12,7 +13,6 @@ use fastfield::{self, FastFieldNotAvailableError};
use fastfield::{BytesFastFieldReader, FastValue, MultiValueIntFastFieldReader};
use fieldnorm::FieldNormReader;
use schema::Cardinality;
use schema::Document;
use schema::Field;
use schema::FieldType;
use schema::Schema;
@@ -54,7 +54,7 @@ pub struct SegmentReader {
fast_fields_composite: CompositeFile,
fieldnorms_composite: CompositeFile,
store_reader: StoreReader,
store_source: ReadOnlySource,
delete_bitset_opt: Option<DeleteBitSet>,
schema: Schema,
}
@@ -197,8 +197,7 @@ impl SegmentReader {
/// Accessor to the segment's `Field norms`'s reader.
///
/// Field norms are the length (in tokens) of the fields.
/// It is used in the computation of the [TfIdf]
/// (https://fulmicoton.gitbooks.io/tantivy-doc/content/tfidf.html).
/// It is used in the computation of the [TfIdf](https://fulmicoton.gitbooks.io/tantivy-doc/content/tfidf.html).
///
/// They are simply stored as a fast field, serialized in
/// the `.fieldnorm` file of the segment.
@@ -216,8 +215,8 @@ impl SegmentReader {
}
/// Accessor to the segment's `StoreReader`.
pub fn get_store_reader(&self) -> &StoreReader {
&self.store_reader
pub fn get_store_reader(&self) -> StoreReader {
StoreReader::from_source(self.store_source.clone())
}
/// Open a new segment for reading.
@@ -226,7 +225,6 @@ impl SegmentReader {
let termdict_composite = CompositeFile::open(&termdict_source)?;
let store_source = segment.open_read(SegmentComponent::STORE)?;
let store_reader = StoreReader::from_source(store_source);
fail_point!("SegmentReader::open#middle");
@@ -272,7 +270,7 @@ impl SegmentReader {
fast_fields_composite,
fieldnorms_composite,
segment_id: segment.id(),
store_reader,
store_source,
delete_bitset_opt,
positions_composite,
positions_idx_composite,
@@ -351,14 +349,6 @@ impl SegmentReader {
inv_idx_reader
}
/// Returns the document (or to be accurate, its stored field)
/// bearing the given doc id.
/// This method is slow and should seldom be called from
/// within a collector.
pub fn doc(&self, doc_id: DocId) -> Result<Document> {
self.store_reader.get(doc_id)
}
/// Returns the segment id
pub fn segment_id(&self) -> SegmentId {
self.segment_id
@@ -393,8 +383,11 @@ impl SegmentReader {
self.positions_idx_composite.space_usage(),
self.fast_fields_composite.space_usage(),
self.fieldnorms_composite.space_usage(),
self.store_reader.space_usage(),
self.delete_bitset_opt.as_ref().map(|x| x.space_usage()).unwrap_or(0),
self.get_store_reader().space_usage(),
self.delete_bitset_opt
.as_ref()
.map(|x| x.space_usage())
.unwrap_or(0),
)
}
}
@@ -454,12 +447,12 @@ impl<'a> Iterator for SegmentReaderAliveDocsIterator<'a> {
#[cfg(test)]
mod test {
use core::Index;
use schema::{SchemaBuilder, Term, STORED, TEXT};
use schema::{Schema, Term, STORED, TEXT};
use DocId;
#[test]
fn test_alive_docs_iterator() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("name", TEXT | STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());

View File

@@ -15,13 +15,13 @@ use std::sync::PoisonError;
#[derive(Debug, Fail)]
pub enum TantivyError {
/// Path does not exist.
#[fail(display = "path does not exist: '{:?}'", _0)]
#[fail(display = "Path does not exist: '{:?}'", _0)]
PathDoesNotExist(PathBuf),
/// File already exists, this is a problem when we try to write into a new file.
#[fail(display = "file already exists: '{:?}'", _0)]
#[fail(display = "File already exists: '{:?}'", _0)]
FileAlreadyExists(PathBuf),
/// Index already exists in this directory
#[fail(display = "index already exists")]
#[fail(display = "Index already exists")]
IndexAlreadyExists,
/// Failed to acquire file lock
#[fail(
@@ -30,28 +30,29 @@ pub enum TantivyError {
)]
LockFailure(LockType),
/// IO Error.
#[fail(display = "an IO error occurred: '{}'", _0)]
#[fail(display = "An IO error occurred: '{}'", _0)]
IOError(#[cause] IOError),
/// The data within is corrupted.
///
/// For instance, it contains invalid JSON.
#[fail(display = "file contains corrupted data: '{:?}'", _0)]
/// Data corruption.
#[fail(display = "File contains corrupted data: '{:?}'", _0)]
CorruptedFile(PathBuf),
/// A thread holding the lock panicked and poisoned the lock.
#[fail(display = "a thread holding the lock panicked and poisoned the lock")]
#[fail(display = "A thread holding the lock panicked and poisoned the lock")]
Poisoned,
/// Invalid argument was passed by the user.
#[fail(display = "an invalid argument was passed: '{}'", _0)]
#[fail(display = "An invalid argument was passed: '{}'", _0)]
InvalidArgument(String),
/// An error happened in one of the threads.
#[fail(display = "an error occurred in a thread: '{}'", _0)]
#[fail(display = "An error occurred in a thread: '{}'", _0)]
ErrorInThread(String),
/// An error related to the schema occurred.
#[fail(display = "Schema error: '{}'", _0)]
SchemaError(String),
/// Tried to access a fastfield reader for a field not configured accordingly.
#[fail(display = "fast field not available: '{:?}'", _0)]
#[fail(display = "Fast field not available: '{:?}'", _0)]
FastFieldError(#[cause] FastFieldNotAvailableError),
/// System error (e.g. we failed spawning a new thread).
#[fail(display = "System error: '{}'", _0)]
SystemError(String),
}
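A minimal sketch of consuming these variants at a call site (assuming the crate-root re-export `tantivy::TantivyError`):

use tantivy::TantivyError;

fn describe(err: &TantivyError) -> String {
    match err {
        TantivyError::PathDoesNotExist(path) => format!("missing path: {:?}", path),
        TantivyError::IndexAlreadyExists => "index already exists".to_string(),
        // Every variant implements Display through the `Fail` derive.
        other => format!("{}", other),
    }
}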
impl From<FastFieldNotAvailableError> for TantivyError {

View File

@@ -6,12 +6,12 @@ pub use self::writer::BytesFastFieldWriter;
#[cfg(test)]
mod tests {
use schema::SchemaBuilder;
use schema::Schema;
use Index;
#[test]
fn test_bytes() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let field = schema_builder.add_bytes_field("bytesfield");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);

View File

@@ -127,19 +127,19 @@ mod tests {
use common::CompositeFile;
use directory::{Directory, RAMDirectory, WritePtr};
use fastfield::FastFieldReader;
use rand::Rng;
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
use rand::SeedableRng;
use rand::XorShiftRng;
use schema::Document;
use schema::Field;
use schema::Schema;
use schema::FAST;
use schema::{Schema, SchemaBuilder};
use std::collections::HashMap;
use std::path::Path;
lazy_static! {
pub static ref SCHEMA: Schema = {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
schema_builder.add_u64_field("field", FAST);
schema_builder.build()
};
@@ -298,7 +298,7 @@ mod tests {
fn test_signed_intfastfield() {
let path = Path::new("test");
let mut directory: RAMDirectory = RAMDirectory::create();
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let i64_field = schema_builder.add_i64_field("field", FAST);
let schema = schema_builder.build();
@@ -342,7 +342,7 @@ mod tests {
fn test_signed_intfastfield_default_val() {
let path = Path::new("test");
let mut directory: RAMDirectory = RAMDirectory::create();
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let i64_field = schema_builder.add_i64_field("field", FAST);
let schema = schema_builder.build();
@@ -367,11 +367,10 @@ mod tests {
}
}
// Warning: this generates the same permutation at each call
pub fn generate_permutation() -> Vec<u64> {
let seed: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut rng = XorShiftRng::from_seed(seed);
let mut permutation: Vec<u64> = (0u64..100_000u64).collect();
rng.shuffle(&mut permutation);
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
permutation
}
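The warning above holds because the seed is constant; a quick sketch of the determinism under rand 0.6:

use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};

// Two StdRngs built from the same 32-byte seed produce identical streams.
fn seeded_rngs_agree() {
    let mut a = StdRng::from_seed([1u8; 32]);
    let mut b = StdRng::from_seed([1u8; 32]);
    assert_eq!(a.gen::<u64>(), b.gen::<u64>());
}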

View File

@@ -9,12 +9,12 @@ mod tests {
use schema::Cardinality;
use schema::IntOptions;
use schema::SchemaBuilder;
use schema::Schema;
use Index;
#[test]
fn test_multivalued_u64() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field(
"multifield",
IntOptions::default().set_fast(Cardinality::MultiValues),
@@ -49,7 +49,7 @@ mod tests {
#[test]
fn test_multivalued_i64() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let field = schema_builder.add_i64_field(
"multifield",
IntOptions::default().set_fast(Cardinality::MultiValues),

View File

@@ -47,11 +47,11 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
mod tests {
use core::Index;
use schema::{Document, Facet, SchemaBuilder};
use schema::{Document, Facet, Schema};
#[test]
fn test_multifastfield_reader() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facets");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);

View File

@@ -7,7 +7,7 @@ use directory::ReadOnlySource;
use directory::{Directory, RAMDirectory, WritePtr};
use fastfield::{FastFieldSerializer, FastFieldsWriter};
use owning_ref::OwningRef;
use schema::SchemaBuilder;
use schema::Schema;
use schema::FAST;
use std::collections::HashMap;
use std::marker::PhantomData;
@@ -108,7 +108,7 @@ impl<Item: FastValue> FastFieldReader<Item> {
impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
fn from(vals: Vec<Item>) -> FastFieldReader<Item> {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("field", FAST);
let schema = schema_builder.build();
let path = Path::new("__dummy__");

View File

@@ -15,7 +15,7 @@
//! precompute computationally expensive functions of the fieldnorm
//! in a very short array.
//!
//! This trick is used by the [BM25 similarity]().
//! This trick is used by the BM25 similarity.
mod code;
mod reader;
mod serializer;
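A hedged sketch of the precomputation trick described above: if each fieldnorm is stored as a one-byte code, any expensive function of the fieldnorm can be tabulated once into 256 entries and then looked up per document in O(1).

// `decode` maps a one-byte fieldnorm code back to an approximate length;
// `f` is the expensive function (e.g. a BM25 weight component).
fn precompute(f: impl Fn(u32) -> f32, decode: impl Fn(u8) -> u32) -> [f32; 256] {
    let mut cache = [0f32; 256];
    for code in 0..256usize {
        cache[code] = f(decode(code as u8));
    }
    cache
}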

View File

@@ -1,7 +1,6 @@
use rand::thread_rng;
use std::collections::HashSet;
use rand::distributions::Range;
use rand::Rng;
use schema::*;
use Index;
@@ -16,7 +15,7 @@ fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {
#[ignore]
#[cfg(feature = "mmap")]
fn test_indexing() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let id_field = schema_builder.add_u64_field("id", INT_INDEXED);
let multiples_field = schema_builder.add_u64_field("multiples", INT_INDEXED);
@@ -24,7 +23,6 @@ fn test_indexing() {
let index = Index::create_from_tempdir(schema).unwrap();
let universe = Range::new(0u64, 20u64);
let mut rng = thread_rng();
let mut index_writer = index.writer_with_num_threads(3, 120_000_000).unwrap();
@@ -33,7 +31,7 @@ fn test_indexing() {
let mut uncommitted_docs: HashSet<u64> = HashSet::new();
for _ in 0..200 {
let random_val = rng.sample(&universe);
let random_val = rng.gen_range(0, 20);
if random_val == 0 {
index_writer.commit().expect("Commit failed");
committed_docs.extend(&uncommitted_docs);

View File

@@ -8,7 +8,7 @@ use core::SegmentComponent;
use core::SegmentId;
use core::SegmentMeta;
use core::SegmentReader;
use crossbeam_channel as channel;
use crossbeam::channel;
use docset::DocSet;
use error::TantivyError;
use fastfield::write_delete_bitset;
@@ -388,11 +388,12 @@ impl IndexWriter {
let mem_budget = self.heap_size_in_bytes_per_thread;
let join_handle: JoinHandle<Result<()>> = thread::Builder::new()
.name(format!(
"indexing thread {} for gen {}",
"thrd-tantivy-index{}-gen{}",
self.worker_id, generation
)).spawn(move || {
loop {
let mut document_iterator = document_receiver_clone.clone().peekable();
let mut document_iterator =
document_receiver_clone.clone().into_iter().peekable();
// the peeking here is to avoid
// creating a new segment's files
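The comment above describes a peek-before-allocate pattern; a generic sketch:

// Only create a new segment's files once at least one document is pending.
fn drain_documents<I: Iterator<Item = u64>>(documents: I) {
    let mut it = documents.peekable();
    while it.peek().is_some() {
        // ...open a fresh segment here, then index documents from `it`...
        let _doc = it.next();
    }
}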
@@ -640,7 +641,10 @@ impl IndexWriter {
pub fn add_document(&mut self, document: Document) -> u64 {
let opstamp = self.stamper.stamp();
let add_operation = AddOperation { opstamp, document };
self.document_sender.send(add_operation);
let send_result = self.document_sender.send(add_operation);
if let Err(e) = send_result {
panic!("Failed to index document. Sending to indexing channel failed. This probably means all of the indexing threads have panicked. {:?}", e);
}
opstamp
}
}
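The panic above relies on crossbeam's channel semantics: a send fails only once every receiver is gone, which here means all indexing threads have died. A minimal sketch:

use crossbeam::channel;

fn send_fails_once_receivers_are_gone() {
    let (sender, receiver) = channel::unbounded::<u64>();
    drop(receiver);
    // With no receiver left, `send` returns an error instead of blocking.
    assert!(sender.send(1).is_err());
}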
@@ -657,7 +661,7 @@ mod tests {
#[test]
fn test_lockfile_stops_duplicates() {
let schema_builder = schema::SchemaBuilder::default();
let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build());
let _index_writer = index.writer(40_000_000).unwrap();
match index.writer(40_000_000) {
@@ -668,7 +672,7 @@ mod tests {
#[test]
fn test_lockfile_already_exists_error_msg() {
let schema_builder = schema::SchemaBuilder::default();
let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build());
let _index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
match index.writer_with_num_threads(1, 3_000_000) {
@@ -683,7 +687,7 @@ mod tests {
#[test]
fn test_set_merge_policy() {
let schema_builder = schema::SchemaBuilder::default();
let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build());
let index_writer = index.writer(40_000_000).unwrap();
assert_eq!(
@@ -701,7 +705,7 @@ mod tests {
#[test]
fn test_lockfile_released_on_drop() {
let schema_builder = schema::SchemaBuilder::default();
let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build());
{
let _index_writer = index.writer(40_000_000).unwrap();
@@ -713,7 +717,7 @@ mod tests {
#[test]
fn test_commit_and_rollback() {
let mut schema_builder = schema::SchemaBuilder::default();
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
@@ -747,7 +751,7 @@ mod tests {
#[test]
fn test_with_merges() {
let mut schema_builder = schema::SchemaBuilder::default();
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
let num_docs_containing = |s: &str| {
@@ -784,7 +788,7 @@ mod tests {
#[test]
fn test_prepare_with_commit_message() {
let mut schema_builder = schema::SchemaBuilder::default();
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
@@ -818,7 +822,7 @@ mod tests {
#[test]
fn test_prepare_but_rollback() {
let mut schema_builder = schema::SchemaBuilder::default();
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
@@ -866,7 +870,7 @@ mod tests {
#[test]
fn test_write_commit_fails() {
use fail;
let mut schema_builder = schema::SchemaBuilder::default();
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());

View File

@@ -614,7 +614,7 @@ impl IndexMerger {
store_writer.store(&doc)?;
}
} else {
store_writer.stack(store_reader)?;
store_writer.stack(&store_reader)?;
}
}
Ok(())
@@ -635,10 +635,9 @@ impl SerializableSegment for IndexMerger {
#[cfg(test)]
mod tests {
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use collector::chain;
use collector::tests::TestCollector;
use collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector};
use collector::FacetCollector;
use collector::{Count, FacetCollector};
use core::Index;
use futures::Future;
use query::AllQuery;
@@ -647,6 +646,7 @@ mod tests {
use schema;
use schema::Cardinality;
use schema::Document;
use schema::Facet;
use schema::IndexRecordOption;
use schema::IntOptions;
use schema::Term;
@@ -658,7 +658,7 @@ mod tests {
#[test]
fn test_index_merger_no_deletes() {
let mut schema_builder = schema::SchemaBuilder::default();
let mut schema_builder = schema::Schema::builder();
let text_fieldtype = schema::TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default()
@@ -742,27 +742,32 @@ mod tests {
index.load_searchers().unwrap();
let searcher = index.searcher();
let get_doc_ids = |terms: Vec<Term>| {
let mut collector = TestCollector::default();
let query = BooleanQuery::new_multiterms_query(terms);
assert!(searcher.search(&query, &mut collector).is_ok());
collector.docs()
let top_docs = searcher.search(&query, &TestCollector).unwrap();
top_docs.docs().to_vec()
};
{
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
vec![1, 2, 4]
vec![DocAddress(0, 1), DocAddress(0, 2), DocAddress(0, 4)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
vec![0, 3]
vec![DocAddress(0, 0), DocAddress(0, 3)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "g")]),
vec![4]
vec![DocAddress(0, 4)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
vec![0, 1, 2, 3, 4]
vec![
DocAddress(0, 0),
DocAddress(0, 1),
DocAddress(0, 2),
DocAddress(0, 3),
DocAddress(0, 4)
]
);
}
{
@@ -788,17 +793,17 @@ mod tests {
{
let get_fast_vals = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms);
let mut collector = FastFieldTestCollector::for_field(score_field);
assert!(searcher.search(&query, &mut collector).is_ok());
collector.vals()
searcher
.search(&query, &FastFieldTestCollector::for_field(score_field))
.unwrap()
};
let get_fast_vals_bytes = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms);
let mut collector = BytesFastFieldTestCollector::for_field(bytes_score_field);
searcher
.search(&query, &mut collector)
.expect("failed to search");
collector.vals()
.search(
&query,
&BytesFastFieldTestCollector::for_field(bytes_score_field),
).expect("failed to search")
};
assert_eq!(
get_fast_vals(vec![Term::from_field_text(text_field, "a")]),
@@ -814,7 +819,7 @@ mod tests {
#[test]
fn test_index_merger_with_deletes() {
let mut schema_builder = schema::SchemaBuilder::default();
let mut schema_builder = schema::Schema::builder();
let text_fieldtype = schema::TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
@@ -827,21 +832,13 @@ mod tests {
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let search_term = |searcher: &Searcher, term: Term| {
let mut collector = FastFieldTestCollector::for_field(score_field);
let mut bytes_collector = BytesFastFieldTestCollector::for_field(bytes_score_field);
let collector = FastFieldTestCollector::for_field(score_field);
let bytes_collector = BytesFastFieldTestCollector::for_field(bytes_score_field);
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
{
let mut combined_collector =
chain().push(&mut collector).push(&mut bytes_collector);
searcher
.search(&term_query, &mut combined_collector)
.unwrap();
}
let scores = collector.vals();
let mut score_bytes = Cursor::new(bytes_collector.vals());
let (scores, bytes) = searcher
.search(&term_query, &(collector, bytes_collector))
.unwrap();
let mut score_bytes = Cursor::new(bytes);
for &score in &scores {
assert_eq!(score as u32, score_bytes.read_u32::<BigEndian>().unwrap());
}
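The tuple `(collector, bytes_collector)` above showcases the refactored Collector API: a tuple of collectors behaves as a single collector whose fruit is the tuple of the individual fruits. A hedged sketch with the stock collectors:

use tantivy::collector::{Count, TopDocs};
use tantivy::query::Query;
use tantivy::{DocAddress, Result, Score, Searcher};

// Count matches and gather the ten best hits in a single pass.
fn count_and_top10(
    searcher: &Searcher,
    query: &Query,
) -> Result<(usize, Vec<(Score, DocAddress)>)> {
    searcher.search(query, &(Count, TopDocs::with_limit(10)))
}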
@@ -922,10 +919,10 @@ mod tests {
assert_eq!(searcher.segment_readers().len(), 2);
assert_eq!(searcher.num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].num_docs(), 1);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!(searcher.segment_readers()[1].num_docs(), 2);
assert_eq!(searcher.segment_readers()[1].max_doc(), 4);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 4);
assert_eq!(searcher.segment_readers()[1].num_docs(), 1);
assert_eq!(searcher.segment_readers()[1].max_doc(), 3);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec
@@ -959,15 +956,15 @@ mod tests {
.segment_reader(0)
.fast_field_reader::<u64>(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 1);
assert_eq!(score_field_reader.max_value(), 3);
assert_eq!(score_field_reader.min_value(), 4000);
assert_eq!(score_field_reader.max_value(), 7000);
let score_field_reader = searcher
.segment_reader(1)
.fast_field_reader::<u64>(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 4000);
assert_eq!(score_field_reader.max_value(), 7000);
assert_eq!(score_field_reader.min_value(), 1);
assert_eq!(score_field_reader.max_value(), 3);
}
{
// merging the segments
@@ -1140,10 +1137,9 @@ mod tests {
#[test]
fn test_merge_facets() {
let mut schema_builder = schema::SchemaBuilder::default();
let mut schema_builder = schema::Schema::builder();
let facet_field = schema_builder.add_facet_field("facet");
let index = Index::create_in_ram(schema_builder.build());
use schema::Facet;
{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let index_doc = |index_writer: &mut IndexWriter, doc_facets: &[&str]| {
@@ -1172,20 +1168,16 @@ mod tests {
index_doc(&mut index_writer, &["/top/e", "/top/f"]);
index_writer.commit().expect("committed");
}
index.load_searchers().unwrap();
let test_searcher = |expected_num_docs: usize, expected: &[(&str, u64)]| {
let searcher = index.searcher();
let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet(Facet::from("/top"));
use collector::{CountCollector, MultiCollector};
let mut count_collector = CountCollector::default();
{
let mut multi_collectors =
MultiCollector::from(vec![&mut count_collector, &mut facet_collector]);
searcher.search(&AllQuery, &mut multi_collectors).unwrap();
}
assert_eq!(count_collector.count(), expected_num_docs);
let facet_counts = facet_collector.harvest();
let (count, facet_counts) = searcher
.search(&AllQuery, &(Count, facet_collector))
.unwrap();
assert_eq!(count, expected_num_docs);
let facets: Vec<(String, u64)> = facet_counts
.get("/top")
.map(|(facet, count)| (facet.to_string(), count))
@@ -1209,7 +1201,6 @@ mod tests {
("/top/f", 1),
],
);
// Merging the segments
{
let segment_ids = index
@@ -1222,7 +1213,6 @@ mod tests {
.wait()
.expect("Merging failed");
index_writer.wait_merging_threads().unwrap();
index.load_searchers().unwrap();
test_searcher(
11,
@@ -1261,7 +1251,7 @@ mod tests {
#[test]
fn test_merge_multivalued_int_fields_all_deleted() {
let mut schema_builder = schema::SchemaBuilder::default();
let mut schema_builder = schema::Schema::builder();
let int_options = IntOptions::default()
.set_fast(Cardinality::MultiValues)
.set_indexed();
@@ -1302,7 +1292,7 @@ mod tests {
#[test]
fn test_merge_multivalued_int_fields() {
let mut schema_builder = schema::SchemaBuilder::default();
let mut schema_builder = schema::Schema::builder();
let int_options = IntOptions::default()
.set_fast(Cardinality::MultiValues)
.set_indexed();
@@ -1368,15 +1358,17 @@ mod tests {
assert_eq!(&vals, &[17]);
}
{
let segment = searcher.segment_reader(1u32);
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[20]);
}
println!(
"{:?}",
searcher
.segment_readers()
.iter()
.map(|reader| reader.max_doc())
.collect::<Vec<_>>()
);
{
let segment = searcher.segment_reader(2u32);
let segment = searcher.segment_reader(1u32);
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[28, 27]);
@@ -1385,6 +1377,13 @@ mod tests {
assert_eq!(&vals, &[1_000]);
}
{
let segment = searcher.segment_reader(2u32);
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[20]);
}
// Merging the segments
{
let segment_ids = index
@@ -1403,6 +1402,14 @@ mod tests {
{
let searcher = index.searcher();
println!(
"{:?}",
searcher
.segment_readers()
.iter()
.map(|reader| reader.max_doc())
.collect::<Vec<_>>()
);
let segment = searcher.segment_reader(0u32);
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
@@ -1428,13 +1435,13 @@ mod tests {
assert_eq!(&vals, &[17]);
ff_reader.get_vals(7, &mut vals);
assert_eq!(&vals, &[20]);
ff_reader.get_vals(8, &mut vals);
assert_eq!(&vals, &[28, 27]);
ff_reader.get_vals(9, &mut vals);
ff_reader.get_vals(8, &mut vals);
assert_eq!(&vals, &[1_000]);
ff_reader.get_vals(9, &mut vals);
assert_eq!(&vals, &[20]);
}
}
}

View File

@@ -227,8 +227,24 @@ impl SegmentUpdater {
if self.is_alive() {
let index = &self.0.index;
let directory = index.directory();
let mut commited_segment_metas = self.0.segment_manager.committed_segment_metas();
// We sort the committed segment metas by number of documents.
// This is a heuristic to make multithreading more efficient.
//
// This is not done at the searcher level because I had a strange
// use case in which I was dealing with a large static index,
// dispatched over 5 SSD drives.
//
// A `UnionDirectory` makes it possible to read from these
// 5 different drives and creates a meta.json on the fly.
// In order to optimize the throughput, it creates a lasagna of segments
// from the different drives.
//
// Segment 1 from disk 1, Segment 1 from disk 2, etc.
commited_segment_metas.sort_by_key(|segment_meta| -(segment_meta.max_doc() as i32));
save_metas(
self.0.segment_manager.committed_segment_metas(),
commited_segment_metas,
index.schema(),
opstamp,
commit_message,
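One design note on the sort key above: negating `max_doc() as i32` sorts descending but relies on the cast not overflowing; `std::cmp::Reverse` expresses the same intent directly. A sketch:

use std::cmp::Reverse;

// Largest segments first, without a signed cast.
fn sort_largest_first(doc_counts: &mut [u32]) {
    doc_counts.sort_by_key(|&n| Reverse(n));
}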
@@ -484,7 +500,7 @@ mod tests {
#[test]
fn test_delete_during_merge() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();

View File

@@ -24,7 +24,8 @@
//! # use tempdir::TempDir;
//! # use tantivy::Index;
//! # use tantivy::schema::*;
//! # use tantivy::collector::TopCollector;
//! # use tantivy::{Score, DocAddress};
//! # use tantivy::collector::TopDocs;
//! # use tantivy::query::QueryParser;
//! #
//! # fn main() {
@@ -46,7 +47,7 @@
//! // in a compressed, row-oriented key-value store.
//! // This store is useful to reconstruct the
//! // documents that were selected during the search phase.
//! let mut schema_builder = SchemaBuilder::default();
//! let mut schema_builder = Schema::builder();
//! let title = schema_builder.add_text_field("title", TEXT | STORED);
//! let body = schema_builder.add_text_field("body", TEXT);
//! let schema = schema_builder.build();
@@ -86,13 +87,13 @@
//! // A ticket has been opened regarding this problem.
//! let query = query_parser.parse_query("sea whale")?;
//!
//! let mut top_collector = TopCollector::with_limit(10);
//! searcher.search(&*query, &mut top_collector)?;
//! // Perform search.
//! // `topdocs` contains the 10 most relevant doc ids, sorted by decreasing scores...
//! let top_docs: Vec<(Score, DocAddress)> =
//! searcher.search(&query, &TopDocs::with_limit(10))?;
//!
//! // Our top collector now contains the 10
//! // most relevant doc ids...
//! let doc_addresses = top_collector.docs();
//! for doc_address in doc_addresses {
//! for (_score, doc_address) in top_docs {
//! // Retrieve the actual content of documents given its `doc_address`.
//! let retrieved_doc = searcher.doc(doc_address)?;
//! println!("{}", schema.to_json(&retrieved_doc));
//! }
@@ -129,11 +130,11 @@ extern crate base64;
extern crate bit_set;
extern crate bitpacking;
extern crate byteorder;
extern crate scoped_pool;
extern crate combine;
extern crate crossbeam;
extern crate crossbeam_channel;
extern crate fnv;
extern crate fst;
extern crate fst_regex;
@@ -152,8 +153,6 @@ extern crate tempdir;
extern crate tempfile;
extern crate uuid;
#[cfg(test)]
#[macro_use]
extern crate matches;
@@ -218,7 +217,7 @@ pub mod store;
pub mod termdict;
mod snippet;
pub use self::snippet::SnippetGenerator;
pub use self::snippet::{SnippetGenerator, Snippet};
mod docset;
pub use self::docset::{DocSet, SkipResult};
@@ -301,9 +300,11 @@ mod tests {
use docset::DocSet;
use query::BooleanQuery;
use rand::distributions::Bernoulli;
use rand::distributions::Range;
use rand::{Rng, SeedableRng, XorShiftRng};
use rand::distributions::Uniform;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use schema::*;
use DocAddress;
use Index;
use IndexWriter;
use Postings;
@@ -322,16 +323,15 @@ mod tests {
}
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
let seed: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
XorShiftRng::from_seed(seed)
.sample_iter(&Range::new(0u32, max_value))
let seed: [u8; 32] = [1; 32];
StdRng::from_seed(seed)
.sample_iter(&Uniform::new(0u32, max_value))
.take(n_elems)
.collect::<Vec<u32>>()
}
pub fn sample_with_seed(n: u32, ratio: f64, seed_val: u8) -> Vec<u32> {
let seed: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, seed_val];
XorShiftRng::from_seed(seed)
StdRng::from_seed([seed_val; 32])
.sample_iter(&Bernoulli::new(ratio))
.take(n as usize)
.enumerate()
@@ -346,7 +346,7 @@ mod tests {
#[test]
#[cfg(feature = "mmap")]
fn test_indexing() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_from_tempdir(schema).unwrap();
@@ -371,7 +371,7 @@ mod tests {
#[test]
fn test_docfreq1() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
@@ -411,7 +411,7 @@ mod tests {
#[test]
fn test_fieldnorm_no_docs_with_field() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let title_field = schema_builder.add_text_field("title", TEXT);
let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build());
@@ -440,7 +440,7 @@ mod tests {
#[test]
fn test_fieldnorm() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build());
{
@@ -481,7 +481,7 @@ mod tests {
#[test]
fn test_delete_postings1() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let term_abcd = Term::from_field_text(text_field, "abcd");
let term_a = Term::from_field_text(text_field, "a");
@@ -492,42 +492,21 @@ mod tests {
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{
// 0
let doc = doc!(text_field=>"a b");
index_writer.add_document(doc);
}
{
// 1
let doc = doc!(text_field=>" a c");
index_writer.add_document(doc);
}
{
// 2
let doc = doc!(text_field=>" b c");
index_writer.add_document(doc);
}
{
// 3
let doc = doc!(text_field=>" b d");
index_writer.add_document(doc);
}
{
index_writer.delete_term(Term::from_field_text(text_field, "c"));
}
{
index_writer.delete_term(Term::from_field_text(text_field, "a"));
}
{
// 4
let doc = doc!(text_field=>" b c");
index_writer.add_document(doc);
}
{
// 5
let doc = doc!(text_field=>" a");
index_writer.add_document(doc);
}
// 0
index_writer.add_document(doc!(text_field=>"a b"));
// 1
index_writer.add_document(doc!(text_field=>" a c"));
// 2
index_writer.add_document(doc!(text_field=>" b c"));
// 3
index_writer.add_document(doc!(text_field=>" b d"));
index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.delete_term(Term::from_field_text(text_field, "a"));
// 4
index_writer.add_document(doc!(text_field=>" b c"));
// 5
index_writer.add_document(doc!(text_field=>" a"));
index_writer.commit().unwrap();
}
{
@@ -562,15 +541,10 @@ mod tests {
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{
// 0
let doc = doc!(text_field=>"a b");
index_writer.add_document(doc);
}
{
// 1
index_writer.delete_term(Term::from_field_text(text_field, "c"));
}
// 0
index_writer.add_document(doc!(text_field=>"a b"));
// 1
index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.rollback().unwrap();
}
{
@@ -606,13 +580,8 @@ mod tests {
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{
let doc = doc!(text_field=>"a b");
index_writer.add_document(doc);
}
{
index_writer.delete_term(Term::from_field_text(text_field, "c"));
}
index_writer.add_document(doc!(text_field=>"a b"));
index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.rollback().unwrap();
index_writer.delete_term(Term::from_field_text(text_field, "a"));
index_writer.commit().unwrap();
@@ -656,7 +625,7 @@ mod tests {
#[test]
fn test_indexed_u64() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("value", INT_INDEXED);
let schema = schema_builder.build();
@@ -679,7 +648,7 @@ mod tests {
#[test]
fn test_indexed_i64() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_i64_field("value", INT_INDEXED);
let schema = schema_builder.build();
@@ -703,7 +672,7 @@ mod tests {
#[test]
fn test_indexedfield_not_in_documents() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let absent_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
@@ -719,7 +688,7 @@ mod tests {
#[test]
fn test_delete_postings2() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -755,7 +724,7 @@ mod tests {
#[test]
fn test_termfreq() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -792,7 +761,7 @@ mod tests {
#[test]
fn test_searcher_1() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -800,18 +769,9 @@ mod tests {
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{
let doc = doc!(text_field=>"af af af b");
index_writer.add_document(doc);
}
{
let doc = doc!(text_field=>"a b c");
index_writer.add_document(doc);
}
{
let doc = doc!(text_field=>"a b c d");
index_writer.add_document(doc);
}
index_writer.add_document(doc!(text_field=>"af af af b"));
index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.add_document(doc!(text_field=>"a b c d"));
index_writer.commit().unwrap();
}
{
@@ -819,55 +779,42 @@ mod tests {
let searcher = index.searcher();
let get_doc_ids = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms);
let mut collector = TestCollector::default();
assert!(searcher.search(&query, &mut collector).is_ok());
collector.docs()
let topdocs = searcher.search(&query, &TestCollector).unwrap();
topdocs.docs().to_vec()
};
{
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
vec![1, 2]
);
}
{
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
vec![0]
);
}
{
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
vec![0, 1, 2]
);
}
{
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "c")]),
vec![1, 2]
);
}
{
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "d")]),
vec![2]
);
}
{
assert_eq!(
get_doc_ids(vec![
Term::from_field_text(text_field, "b"),
Term::from_field_text(text_field, "a"),
]),
vec![0, 1, 2]
);
}
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
vec![DocAddress(0, 1), DocAddress(0, 2)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
vec![DocAddress(0, 0)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "c")]),
vec![DocAddress(0, 1), DocAddress(0, 2)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "d")]),
vec![DocAddress(0, 2)]
);
assert_eq!(
get_doc_ids(vec![
Term::from_field_text(text_field, "b"),
Term::from_field_text(text_field, "a"),
]),
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
);
}
}
#[test]
fn test_searcher_2() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -894,7 +841,7 @@ mod tests {
#[test]
fn test_doc_macro() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let other_text_field = schema_builder.add_text_field("text2", TEXT);
let document = doc!(text_field => "tantivy",
@@ -912,7 +859,7 @@ mod tests {
#[test]
fn test_wrong_fast_field_type() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
let text_field = schema_builder.add_text_field("text", TEXT);

View File

@@ -26,12 +26,12 @@
/// #[macro_use]
/// extern crate tantivy;
///
/// use tantivy::schema::{SchemaBuilder, TEXT, FAST};
/// use tantivy::schema::{Schema, TEXT, FAST};
///
/// //...
///
/// # fn main() {
/// let mut schema_builder = SchemaBuilder::new();
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let author = schema_builder.add_text_field("text", TEXT);
/// let likes = schema_builder.add_u64_field("num_u64", FAST);
@@ -67,11 +67,11 @@ macro_rules! doc(
#[cfg(test)]
mod test {
use schema::{SchemaBuilder, FAST, TEXT};
use schema::{Schema, FAST, TEXT};
#[test]
fn test_doc_basic() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT);
let author = schema_builder.add_text_field("text", TEXT);
let likes = schema_builder.add_u64_field("num_u64", FAST);
@@ -85,7 +85,7 @@ mod test {
#[test]
fn test_doc_trailing_comma() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT);
let author = schema_builder.add_text_field("text", TEXT);
let likes = schema_builder.add_u64_field("num_u64", FAST);

View File

@@ -271,12 +271,9 @@ mod bench {
use test::Bencher;
fn generate_array_with_seed(n: usize, ratio: f64, seed_val: u8) -> Vec<u32> {
let seed: &[u8; 16] = &[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,seed_val];
let seed: &[u8; 16] = &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, seed_val];
let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed);
(0u32..)
.filter(|_| rng.gen_bool(ratio))
.take(n)
.collect()
(0u32..).filter(|_| rng.gen_bool(ratio)).take(n).collect()
}
pub fn generate_array(n: usize, ratio: f64) -> Vec<u32> {

View File

@@ -54,17 +54,18 @@ pub mod tests {
use indexer::operation::AddOperation;
use indexer::SegmentWriter;
use query::Scorer;
use rand::{Rng, SeedableRng, XorShiftRng};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use schema::Field;
use schema::IndexRecordOption;
use schema::{Document, SchemaBuilder, Term, INT_INDEXED, STRING, TEXT};
use schema::{Document, Schema, Term, INT_INDEXED, STRING, TEXT};
use std::iter;
use DocId;
use Score;
#[test]
pub fn test_position_write() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -88,7 +89,7 @@ pub mod tests {
#[test]
pub fn test_skip_positions() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -163,7 +164,7 @@ pub mod tests {
#[test]
pub fn test_position_and_fieldnorm1() {
let mut positions = Vec::new();
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
@@ -276,7 +277,7 @@ pub mod tests {
#[test]
pub fn test_position_and_fieldnorm2() {
let mut positions: Vec<u32> = Vec::new();
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -317,7 +318,7 @@ pub mod tests {
let num_docs = 300u32;
let index = {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_u64_field("value", INT_INDEXED);
let schema = schema_builder.build();
@@ -498,12 +499,11 @@ pub mod tests {
Term::from_field_text(field, "d")
};
pub static ref INDEX: Index = {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build();
let seed: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut rng: XorShiftRng = XorShiftRng::from_seed(seed);
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
let index = Index::create_in_ram(schema);
let posting_list_size = 1_000_000;

View File

@@ -630,7 +630,7 @@ mod tests {
use docset::DocSet;
use fst::Streamer;
use schema::IndexRecordOption;
use schema::SchemaBuilder;
use schema::Schema;
use schema::Term;
use schema::INT_INDEXED;
use DocId;
@@ -707,7 +707,7 @@ mod tests {
}
fn build_block_postings(docs: Vec<DocId>) -> BlockSegmentPostings {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let int_field = schema_builder.add_u64_field("id", INT_INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -778,7 +778,7 @@ mod tests {
#[test]
fn test_reset_block_segment_postings() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let int_field = schema_builder.add_u64_field("id", INT_INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);

View File

@@ -174,8 +174,8 @@ mod tests {
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::super::MemoryArena;
use super::ExpUnrolledLinkedList;
use test::Bencher;
const NUM_STACK: usize = 10_000;

View File

@@ -86,12 +86,12 @@ mod tests {
use super::AllQuery;
use query::Query;
use schema::{SchemaBuilder, TEXT};
use schema::{Schema, TEXT};
use Index;
#[test]
fn test_all_query() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);

View File

@@ -11,7 +11,7 @@ use Result;
/// A weight struct for Fuzzy Term and Regex Queries
pub struct AutomatonWeight<A>
where
A: Automaton,
A: Automaton + Send + Sync + 'static,
{
field: Field,
automaton: A,
@@ -19,7 +19,7 @@ where
impl<A> AutomatonWeight<A>
where
A: Automaton,
A: Automaton + Send + Sync + 'static,
{
/// Create a new AutomatonWeight
pub fn new(field: Field, automaton: A) -> AutomatonWeight<A> {
@@ -34,7 +34,7 @@ where
impl<A> Weight for AutomatonWeight<A>
where
A: Automaton,
A: Automaton + Send + Sync + 'static,
{
fn scorer(&self, reader: &SegmentReader) -> Result<Box<Scorer>> {
let max_doc = reader.max_doc();
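// Hedged note on the bounds added above: the multithreaded search executor
// may call `scorer` from worker threads, so the automaton captured by the
// weight has to be shareable across threads and own its data. A compile-time
// probe for any concrete automaton type:
fn _assert_threadsafe<A: Send + Sync + 'static>() {}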

View File

@@ -19,10 +19,11 @@ mod tests {
use query::Scorer;
use query::TermQuery;
use schema::*;
use DocId;
use Index;
fn aux_test_helper() -> (Index, Field) {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -130,9 +131,13 @@ mod tests {
let matching_docs = |boolean_query: &Query| {
let searcher = index.searcher();
let mut test_collector = TestCollector::default();
searcher.search(boolean_query, &mut test_collector).unwrap();
test_collector.docs()
let test_docs = searcher.search(boolean_query, &TestCollector).unwrap();
test_docs
.docs()
.iter()
.cloned()
.map(|doc| doc.1)
.collect::<Vec<DocId>>()
};
{
@@ -186,9 +191,8 @@ mod tests {
let score_docs = |boolean_query: &Query| {
let searcher = index.searcher();
let mut test_collector = TestCollector::default();
searcher.search(boolean_query, &mut test_collector).unwrap();
test_collector.scores()
let fruit = searcher.search(boolean_query, &TestCollector).unwrap();
fruit.scores().to_vec()
};
{

View File

@@ -25,14 +25,14 @@ lazy_static! {
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT};
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result, Term};
/// use tantivy::collector::{CountCollector, TopCollector, chain};
/// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::FuzzyTermQuery;
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new();
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
@@ -57,16 +57,12 @@ lazy_static! {
/// let searcher = index.searcher();
///
/// {
/// let mut top_collector = TopCollector::with_limit(2);
/// let mut count_collector = CountCollector::default();
/// {
/// let mut collectors = chain().push(&mut top_collector).push(&mut count_collector);
/// let term = Term::from_field_text(title, "Diary");
/// let query = FuzzyTermQuery::new(term, 1, true);
/// searcher.search(&query, &mut collectors).unwrap();
/// }
/// assert_eq!(count_collector.count(), 2);
/// assert!(top_collector.at_capacity());
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = FuzzyTermQuery::new(term, 1, true);
/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
/// assert_eq!(count, 2);
/// assert_eq!(top_docs.len(), 2);
/// }
///
/// Ok(())
@@ -122,8 +118,8 @@ impl Query for FuzzyTermQuery {
#[cfg(test)]
mod test {
use super::FuzzyTermQuery;
use collector::TopCollector;
use schema::SchemaBuilder;
use collector::TopDocs;
use schema::Schema;
use schema::TEXT;
use tests::assert_nearly_equals;
use Index;
@@ -131,7 +127,7 @@ mod test {
#[test]
pub fn test_fuzzy_term() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let country_field = schema_builder.add_text_field("country", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -148,14 +144,14 @@ mod test {
index.load_searchers().unwrap();
let searcher = index.searcher();
{
let mut collector = TopCollector::with_limit(2);
let term = Term::from_field_text(country_field, "japon");
let fuzzy_query = FuzzyTermQuery::new(term, 1, true);
searcher.search(&fuzzy_query, &mut collector).unwrap();
let scored_docs = collector.top_docs();
assert_eq!(scored_docs.len(), 1, "Expected only 1 document");
let (score, _) = scored_docs[0];
let top_docs = searcher
.search(&fuzzy_query, &TopDocs::with_limit(2))
.unwrap();
assert_eq!(top_docs.len(), 1, "Expected only 1 document");
let (score, _) = top_docs[0];
assert_nearly_equals(1f32, score);
}
}

View File

@@ -56,15 +56,15 @@ pub use self::weight::Weight;
#[cfg(test)]
mod tests {
use Index;
use schema::{SchemaBuilder, TEXT};
use query::QueryParser;
use Term;
use schema::{Schema, TEXT};
use std::collections::BTreeSet;
use Index;
use Term;
#[test]
fn test_query_terms() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -73,33 +73,48 @@ mod tests {
let term_b = Term::from_field_text(text_field, "b");
{
let mut terms_set: BTreeSet<Term> = BTreeSet::new();
query_parser.parse_query("a").unwrap().query_terms(&mut terms_set);
query_parser
.parse_query("a")
.unwrap()
.query_terms(&mut terms_set);
let terms: Vec<&Term> = terms_set.iter().collect();
assert_eq!(vec![&term_a], terms);
}
{
let mut terms_set: BTreeSet<Term> = BTreeSet::new();
query_parser.parse_query("a b").unwrap().query_terms(&mut terms_set);
query_parser
.parse_query("a b")
.unwrap()
.query_terms(&mut terms_set);
let terms: Vec<&Term> = terms_set.iter().collect();
assert_eq!(vec![&term_a, &term_b], terms);
}
{
let mut terms_set: BTreeSet<Term> = BTreeSet::new();
query_parser.parse_query("\"a b\"").unwrap().query_terms(&mut terms_set);
query_parser
.parse_query("\"a b\"")
.unwrap()
.query_terms(&mut terms_set);
let terms: Vec<&Term> = terms_set.iter().collect();
assert_eq!(vec![&term_a, &term_b], terms);
}
{
let mut terms_set: BTreeSet<Term> = BTreeSet::new();
query_parser.parse_query("a a a a a").unwrap().query_terms(&mut terms_set);
query_parser
.parse_query("a a a a a")
.unwrap()
.query_terms(&mut terms_set);
let terms: Vec<&Term> = terms_set.iter().collect();
assert_eq!(vec![&term_a], terms);
}
{
let mut terms_set: BTreeSet<Term> = BTreeSet::new();
query_parser.parse_query("a -b").unwrap().query_terms(&mut terms_set);
query_parser
.parse_query("a -b")
.unwrap()
.query_terms(&mut terms_set);
let terms: Vec<&Term> = terms_set.iter().collect();
assert_eq!(vec![&term_a, &term_b], terms);
}
}
}
}

View File

@@ -13,11 +13,13 @@ mod tests {
use collector::tests::TestCollector;
use core::Index;
use error::TantivyError;
use schema::{SchemaBuilder, Term, TEXT};
use schema::{Schema, Term, TEXT};
use tests::assert_nearly_equals;
use DocAddress;
use DocId;
fn create_index(texts: &[&'static str]) -> Index {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -47,16 +49,19 @@ mod tests {
index.load_searchers().unwrap();
let searcher = index.searcher();
let test_query = |texts: Vec<&str>| {
let mut test_collector = TestCollector::default();
let terms: Vec<Term> = texts
.iter()
.map(|text| Term::from_field_text(text_field, text))
.collect();
let phrase_query = PhraseQuery::new(terms);
searcher
.search(&phrase_query, &mut test_collector)
let test_fruits = searcher
.search(&phrase_query, &TestCollector)
.expect("search should succeed");
test_collector.docs()
test_fruits
.docs()
.iter()
.map(|docaddr| docaddr.1)
.collect::<Vec<_>>()
};
assert_eq!(test_query(vec!["a", "b", "c"]), vec![2, 4]);
assert_eq!(test_query(vec!["a", "b"]), vec![1, 2, 3, 4]);
@@ -67,7 +72,7 @@ mod tests {
#[test]
pub fn test_phrase_query_no_positions() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
use schema::IndexRecordOption;
use schema::TextFieldIndexing;
use schema::TextOptions;
@@ -91,9 +96,9 @@ mod tests {
Term::from_field_text(text_field, "a"),
Term::from_field_text(text_field, "b"),
]);
let mut test_collector = TestCollector::default();
if let TantivyError::SchemaError(ref msg) = searcher
.search(&phrase_query, &mut test_collector)
.search(&phrase_query, &TestCollector)
.map(|_| ())
.unwrap_err()
{
assert_eq!(
@@ -113,16 +118,16 @@ mod tests {
index.load_searchers().unwrap();
let searcher = index.searcher();
let test_query = |texts: Vec<&str>| {
let mut test_collector = TestCollector::default();
let terms: Vec<Term> = texts
.iter()
.map(|text| Term::from_field_text(text_field, text))
.collect();
let phrase_query = PhraseQuery::new(terms);
searcher
.search(&phrase_query, &mut test_collector)
.expect("search should succeed");
test_collector.scores()
.search(&phrase_query, &TestCollector)
.expect("search should succeed")
.scores()
.to_vec()
};
let scores = test_query(vec!["a", "b"]);
assert_nearly_equals(scores[0], 0.40618482);
@@ -131,51 +136,39 @@ mod tests {
#[test] // motivated by #234
pub fn test_phrase_query_docfreq_order() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{
// 0
let doc = doc!(text_field=>"b");
index_writer.add_document(doc);
}
{
// 1
let doc = doc!(text_field=>"a b");
index_writer.add_document(doc);
}
{
// 2
let doc = doc!(text_field=>"b a");
index_writer.add_document(doc);
}
index_writer.add_document(doc!(text_field=>"b"));
index_writer.add_document(doc!(text_field=>"a b"));
index_writer.add_document(doc!(text_field=>"b a"));
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let test_query = |texts: Vec<&str>| {
let mut test_collector = TestCollector::default();
let terms: Vec<Term> = texts
.iter()
.map(|text| Term::from_field_text(text_field, text))
.collect();
let phrase_query = PhraseQuery::new(terms);
searcher
.search(&phrase_query, &mut test_collector)
.expect("search should succeed");
test_collector.docs()
.search(&phrase_query, &TestCollector)
.expect("search should succeed")
.docs()
.to_vec()
};
assert_eq!(test_query(vec!["a", "b"]), vec![1]);
assert_eq!(test_query(vec!["b", "a"]), vec![2]);
assert_eq!(test_query(vec!["a", "b"]), vec![DocAddress(0, 1)]);
assert_eq!(test_query(vec!["b", "a"]), vec![DocAddress(0, 2)]);
}
#[test] // motivated by #234
pub fn test_phrase_query_non_trivial_offsets() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -187,16 +180,18 @@ mod tests {
index.load_searchers().unwrap();
let searcher = index.searcher();
let test_query = |texts: Vec<(usize, &str)>| {
let mut test_collector = TestCollector::default();
let terms: Vec<(usize, Term)> = texts
.iter()
.map(|(offset, text)| (*offset, Term::from_field_text(text_field, text)))
.collect();
let phrase_query = PhraseQuery::new_with_offset(terms);
searcher
.search(&phrase_query, &mut test_collector)
.expect("search should succeed");
test_collector.docs()
.search(&phrase_query, &TestCollector)
.expect("search should succeed")
.docs()
.iter()
.map(|doc_address| doc_address.1)
.collect::<Vec<DocId>>()
};
assert_eq!(test_query(vec![(0, "a"), (1, "b")]), vec![0]);
assert_eq!(test_query(vec![(1, "b"), (0, "a")]), vec![0]);

View File

@@ -1,11 +1,9 @@
use super::Weight;
use collector::Collector;
use core::searcher::Searcher;
use downcast;
use std::collections::BTreeSet;
use std::fmt;
use Result;
use SegmentLocalId;
use Term;
/// The `Query` trait defines a set of documents and a scoring method
@@ -63,26 +61,6 @@ pub trait Query: QueryClone + downcast::Any + fmt::Debug {
/// Extract all of the terms associated to the query and insert them in the
/// term set given in arguments.
fn query_terms(&self, _term_set: &mut BTreeSet<Term>) {}
/// Search works as follows:
///
/// First, the weight object associated with the query is created.
///
/// Then, the query loops over the segments and, for each segment:
/// - sets up the collector and informs it that the segment being processed has changed.
/// - creates a `Scorer` object for this segment
/// - iterates through the matched documents and pushes them to the collector.
///
fn search(&self, searcher: &Searcher, collector: &mut Collector) -> Result<()> {
let scoring_enabled = collector.requires_scoring();
let weight = self.weight(searcher, scoring_enabled)?;
for (segment_ord, segment_reader) in searcher.segment_readers().iter().enumerate() {
collector.set_segment(segment_ord as SegmentLocalId, segment_reader)?;
let mut scorer = weight.scorer(segment_reader)?;
scorer.collect(collector, segment_reader.delete_bitset());
}
Ok(())
}
}
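With the default `search` removed from `Query`, the per-segment loop presumably moves behind `Searcher::search` and the split Collector/SegmentCollector API; a hedged sketch of the assumed flow:

// let weight = query.weight(searcher, collector.requires_scoring())?;
// let mut fruits = Vec::new();
// for (ord, reader) in searcher.segment_readers().iter().enumerate() {
//     let mut segment_collector = collector.for_segment(ord as u32, reader)?;
//     let mut scorer = weight.scorer(reader)?;
//     // ...iterate matching documents, feeding `segment_collector`...
//     fruits.push(segment_collector.harvest());
// }
// let fruit = collector.merge_fruits(fruits)?;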
pub trait QueryClone {
@@ -98,6 +76,26 @@ where
}
}
impl Query for Box<Query> {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result<Box<Weight>> {
self.as_ref().weight(searcher, scoring_enabled)
}
fn count(&self, searcher: &Searcher) -> Result<usize> {
self.as_ref().count(searcher)
}
fn query_terms(&self, term_set: &mut BTreeSet<Term<Vec<u8>>>) {
self.as_ref().query_terms(term_set);
}
}
impl QueryClone for Box<Query> {
fn box_clone(&self) -> Box<Query> {
self.as_ref().box_clone()
}
}
#[allow(missing_docs)]
mod downcast_impl {
downcast!(super::Query);

View File

@@ -128,6 +128,7 @@ fn trim_ast(logical_ast: LogicalAST) -> Option<LogicalAST> {
///
/// * all docs query: A plain `*` will match all documents in the index.
///
#[derive(Clone)]
pub struct QueryParser {
schema: Schema,
default_fields: Vec<Field>,
@@ -484,12 +485,12 @@ mod test {
use query::Query;
use schema::Field;
use schema::{IndexRecordOption, TextFieldIndexing, TextOptions};
use schema::{SchemaBuilder, Term, INT_INDEXED, STORED, STRING, TEXT};
use schema::{Schema, Term, INT_INDEXED, STORED, STRING, TEXT};
use tokenizer::{LowerCaser, SimpleTokenizer, StopWordFilter, Tokenizer, TokenizerManager};
use Index;
fn make_query_parser() -> QueryParser {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field_indexing = TextFieldIndexing::default()
.set_tokenizer("en_with_stop_words")
.set_index_option(IndexRecordOption::WithFreqsAndPositions);
@@ -720,7 +721,7 @@ mod test {
#[test]
pub fn test_unknown_tokenizer() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field_indexing = TextFieldIndexing::default()
.set_tokenizer("nonexistingtokenizer")
.set_index_option(IndexRecordOption::Basic);
@@ -738,7 +739,7 @@ mod test {
#[test]
pub fn test_query_parser_no_positions() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field_indexing = TextFieldIndexing::default()
.set_tokenizer("customtokenizer")
.set_index_option(IndexRecordOption::Basic);

View File

@@ -40,14 +40,13 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// # #[macro_use]
/// # extern crate tantivy;
/// # use tantivy::Index;
/// # use tantivy::schema::{SchemaBuilder, INT_INDEXED};
/// # use tantivy::collector::CountCollector;
/// # use tantivy::query::Query;
/// # use tantivy::schema::{Schema, INT_INDEXED};
/// # use tantivy::collector::Count;
/// # use tantivy::Result;
/// # use tantivy::query::RangeQuery;
/// #
/// # fn run() -> Result<()> {
/// # let mut schema_builder = SchemaBuilder::new();
/// # let mut schema_builder = Schema::builder();
/// # let year_field = schema_builder.add_u64_field("year", INT_INDEXED);
/// # let schema = schema_builder.build();
/// #
@@ -67,10 +66,7 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
///
/// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
///
/// let mut count_collector = CountCollector::default();
/// docs_in_the_sixties.search(&searcher, &mut count_collector)?;
///
/// let num_60s_books = count_collector.count();
/// let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?;
///
/// # assert_eq!(num_60s_books, 2285);
/// # Ok(())
@@ -296,9 +292,8 @@ impl Weight for RangeWeight {
mod tests {
use super::RangeQuery;
use collector::CountCollector;
use query::Query;
use schema::{Document, Field, SchemaBuilder, INT_INDEXED};
use collector::Count;
use schema::{Document, Field, Schema, INT_INDEXED};
use std::collections::Bound;
use Index;
use Result;
@@ -306,7 +301,7 @@ mod tests {
#[test]
fn test_range_query_simple() {
fn run() -> Result<()> {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let year_field = schema_builder.add_u64_field("year", INT_INDEXED);
let schema = schema_builder.build();
@@ -327,9 +322,8 @@ mod tests {
let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960u64..1970u64);
// ... or `1960..=1969` if inclusive range is enabled.
let mut count_collector = CountCollector::default();
docs_in_the_sixties.search(&searcher, &mut count_collector)?;
assert_eq!(count_collector.count(), 2285);
let count = searcher.search(&docs_in_the_sixties, &Count)?;
assert_eq!(count, 2285);
Ok(())
}
@@ -340,7 +334,7 @@ mod tests {
fn test_range_query() {
let int_field: Field;
let schema = {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
int_field = schema_builder.add_i64_field("intfield", INT_INDEXED);
schema_builder.build()
};
@@ -363,11 +357,8 @@ mod tests {
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let count_multiples = |range_query: RangeQuery| {
let mut count_collector = CountCollector::default();
range_query.search(&searcher, &mut count_collector).unwrap();
count_collector.count()
};
let count_multiples =
|range_query: RangeQuery| searcher.search(&range_query, &Count).unwrap();
assert_eq!(count_multiples(RangeQuery::new_i64(int_field, 10..11)), 9);
assert_eq!(

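For reference, the `RangeQuery` changes above replace the mutable `CountCollector` with the new `Count` collector passed by shared reference to `Searcher::search`. A minimal self-contained sketch of the resulting pattern, assuming only the API exercised in this diff (`writer_with_num_threads`, `load_searchers`, `Count`):

```rust
#[macro_use]
extern crate tantivy;

use tantivy::collector::Count;
use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INT_INDEXED};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let year_field = schema_builder.add_u64_field("year", INT_INDEXED);
    let index = Index::create_in_ram(schema_builder.build());
    {
        let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
        for year in 1950u64..1980u64 {
            index_writer.add_document(doc!(year_field => year));
        }
        index_writer.commit()?;
    }
    index.load_searchers()?;
    let searcher = index.searcher();
    let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
    // The collector result is now returned by `search` instead of being
    // read back from a mutable collector.
    let count = searcher.search(&docs_in_the_sixties, &Count)?;
    assert_eq!(count, 10);
    Ok(())
}
```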
View File

@@ -16,14 +16,14 @@ use Searcher;
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT};
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result, Term};
/// use tantivy::collector::{CountCollector, TopCollector, chain};
/// use tantivy::collector::Count;
/// use tantivy::query::RegexQuery;
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new();
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
@@ -47,19 +47,10 @@ use Searcher;
/// index.load_searchers()?;
/// let searcher = index.searcher();
///
/// {
/// let mut top_collector = TopCollector::with_limit(2);
/// let mut count_collector = CountCollector::default();
/// {
/// let mut collectors = chain().push(&mut top_collector).push(&mut count_collector);
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::new("d[ai]{2}ry".to_string(), title);
/// searcher.search(&query, &mut collectors).unwrap();
/// }
/// assert_eq!(count_collector.count(), 3);
/// assert!(top_collector.at_capacity());
/// }
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::new("d[ai]{2}ry".to_string(), title);
/// let count = searcher.search(&query, &Count)?;
/// assert_eq!(count, 3);
/// Ok(())
/// }
/// ```
@@ -95,15 +86,15 @@ impl Query for RegexQuery {
#[cfg(test)]
mod test {
use super::RegexQuery;
use collector::TopCollector;
use schema::SchemaBuilder;
use collector::TopDocs;
use schema::Schema;
use schema::TEXT;
use tests::assert_nearly_equals;
use Index;
#[test]
pub fn test_regex_query() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let country_field = schema_builder.add_text_field("country", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -120,20 +111,18 @@ mod test {
index.load_searchers().unwrap();
let searcher = index.searcher();
{
let mut collector = TopCollector::with_limit(2);
let regex_query = RegexQuery::new("jap[ao]n".to_string(), country_field);
searcher.search(&regex_query, &mut collector).unwrap();
let scored_docs = collector.top_docs();
let scored_docs = searcher
.search(&regex_query, &TopDocs::with_limit(2))
.unwrap();
assert_eq!(scored_docs.len(), 1, "Expected only 1 document");
let (score, _) = scored_docs[0];
assert_nearly_equals(1f32, score);
}
{
let mut collector = TopCollector::with_limit(2);
let regex_query = RegexQuery::new("jap[A-Z]n".to_string(), country_field);
searcher.search(&regex_query, &mut collector).unwrap();
let scored_docs = collector.top_docs();
assert_eq!(scored_docs.len(), 0, "Expected ZERO document");
}
let regex_query = RegexQuery::new("jap[A-Z]n".to_string(), country_field);
let top_docs = searcher
.search(&regex_query, &TopDocs::with_limit(2))
.unwrap();
assert!(top_docs.is_empty(), "Expected ZERO document");
}
}

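The `RegexQuery` tests above show the same migration for top-docs collection: `TopDocs::with_limit(n)` now hands back a `Vec<(Score, DocAddress)>` directly from `search`. A sketch under that assumption, with the searcher and field taken as givens:

```rust
use tantivy::collector::TopDocs;
use tantivy::query::RegexQuery;
use tantivy::schema::Field;
use tantivy::{DocAddress, Result, Score, Searcher};

// Returns up to the two best-scored matches for a regex over
// `country_field`, mirroring the updated test above.
fn best_matches(
    searcher: &Searcher,
    country_field: Field,
) -> Result<Vec<(Score, DocAddress)>> {
    let regex_query = RegexQuery::new("jap[ao]n".to_string(), country_field);
    searcher.search(&regex_query, &TopDocs::with_limit(2))
}
```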
View File

@@ -1,8 +1,6 @@
use collector::Collector;
use common::BitSet;
use docset::{DocSet, SkipResult};
use downcast;
use fastfield::DeleteBitSet;
use std::ops::DerefMut;
use DocId;
use Score;
@@ -16,20 +14,11 @@ pub trait Scorer: downcast::Any + DocSet + 'static {
/// This method will perform a bit of computation and is not cached.
fn score(&mut self) -> Score;
/// Consumes the complete `DocSet` and
/// push the scored documents to the collector.
fn collect(&mut self, collector: &mut Collector, delete_bitset_opt: Option<&DeleteBitSet>) {
if let Some(delete_bitset) = delete_bitset_opt {
while self.advance() {
let doc = self.doc();
if !delete_bitset.is_deleted(doc) {
collector.collect(doc, self.score());
}
}
} else {
while self.advance() {
collector.collect(self.doc(), self.score());
}
/// Iterates through all of the documents matched by the `DocSet`
/// and pushes each scored document to the given callback.
fn for_each(&mut self, callback: &mut FnMut(DocId, Score)) {
while self.advance() {
callback(self.doc(), self.score());
}
}
}
@@ -44,9 +33,9 @@ impl Scorer for Box<Scorer> {
self.deref_mut().score()
}
fn collect(&mut self, collector: &mut Collector, delete_bitset: Option<&DeleteBitSet>) {
fn for_each(&mut self, callback: &mut FnMut(DocId, Score)) {
let scorer = self.deref_mut();
scorer.collect(collector, delete_bitset);
scorer.for_each(callback);
}
}

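The old `collect` method baked both iteration and delete-filtering into the `Scorer`; `for_each` exposes only iteration, through a callback. A minimal sketch of driving it, assuming a boxed scorer obtained from a `Weight` (delete filtering now happens in the collector plumbing instead):

```rust
use tantivy::query::Scorer;
use tantivy::{DocId, Score};

// Counts the matching documents by driving the scorer with a closure,
// much like the segment collectors do after this refactoring.
fn count_matches(mut scorer: Box<Scorer>) -> usize {
    let mut count = 0;
    scorer.for_each(&mut |_doc: DocId, _score: Score| {
        count += 1;
    });
    count
}
```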
View File

@@ -1,3 +1,4 @@
mod term_query;
mod term_scorer;
mod term_weight;
@@ -9,17 +10,17 @@ pub use self::term_weight::TermWeight;
#[cfg(test)]
mod tests {
use collector::TopCollector;
use collector::TopDocs;
use docset::DocSet;
use query::{Query, QueryParser, Scorer, TermQuery};
use schema::{IndexRecordOption, SchemaBuilder, STRING, TEXT};
use schema::{IndexRecordOption, Schema, STRING, TEXT};
use tests::assert_nearly_equals;
use Index;
use Term;
#[test]
pub fn test_term_query_no_freq() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -49,7 +50,7 @@ mod tests {
#[test]
pub fn test_term_weight() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let left_field = schema_builder.add_text_field("left", TEXT);
let right_field = schema_builder.add_text_field("right", TEXT);
let large_field = schema_builder.add_text_field("large", TEXT);
@@ -68,37 +69,35 @@ mod tests {
index.load_searchers().unwrap();
let searcher = index.searcher();
{
let mut collector = TopCollector::with_limit(2);
let term = Term::from_field_text(left_field, "left2");
let term_query = TermQuery::new(term, IndexRecordOption::WithFreqs);
searcher.search(&term_query, &mut collector).unwrap();
let scored_docs = collector.top_docs();
assert_eq!(scored_docs.len(), 1);
let (score, _) = scored_docs[0];
let topdocs = searcher
.search(&term_query, &TopDocs::with_limit(2))
.unwrap();
assert_eq!(topdocs.len(), 1);
let (score, _) = topdocs[0];
assert_nearly_equals(0.77802235, score);
}
{
let mut collector = TopCollector::with_limit(2);
let term = Term::from_field_text(left_field, "left1");
let term_query = TermQuery::new(term, IndexRecordOption::WithFreqs);
searcher.search(&term_query, &mut collector).unwrap();
let scored_docs = collector.top_docs();
assert_eq!(scored_docs.len(), 2);
let (score1, _) = scored_docs[0];
let top_docs = searcher
.search(&term_query, &TopDocs::with_limit(2))
.unwrap();
assert_eq!(top_docs.len(), 2);
let (score1, _) = top_docs[0];
assert_nearly_equals(0.27101856, score1);
let (score2, _) = scored_docs[1];
let (score2, _) = top_docs[1];
assert_nearly_equals(0.13736556, score2);
}
{
let query_parser = QueryParser::for_index(&index, vec![]);
let query = query_parser.parse_query("left:left2 left:left1").unwrap();
let mut collector = TopCollector::with_limit(2);
searcher.search(&*query, &mut collector).unwrap();
let scored_docs = collector.top_docs();
assert_eq!(scored_docs.len(), 2);
let (score1, _) = scored_docs[0];
let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
assert_eq!(top_docs.len(), 2);
let (score1, _) = top_docs[0];
assert_nearly_equals(0.9153879, score1);
let (score2, _) = scored_docs[1];
let (score2, _) = top_docs[1];
assert_nearly_equals(0.27101856, score2);
}
}

View File

@@ -21,14 +21,14 @@ use Term;
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT, IndexRecordOption};
/// use tantivy::schema::{Schema, TEXT, IndexRecordOption};
/// use tantivy::{Index, Result, Term};
/// use tantivy::collector::{CountCollector, TopCollector, chain};
/// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::TermQuery;
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new();
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
@@ -52,20 +52,12 @@ use Term;
/// index.load_searchers()?;
/// let searcher = index.searcher();
///
/// {
/// let mut top_collector = TopCollector::with_limit(2);
/// let mut count_collector = CountCollector::default();
/// {
/// let mut collectors = chain().push(&mut top_collector).push(&mut count_collector);
/// let query = TermQuery::new(
/// Term::from_field_text(title, "diary"),
/// IndexRecordOption::Basic,
/// );
/// searcher.search(&query, &mut collectors).unwrap();
/// }
/// assert_eq!(count_collector.count(), 2);
/// assert!(top_collector.at_capacity());
/// }
/// let query = TermQuery::new(
/// Term::from_field_text(title, "diary"),
/// IndexRecordOption::Basic,
/// );
/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
/// assert_eq!(count, 2);
///
/// Ok(())
/// }

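Note the tuple in the updated example: collectors now compose as tuples, and `search` returns the tuple of their respective results, computed in a single pass. A sketch of the pattern, with the searcher and field taken as givens:

```rust
use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::{Field, IndexRecordOption};
use tantivy::{DocAddress, Result, Score, Searcher, Term};

// One pass over the matching documents feeds both collectors.
fn top_docs_and_count(
    searcher: &Searcher,
    title: Field,
) -> Result<(Vec<(Score, DocAddress)>, usize)> {
    let query = TermQuery::new(
        Term::from_field_text(title, "diary"),
        IndexRecordOption::Basic,
    );
    searcher.search(&query, &(TopDocs::with_limit(2), Count))
}
```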
View File

@@ -6,7 +6,7 @@ use Result;
/// for a given set of segments.
///
/// See [`Query`](./trait.Query.html).
pub trait Weight {
pub trait Weight: Send + Sync + 'static {
/// Returns the scorer for the given segment.
/// See [`Query`](./trait.Query.html).
fn scorer(&self, reader: &SegmentReader) -> Result<Box<Scorer>>;

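Tightening the bound to `Send + Sync + 'static` is what lets a single weight be shared across segment-search threads, which the executor work in this changeset relies on. A rough illustrative sketch of why the bound matters (not tantivy's actual executor; it assumes `SegmentReader` values can be moved into worker threads):

```rust
use std::sync::Arc;
use std::thread;

use tantivy::query::Weight;
use tantivy::SegmentReader;

// Counts matches per segment on separate threads. `Weight: Send + Sync`
// is what makes the shared `Arc<Weight>` possible here.
fn parallel_count(weight: Arc<Weight>, readers: Vec<SegmentReader>) -> usize {
    let handles: Vec<_> = readers
        .into_iter()
        .map(|reader| {
            let weight = Arc::clone(&weight);
            thread::spawn(move || {
                let mut scorer = weight.scorer(&reader).expect("scorer");
                let mut count = 0usize;
                scorer.for_each(&mut |_doc, _score| count += 1);
                count
            })
        })
        .collect();
    handles.into_iter().map(|h| h.join().expect("join")).sum()
}
```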
View File

@@ -161,7 +161,7 @@ mod tests {
#[test]
fn test_doc() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("title", TEXT);
let mut doc = Document::default();
doc.add_text(text_field, "My title");

View File

@@ -27,7 +27,7 @@ directory.
```
use tantivy::schema::*;
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let title_options = TextOptions::default()
.set_stored()
.set_indexing_options(TextFieldIndexing::default()
@@ -44,11 +44,11 @@ We can split the problem of generating a search result page into two phases :
the search results page. (`doc_ids[] -> Document[]`)
In the first phase, the ability to search for documents by the given field is determined by the
[`TextIndexingOptions`](enum.TextIndexingOptions.html) of our [`TextOptions`]
(struct.TextOptions.html).
[`TextIndexingOptions`](enum.TextIndexingOptions.html) of our
[`TextOptions`](struct.TextOptions.html).
The effect of each possible setting is described more in detail [`TextIndexingOptions`]
(enum.TextIndexingOptions.html).
The effect of each possible setting is described more in detail
[`TextIndexingOptions`](enum.TextIndexingOptions.html).
On the other hand setting the field as stored or not determines whether the field should be returned
when [`searcher.doc(doc_address)`](../struct.Searcher.html#method.doc) is called.
@@ -62,7 +62,7 @@ The example can be rewritten :
```
use tantivy::schema::*;
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("title_options", TEXT | STORED);
let schema = schema_builder.build();
```
@@ -75,7 +75,7 @@ let schema = schema_builder.build();
```
use tantivy::schema::*;
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let num_stars_options = IntOptions::default()
.set_stored()
.set_indexed();

View File

@@ -23,13 +23,14 @@ use std::fmt;
/// ```
/// use tantivy::schema::*;
///
/// let mut schema_builder = SchemaBuilder::default();
/// let mut schema_builder = Schema::builder();
/// let id_field = schema_builder.add_text_field("id", STRING);
/// let title_field = schema_builder.add_text_field("title", TEXT);
/// let body_field = schema_builder.add_text_field("body", TEXT);
/// let schema = schema_builder.build();
///
/// ```
#[derive(Default)]
pub struct SchemaBuilder {
fields: Vec<FieldEntry>,
fields_map: HashMap<String, Field>,
@@ -120,15 +121,6 @@ impl SchemaBuilder {
}
}
impl Default for SchemaBuilder {
fn default() -> SchemaBuilder {
SchemaBuilder {
fields: Vec::new(),
fields_map: HashMap::new(),
}
}
}
struct InnerSchema {
fields: Vec<FieldEntry>,
fields_map: HashMap<String, Field>, // transient
@@ -142,7 +134,6 @@ impl PartialEq for InnerSchema {
impl Eq for InnerSchema {}
/// Tantivy has a very strict schema.
/// You need to specify in advance, whether a field is indexed or not,
/// stored or not, and RAM-based or not.
@@ -156,7 +147,7 @@ impl Eq for InnerSchema {}
/// ```
/// use tantivy::schema::*;
///
/// let mut schema_builder = SchemaBuilder::default();
/// let mut schema_builder = Schema::builder();
/// let id_field = schema_builder.add_text_field("id", STRING);
/// let title_field = schema_builder.add_text_field("title", TEXT);
/// let body_field = schema_builder.add_text_field("body", TEXT);
@@ -182,6 +173,11 @@ impl Schema {
&self.0.fields
}
/// Creates a new builder.
pub fn builder() -> SchemaBuilder {
SchemaBuilder::default()
}
/// Returns the field options associated with a given name.
///
/// # Panics
@@ -327,7 +323,7 @@ mod tests {
#[test]
pub fn is_indexed_test() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let field_str = schema_builder.add_text_field("field_str", STRING);
let schema = schema_builder.build();
assert!(schema.get_field_entry(field_str).is_indexed());
@@ -335,7 +331,7 @@ mod tests {
#[test]
pub fn test_schema_serialization() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let count_options = IntOptions::default()
.set_stored()
.set_fast(Cardinality::SingleValue);
@@ -404,7 +400,7 @@ mod tests {
#[test]
pub fn test_document_to_json() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let count_options = IntOptions::default()
.set_stored()
.set_fast(Cardinality::SingleValue);
@@ -425,7 +421,7 @@ mod tests {
#[test]
pub fn test_parse_document() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let count_options = IntOptions::default()
.set_stored()
.set_fast(Cardinality::SingleValue);

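The schema changes above boil down to one convenience: `Schema::builder()` becomes the documented entry point, and the hand-written `Default` impl is replaced by a derived one. Both spellings remain valid; for reference:

```rust
use tantivy::schema::{Schema, TEXT};

fn build_schema() -> Schema {
    // `Schema::builder()` is simply a more discoverable alias for
    // `SchemaBuilder::default()`.
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("title", TEXT);
    schema_builder.build()
}
```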
View File

@@ -201,7 +201,7 @@ mod tests {
#[test]
pub fn test_term() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("text", STRING);
let title_field = schema_builder.add_text_field("title", STRING);
let count_field = schema_builder.add_text_field("count", STRING);

View File

@@ -141,7 +141,7 @@ mod tests {
assert!(field_options.get_indexing_options().is_some());
}
{
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build();
let field = schema.get_field("body").unwrap();

View File

@@ -22,6 +22,11 @@ impl HighlightSection {
fn new(start: usize, stop: usize) -> HighlightSection {
HighlightSection { start, stop }
}
/// Returns the bounds of the `HighlightSection`.
pub fn bounds(&self) -> (usize, usize) {
(self.start, self.stop)
}
}
#[derive(Debug)]
@@ -65,6 +70,8 @@ impl FragmentCandidate {
}
}
/// `Snippet`
/// Contains a fragment of a document, and some highlighted parts inside it.
#[derive(Debug)]
pub struct Snippet {
fragments: String,
@@ -75,6 +82,7 @@ const HIGHLIGHTEN_PREFIX: &str = "<b>";
const HIGHLIGHTEN_POSTFIX: &str = "</b>";
impl Snippet {
/// Create a new, empty, `Snippet`
pub fn empty() -> Snippet {
Snippet {
fragments: String::new(),
@@ -99,6 +107,16 @@ impl Snippet {
));
html
}
/// Returns a fragment from the `Snippet`.
pub fn fragments(&self) -> &str {
&self.fragments
}
/// Returns a list of highlighted positions from the `Snippet`.
pub fn highlighted(&self) -> &[HighlightSection] {
&self.highlighted
}
}
/// Returns a non-empty list of "good" fragments.
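These accessors are what make user-side rendering possible (the commit adds an example of a custom highlighter alongside them). A sketch that wraps each highlighted byte range in custom markers, assuming `Snippet` is re-exported at the crate root like `SnippetGenerator`:

```rust
use tantivy::Snippet;

// Rebuilds the fragment text, surrounding each highlighted section
// (byte offsets from `bounds()`) with custom markers.
fn highlight_with_arrows(snippet: &Snippet) -> String {
    let mut result = String::new();
    let mut start_from = 0;
    for (start, stop) in snippet.highlighted().iter().map(|h| h.bounds()) {
        result.push_str(&snippet.fragments()[start_from..start]);
        result.push_str("--> ");
        result.push_str(&snippet.fragments()[start..stop]);
        result.push_str(" <--");
        start_from = stop;
    }
    result.push_str(&snippet.fragments()[start_from..]);
    result
}
```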
@@ -197,12 +215,12 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
/// # #[macro_use]
/// # extern crate tantivy;
/// # use tantivy::Index;
/// # use tantivy::schema::{SchemaBuilder, TEXT};
/// # use tantivy::schema::{Schema, TEXT};
/// # use tantivy::query::QueryParser;
/// use tantivy::SnippetGenerator;
///
/// # fn main() -> tantivy::Result<()> {
/// # let mut schema_builder = SchemaBuilder::default();
/// # let mut schema_builder = Schema::builder();
/// # let text_field = schema_builder.add_text_field("text", TEXT);
/// # let schema = schema_builder.build();
/// # let index = Index::create_in_ram(schema);
@@ -255,8 +273,7 @@ impl SnippetGenerator {
} else {
None
}
})
.collect();
}).collect();
let tokenizer = searcher.index().tokenizer_for_field(field)?;
Ok(SnippetGenerator {
terms_text,
@@ -306,7 +323,7 @@ impl SnippetGenerator {
mod tests {
use super::{search_fragments, select_best_fragment_combination};
use query::QueryParser;
use schema::{IndexRecordOption, SchemaBuilder, TextFieldIndexing, TextOptions, TEXT};
use schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions, TEXT};
use std::collections::BTreeMap;
use std::iter::Iterator;
use tokenizer::{box_tokenizer, SimpleTokenizer};
@@ -328,8 +345,6 @@ to the project are from community members.[15]
Rust won first place for "most loved programming language" in the Stack Overflow Developer
Survey in 2016, 2017, and 2018."#;
#[test]
fn test_snippet() {
let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
@@ -345,13 +360,18 @@ Survey in 2016, 2017, and 2018."#;
assert_eq!(first.stop_offset, 89);
}
let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
assert_eq!(snippet.fragments, "Rust is a systems programming language sponsored by \
Mozilla which\ndescribes it as a \"safe");
assert_eq!(snippet.to_html(), "<b>Rust</b> is a systems programming <b>language</b> \
sponsored by Mozilla which\ndescribes it as a &quot;safe")
assert_eq!(
snippet.fragments,
"Rust is a systems programming language sponsored by \
Mozilla which\ndescribes it as a \"safe"
);
assert_eq!(
snippet.to_html(),
"<b>Rust</b> is a systems programming <b>language</b> \
sponsored by Mozilla which\ndescribes it as a &quot;safe"
)
}
#[test]
fn test_snippet_scored_fragment() {
let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
@@ -385,10 +405,8 @@ Survey in 2016, 2017, and 2018."#;
let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
assert_eq!(snippet.to_html(), "programming <b>language</b>")
}
}
#[test]
fn test_snippet_in_second_fragment() {
let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
@@ -495,10 +513,9 @@ Survey in 2016, 2017, and 2018."#;
assert_eq!(snippet.to_html(), "");
}
#[test]
fn test_snippet_generator_term_score() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -521,23 +538,32 @@ Survey in 2016, 2017, and 2018."#;
{
let query = query_parser.parse_query("a").unwrap();
let snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field).unwrap();
assert_eq!(&btreemap!("a".to_string() => 0.25f32), snippet_generator.terms_text());
assert_eq!(
&btreemap!("a".to_string() => 0.25f32),
snippet_generator.terms_text()
);
}
{
let query = query_parser.parse_query("a b").unwrap();
let snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field).unwrap();
assert_eq!(&btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5), snippet_generator.terms_text());
assert_eq!(
&btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5),
snippet_generator.terms_text()
);
}
{
let query = query_parser.parse_query("a b c").unwrap();
let snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field).unwrap();
assert_eq!(&btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5), snippet_generator.terms_text());
assert_eq!(
&btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5),
snippet_generator.terms_text()
);
}
}
#[test]
fn test_snippet_generator() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_options = TextOptions::default().set_indexing_options(
TextFieldIndexing::default()
.set_tokenizer("en_stem")

View File

@@ -117,8 +117,8 @@ impl SegmentSpaceUsage {
/// Clones the underlying data.
/// Use the components directly if this is somehow in performance critical code.
pub fn component(&self, component: SegmentComponent) -> ComponentSpaceUsage {
use SegmentComponent::*;
use self::ComponentSpaceUsage::*;
use SegmentComponent::*;
match component {
POSTINGS => PerField(self.postings().clone()),
POSITIONS => PerField(self.positions().clone()),
@@ -221,7 +221,7 @@ impl StoreSpaceUsage {
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PerFieldSpaceUsage {
fields: HashMap<Field, FieldUsage>,
total: ByteCount
total: ByteCount,
}
impl PerFieldSpaceUsage {
@@ -265,7 +265,7 @@ impl FieldUsage {
}
pub(crate) fn add_field_idx(&mut self, idx: usize, size: ByteCount) {
if self.sub_num_bytes.len() < idx + 1{
if self.sub_num_bytes.len() < idx + 1 {
self.sub_num_bytes.resize(idx + 1, None);
}
assert!(self.sub_num_bytes[idx].is_none());
@@ -292,17 +292,17 @@ impl FieldUsage {
#[cfg(test)]
mod test {
use core::Index;
use schema::SchemaBuilder;
use schema::{FAST, INT_INDEXED, TEXT};
use schema::Field;
use schema::Schema;
use schema::STORED;
use schema::{FAST, INT_INDEXED, TEXT};
use space_usage::ByteCount;
use space_usage::PerFieldSpaceUsage;
use schema::STORED;
use Term;
#[test]
fn test_empty() {
let schema = SchemaBuilder::new().build();
let schema = Schema::builder().build();
let index = Index::create_in_ram(schema.clone());
index.load_searchers().unwrap();
@@ -311,18 +311,26 @@ mod test {
assert_eq!(0, searcher_space_usage.total());
}
fn expect_single_field(field_space: &PerFieldSpaceUsage, field: &Field, min_size: ByteCount, max_size: ByteCount) {
fn expect_single_field(
field_space: &PerFieldSpaceUsage,
field: &Field,
min_size: ByteCount,
max_size: ByteCount,
) {
assert!(field_space.total() >= min_size);
assert!(field_space.total() <= max_size);
assert_eq!(
vec![(field, field_space.total())],
field_space.fields().map(|(x,y)| (x, y.total())).collect::<Vec<_>>()
field_space
.fields()
.map(|(x, y)| (x, y.total()))
.collect::<Vec<_>>()
);
}
#[test]
fn test_fast_indexed() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let name = schema_builder.add_u64_field("name", FAST | INT_INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
@@ -354,13 +362,13 @@ mod test {
expect_single_field(segment.fast_fields(), &name, 1, 512);
expect_single_field(segment.fieldnorms(), &name, 1, 512);
// TODO: understand why the following fails
// assert_eq!(0, segment.store().total());
// assert_eq!(0, segment.store().total());
assert_eq!(0, segment.deletes());
}
#[test]
fn test_text() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let name = schema_builder.add_text_field("name", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
@@ -369,7 +377,9 @@ mod test {
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(name => "hi"));
index_writer.add_document(doc!(name => "this is a test"));
index_writer.add_document(doc!(name => "some more documents with some word overlap with the other test"));
index_writer.add_document(
doc!(name => "some more documents with some word overlap with the other test"),
);
index_writer.add_document(doc!(name => "hello hi goodbye"));
index_writer.commit().unwrap();
}
@@ -392,13 +402,13 @@ mod test {
assert_eq!(0, segment.fast_fields().total());
expect_single_field(segment.fieldnorms(), &name, 1, 512);
// TODO: understand why the following fails
// assert_eq!(0, segment.store().total());
// assert_eq!(0, segment.store().total());
assert_eq!(0, segment.deletes());
}
#[test]
fn test_store() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let name = schema_builder.add_text_field("name", STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
@@ -407,7 +417,9 @@ mod test {
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(name => "hi"));
index_writer.add_document(doc!(name => "this is a test"));
index_writer.add_document(doc!(name => "some more documents with some word overlap with the other test"));
index_writer.add_document(
doc!(name => "some more documents with some word overlap with the other test"),
);
index_writer.add_document(doc!(name => "hello hi goodbye"));
index_writer.commit().unwrap();
}
@@ -436,7 +448,7 @@ mod test {
#[test]
fn test_deletes() {
let mut schema_builder = SchemaBuilder::new();
let mut schema_builder = Schema::builder();
let name = schema_builder.add_u64_field("name", INT_INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
@@ -478,7 +490,7 @@ mod test {
assert_eq!(0, segment.fast_fields().total());
expect_single_field(segment.fieldnorms(), &name, 1, 512);
// TODO: understand why the following fails
// assert_eq!(0, segment.store().total());
// assert_eq!(0, segment.store().total());
assert!(segment.deletes() > 0);
}
}
}
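The hunks above are rustfmt-only, but the tests document the space-usage API itself. A small usage sketch, assuming the `space_usage()` and `total()` accessors exercised by these tests:

```rust
use tantivy::Index;

// Prints the overall footprint of an index, per the accessors
// exercised in the tests above.
fn print_space_usage(index: &Index) {
    let searcher = index.searcher();
    let space_usage = searcher.space_usage();
    println!("total: {} bytes", space_usage.total());
}
```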

View File

@@ -56,12 +56,12 @@ pub mod tests {
use directory::{Directory, RAMDirectory, WritePtr};
use schema::Document;
use schema::FieldValue;
use schema::Schema;
use schema::TextOptions;
use schema::{Schema, SchemaBuilder};
use std::path::Path;
pub fn write_lorem_ipsum_store(writer: WritePtr, num_docs: usize) -> Schema {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let field_body = schema_builder.add_text_field("body", TextOptions::default().set_stored());
let field_title =
schema_builder.add_text_field("title", TextOptions::default().set_stored());

View File

@@ -35,7 +35,7 @@ mod tests {
use core::Index;
use directory::{Directory, RAMDirectory, ReadOnlySource};
use postings::TermInfo;
use schema::{Document, FieldType, SchemaBuilder, TEXT};
use schema::{Document, FieldType, Schema, TEXT};
use std::path::PathBuf;
use std::str;
@@ -129,7 +129,7 @@ mod tests {
#[test]
fn test_term_iterator() {
let mut schema_builder = SchemaBuilder::default();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build());
{

View File

@@ -0,0 +1,78 @@
extern crate aho_corasick;
use self::aho_corasick::{AcAutomaton, Automaton};
use std::mem;
use super::{OffsetIncrements, OffsetIncrementsBuilder};
pub trait CharMogrifier {
fn process_text(&mut self, text: &str, dest: &mut String, correction: &mut OffsetIncrementsBuilder);
}
pub struct CharFilter {
text: String,
buffer: String,
mogrifiers: Vec<Box<CharMogrifier>>
}
impl CharFilter {
fn process_text(&mut self, text: &str) {
self.text.clear();
self.text.push_str(text);
self.buffer.clear();
let mut offset_increment_builder = OffsetIncrements::builder();
for mogrifier in &mut self.mogrifiers {
mogrifier.process_text(&self.text,
&mut self.buffer,
&mut offset_increment_builder);
mem::swap(&mut self.text, &mut self.buffer);
offset_increment_builder.new_layer();
}
}
}
pub struct SubstringReplacer<'a> {
automaton: AcAutomaton<&'a str>,
replacements: Vec<&'a str>
}
impl SubstringReplacer<'static> {
fn new(from_tos: Vec<(&'static str, &'static str)>) -> SubstringReplacer<'static> {
let from_ptns: Vec<&'static str> = from_tos
.iter()
.map(|(from_str, _)| *from_str)
.collect();
let to_strs: Vec<&'static str> = from_tos
.iter()
.map(|(_, to_str)| *to_str)
.collect();
let automaton = AcAutomaton::new(from_ptns);
SubstringReplacer {
automaton,
replacements: to_strs
}
}
}
impl<'a> CharMogrifier for SubstringReplacer<'a> {
// correction is an array that goes from old_offset -> new_offset.
// correction len is `text.len() + 1`
fn process_text(&mut self, text: &str, dest: &mut String, correction: &mut OffsetIncrementsBuilder) {
let mut start = 0;
for m in self.automaton.find(text) {
dest.push_str(&text[start..m.start]);
let replacement = self.replacements[m.pati];
let previous_len = m.end - m.start;
correction.register_inc(m.end, (replacement.len() as isize) - (previous_len as isize));
dest.push_str(replacement);
start = m.end;
}
dest.push_str(&text[start..]);
}
}
// lowercasing
// Unicode normalization*
// '
// accent simplification
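A usage sketch for the replacer above, assuming it is exercised from inside this module (its constructor is currently private): `"&"` → `" and "` grows the text by four bytes while `"'"` → `""` shrinks it by one, and both deltas flow through `register_inc`:

```rust
#[cfg(test)]
mod tests {
    use super::{CharMogrifier, OffsetIncrements, SubstringReplacer};

    #[test]
    fn test_substring_replacer() {
        let mut replacer = SubstringReplacer::new(vec![("&", " and "), ("'", "")]);
        let mut dest = String::new();
        let mut offsets = OffsetIncrements::builder();
        replacer.process_text("tom & jerry's", &mut dest, &mut offsets);
        // "&" -> " and " registers delta +4; "'" -> "" registers delta -1.
        assert_eq!(dest, "tom  and  jerrys");
    }
}
```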

View File

@@ -0,0 +1,4 @@
mod char_filter;
mod offset_increments;
pub use self::offset_increments::{OffsetIncrements, OffsetIncrementsBuilder};

View File

@@ -0,0 +1,264 @@
/*!
Stores an increasing mapping from naturals to naturals.
`CharFilter`s may make the original text longer or shorter.
Tokens' offsets need to refer to their offsets in the original
text.
This struct is in charge of efficiently book-keeping these
shifts in offsets, and provides a mapping
from the transformed text to the original text.
We define the inverse of an increasing mapping `f` as:
g(i) = max {j | f(j) <= i}
    != min {j | f(j) >= i}
The name `inverse` is a bit misleading:
`g` is not a true inverse of `f`.
Note that having a single definition has some bad side effects.
For instance, when trying to map a segment of chars back to
its span in the original string, the reverse mapping may
yield an empty span.
We could use a different definition of the reverse mapping
when computing the lower bound and the upper bound of the segment,
but then non-overlapping tokens could have overlapping origins.
# Example
```
forward mapping
[0, 1, 2, 5, 6, 7]
Encoded sparsely as [(3, 2)]
reverse mapping
[0, 1, 2, 2, 2, 3, 4, 5]
Encoded sparsely as [(3, -1), (4, -1), (5, -1)]
```
*/
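To make the definition above concrete, a standalone sketch that computes the dense reverse mapping `g` from a dense forward mapping `f`, reproducing the example just given:

```rust
// g(i) = max { j | f(j) <= i }, computed naively for illustration.
fn inverse(f: &[usize], image_len: usize) -> Vec<usize> {
    (0..image_len)
        .map(|i| {
            f.iter()
                .enumerate()
                .filter(|&(_, &fj)| fj <= i)
                .map(|(j, _)| j)
                .last()
                .expect("f must start at 0")
        })
        .collect()
}

fn main() {
    // The forward mapping [0,1,2,5,6,7] from the example above has the
    // reverse mapping [0,1,2,2,2,3,4,5] over 0..8.
    assert_eq!(
        inverse(&[0, 1, 2, 5, 6, 7], 8),
        vec![0, 1, 2, 2, 2, 3, 4, 5]
    );
}
```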
/// Builds a reverse mapping using a sparse representation of the
/// forward mapping.
pub struct OffsetIncrementsBuilder {
cumulated: isize,
incs: Vec<(usize, isize)>,
}
impl OffsetIncrementsBuilder {
/// We require
/// - `from_offset + delta >= 0`
/// There is no need to call this function if delta = 0.
pub fn register_inc(&mut self, from_offset: usize, delta: isize) {
let mut cumulated = self.cumulated;
let from_offset_isize = from_offset as isize;
let to_offset = (from_offset_isize + self.cumulated) as usize;
if delta > 0 {
for i in 0..delta as usize {
cumulated += 1;
self.incs.push((to_offset + i, -cumulated));
}
} else {
assert_eq!(delta, -1);
cumulated -= 1;
self.incs.push((to_offset + 1, -cumulated));
}
println!("incs {:?}", self.incs);
self.cumulated = cumulated;
}
pub fn new_layer(&self) {
// WIP placeholder: stacking several mogrifier layers is not implemented yet.
panic!();
}
fn build(self) -> OffsetIncrements {
OffsetIncrements {
incs: self.incs
}
}
}
#[derive(Default)]
pub struct OffsetIncrementsReader {
shifts: Vec<(usize, isize)>,
current_shift: isize,
idx: usize,
}
impl OffsetIncrementsReader {
fn new(shifts: Vec<(usize, isize)>) -> OffsetIncrementsReader {
OffsetIncrementsReader {
shifts,
current_shift: 0,
idx: 0,
}
}
fn convert_offset(&mut self, target: usize) -> usize {
while self.idx < self.shifts.len() {
let (offset, shift) = self.shifts[self.idx];
if offset > target {
break;
} else {
self.current_shift = shift;
}
self.idx += 1;
}
(self.current_shift + target as isize) as usize
}
}
pub struct OffsetIncrements {
incs: Vec<(usize, isize)>
}
impl OffsetIncrements {
pub fn builder() -> OffsetIncrementsBuilder {
OffsetIncrementsBuilder {
cumulated: 0,
incs: Vec::new(),
}
}
pub fn reader(&self) -> OffsetIncrementsReader {
OffsetIncrementsReader::new(self.incs.clone()) // TODO Fixme, no clone
}
}
#[cfg(test)]
mod tests {
use super::OffsetIncrements;
use super::OffsetIncrementsReader;
#[test]
fn test_offset_increment_reader_empty() {
let mut reader = OffsetIncrementsReader::new(vec![]);
for i in 0..3 {
assert_eq!(reader.convert_offset(i), i);
}
}
#[test]
fn test_offset_increment_reader_step() {
let mut reader = OffsetIncrementsReader::new(vec![(1, 1), (3, 3), (6, 2), (7, 1), (8, 0), (9, -1)]);
assert_eq!(reader.convert_offset(0), 0);
assert_eq!(reader.convert_offset(1), 2);
assert_eq!(reader.convert_offset(2), 3);
assert_eq!(reader.convert_offset(3), 6);
assert_eq!(reader.convert_offset(4), 7);
assert_eq!(reader.convert_offset(5), 8);
assert_eq!(reader.convert_offset(6), 8);
assert_eq!(reader.convert_offset(7), 8);
assert_eq!(reader.convert_offset(8), 8);
assert_eq!(reader.convert_offset(9), 8);
}
#[test]
fn test_offset_increment_reader_step_neg() {
let mut reader = OffsetIncrementsReader::new(vec![(1, -1), (2, -2), (3, -3)]);
assert_eq!(reader.convert_offset(0), 0);
assert_eq!(reader.convert_offset(1), 0);
assert_eq!(reader.convert_offset(2), 0);
assert_eq!(reader.convert_offset(3), 0);
assert_eq!(reader.convert_offset(4), 1);
assert_eq!(reader.convert_offset(5), 2);
assert_eq!(reader.convert_offset(6), 3);
assert_eq!(reader.convert_offset(7), 4);
}
fn aux_test_increment(increments: OffsetIncrements, expected: &[usize]) {
let mut reader = increments.reader();
for (i, el) in expected.iter().cloned().enumerate() {
assert_eq!(reader.convert_offset(i), el);
}
}
fn assert_is_increasing(v: &[usize]) {
assert!(v.len() > 0);
assert_eq!(v[0], 0);
let mut prec = 0;
for &val in &v[1..] {
assert!(val >= prec);
prec = val;
}
}
fn is_inverse(fwd: &[usize], rev: &[usize]) {
assert_is_increasing(fwd);
assert_is_increasing(rev);
println!("fwd {:?} rev {:?}", fwd, rev);
for (i, &antecedent) in rev.iter().enumerate() {
let expected = fwd
.iter()
.enumerate()
.filter(|(_, v)| **v <= i)
.map(|(ord, _)| ord)
.last()
.unwrap();
println!("i {}", i);
assert_eq!(expected, antecedent);
}
}
#[test]
fn test_is_inverse() {
is_inverse(&[0,1,1,1,2], &[0, 3, 4]);
}
fn is_reciprocal(left: &[usize], right: &[usize]) {
is_inverse(left, right);
}
#[test]
fn test_offset_increments_shorten() {
{
let mut offset_increment_builder = OffsetIncrements::builder();
// abcd -> abd
offset_increment_builder.register_inc(2, -1);
aux_test_increment(offset_increment_builder.build(), &[0, 1, 2, 4]);
}
{
let mut offset_increment_builder = OffsetIncrements::builder();
// abcdefgh -> abcdfgh
offset_increment_builder.register_inc(4, -1);
aux_test_increment(offset_increment_builder.build(), &[0, 1, 2, 3, 4, 6]);
}
{
let mut offset_increment_builder = OffsetIncrements::builder();
// abcd -> bcd
offset_increment_builder.register_inc(0, -1);
aux_test_increment(offset_increment_builder.build(), &[0, 2, 3]);
}
}
#[test]
fn test_offset_increments_builder() {
{
let mut offset_increment_builder = OffsetIncrements::builder();
offset_increment_builder.register_inc(2, 1);
// [0, 1, 3, 4, 5]
aux_test_increment(offset_increment_builder.build(), &[0,1,1,2,3,4,5]);
}
{
let mut offset_increment_builder = OffsetIncrements::builder();
offset_increment_builder.register_inc(3, 2);
// [0, 1, 2, 4, 5, 6]
aux_test_increment(offset_increment_builder.build(), &[0,1,2,2,2,3,4,5]);
}
{
let mut offset_increment_builder = OffsetIncrements::builder();
// 0, 0, 1, 2, 2, 2
offset_increment_builder.register_inc(1, 1);
offset_increment_builder.register_inc(3, 3);
aux_test_increment(offset_increment_builder.build(), &[0,0,1,2,2,2,2,3,4]);
}
}
}

View File

@@ -9,7 +9,7 @@
//! use tantivy::schema::*;
//!
//! # fn main() {
//! let mut schema_builder = SchemaBuilder::new();
//! let mut schema_builder = Schema::builder();
//!
//! let text_options = TextOptions::default()
//! .set_indexing_options(
@@ -82,12 +82,12 @@
//!
//! ```
//! # extern crate tantivy;
//! # use tantivy::schema::SchemaBuilder;
//! # use tantivy::schema::Schema;
//! # use tantivy::tokenizer::*;
//! # use tantivy::Index;
//! # fn main() {
//! # let custom_en_tokenizer = SimpleTokenizer;
//! # let schema = SchemaBuilder::new().build();
//! # let schema = Schema::builder().build();
//! let index = Index::create_in_ram(schema);
//! index.tokenizers()
//! .register("custom_en", custom_en_tokenizer);
@@ -101,12 +101,12 @@
//!
//! ```
//! extern crate tantivy;
//! use tantivy::schema::{SchemaBuilder, IndexRecordOption, TextOptions, TextFieldIndexing};
//! use tantivy::schema::{Schema, IndexRecordOption, TextOptions, TextFieldIndexing};
//! use tantivy::tokenizer::*;
//! use tantivy::Index;
//!
//! # fn main() {
//! let mut schema_builder = SchemaBuilder::new();
//! let mut schema_builder = Schema::builder();
//! let text_field_indexing = TextFieldIndexing::default()
//! .set_tokenizer("custom_en")
//! .set_index_option(IndexRecordOption::WithFreqsAndPositions);
@@ -140,6 +140,7 @@ mod stop_word_filter;
mod token_stream_chain;
mod tokenizer;
mod tokenizer_manager;
mod char_processing;
pub use self::alphanum_only::AlphaNumOnlyFilter;
pub use self::facet_tokenizer::FacetTokenizer;
@@ -157,13 +158,11 @@ pub use self::tokenizer::BoxedTokenizer;
pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer};
pub use self::tokenizer_manager::TokenizerManager;
#[cfg(test)]
pub mod tests {
use super::Token;
use super::TokenizerManager;
/// This is a function that can be used in tests and doc tests
/// to assert a token's correctness.
pub fn assert_token(token: &Token, position: usize, text: &str, from: usize, to: usize) {

View File

@@ -108,7 +108,7 @@ impl NgramTokenizer {
/// Create a `NGramTokenizer` which generates tokens for all inner ngrams.
///
/// This is as opposed to only prefix ngrams .
pub fn all_ngrams(min_gram: usize, max_gram:usize) -> NgramTokenizer {
pub fn all_ngrams(min_gram: usize, max_gram: usize) -> NgramTokenizer {
Self::new(min_gram, max_gram, false)
}
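For context, `all_ngrams(2, 3)` emits every inner ngram of two or three codepoints. A small usage sketch, assuming `NgramTokenizer` is exported from `tantivy::tokenizer`:

```rust
use tantivy::tokenizer::{NgramTokenizer, TokenStream, Tokenizer};

// Prints the 2- and 3-codepoint ngrams of "hello":
// he, hel, el, ell, ll, llo, lo.
fn print_ngrams() {
    let tokenizer = NgramTokenizer::all_ngrams(2, 3);
    let mut stream = tokenizer.token_stream("hello");
    while stream.advance() {
        println!("{}", stream.token().text);
    }
}
```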
@@ -137,9 +137,10 @@ impl<'a> Tokenizer<'a> for NgramTokenizer {
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
NgramTokenStream {
ngram_charidx_iterator: StutteringIterator::new(
CodepointFrontiers::for_str(text),
CodepointFrontiers::for_str(text),
self.min_gram,
self.max_gram),
self.max_gram,
),
prefix_only: self.prefix_only,
text,
token: Token::default(),
@@ -172,7 +173,6 @@ impl<'a> TokenStream for NgramTokenStream<'a> {
}
}
/// This iterator takes an underlying Iterator
/// and emits all of the pairs `(a,b)` such that
/// a and b are items emitted by the iterator at
@@ -190,11 +190,13 @@ struct StutteringIterator<T> {
memory: Vec<usize>,
cursor: usize,
gram_len: usize
gram_len: usize,
}
impl<T> StutteringIterator<T>
where T: Iterator<Item=usize> {
where
T: Iterator<Item = usize>,
{
pub fn new(mut underlying: T, min_gram: usize, max_gram: usize) -> StutteringIterator<T> {
assert!(min_gram > 0);
let memory: Vec<usize> = (&mut underlying).take(max_gram + 1).collect();
@@ -222,7 +224,9 @@ impl<T> StutteringIterator<T>
}
impl<T> Iterator for StutteringIterator<T>
where T: Iterator<Item=usize> {
where
T: Iterator<Item = usize>,
{
type Item = (usize, usize);
fn next(&mut self) -> Option<(usize, usize)> {
@@ -230,7 +234,7 @@ impl<T> Iterator for StutteringIterator<T>
// we have exhausted all options
// starting at `self.memory[self.cursor]`.
//
// Time to advance.
// Time to advance.
self.gram_len = self.min_gram;
if let Some(next_val) = self.underlying.next() {
self.memory[self.cursor] = next_val;
@@ -252,22 +256,20 @@ impl<T> Iterator for StutteringIterator<T>
}
}
/// Emits all of the offsets where a codepoint starts
/// or a codepoint ends.
///
/// By convention, we emit [0] for the empty string.
struct CodepointFrontiers<'a> {
s: &'a str,
next_el: Option<usize>
next_el: Option<usize>,
}
impl<'a> CodepointFrontiers<'a> {
fn for_str(s: &'a str) -> Self {
CodepointFrontiers {
s,
next_el: Some(0)
next_el: Some(0),
}
}
}
@@ -276,26 +278,20 @@ impl<'a> Iterator for CodepointFrontiers<'a> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
self.next_el
.map(|offset| {
if self.s.is_empty() {
self.next_el = None;
} else {
let first_codepoint_width = utf8_codepoint_width(self.s.as_bytes()[0]);
self.s = &self.s[first_codepoint_width..];
self.next_el = Some(offset + first_codepoint_width);
}
offset
})
self.next_el.map(|offset| {
if self.s.is_empty() {
self.next_el = None;
} else {
let first_codepoint_width = utf8_codepoint_width(self.s.as_bytes()[0]);
self.s = &self.s[first_codepoint_width..];
self.next_el = Some(offset + first_codepoint_width);
}
offset
})
}
}
const CODEPOINT_UTF8_WIDTH: [u8; 16] = [
1, 1, 1, 1,
1, 1, 1, 1,
2, 2, 2, 2,
2, 2, 3, 4,
];
const CODEPOINT_UTF8_WIDTH: [u8; 16] = [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 4];
// Number of bytes to encode a codepoint in UTF-8 given
// the first byte.
@@ -309,13 +305,13 @@ fn utf8_codepoint_width(b: u8) -> usize {
#[cfg(test)]
mod tests {
use tokenizer::tokenizer::{TokenStream, Tokenizer};
use super::NgramTokenizer;
use tokenizer::Token;
use tokenizer::tests::assert_token;
use super::CodepointFrontiers;
use super::StutteringIterator;
use super::utf8_codepoint_width;
use super::CodepointFrontiers;
use super::NgramTokenizer;
use super::StutteringIterator;
use tokenizer::tests::assert_token;
use tokenizer::tokenizer::{TokenStream, Tokenizer};
use tokenizer::Token;
fn test_helper<T: TokenStream>(mut tokenizer: T) -> Vec<Token> {
let mut tokens: Vec<Token> = vec![];
@@ -323,7 +319,6 @@ mod tests {
tokens
}
#[test]
fn test_utf8_codepoint_width() {
// 0xxx
@@ -344,17 +339,16 @@ mod tests {
}
}
#[test]
fn test_codepoint_frontiers() {
assert_eq!(CodepointFrontiers::for_str("").collect::<Vec<_>>(), vec![0]);
assert_eq!(
CodepointFrontiers::for_str("abcd").collect::<Vec<_>>(),
vec![0,1,2,3,4]
vec![0, 1, 2, 3, 4]
);
assert_eq!(
CodepointFrontiers::for_str("aあ").collect::<Vec<_>>(),
vec![0,1,4]
CodepointFrontiers::for_str("aあ").collect::<Vec<_>>(),
vec![0, 1, 4]
);
}
@@ -425,7 +419,6 @@ mod tests {
assert!(tokens.is_empty());
}
#[test]
#[should_panic(expected = "min_gram must be greater than 0")]
fn test_ngram_min_max_interval_empty() {
@@ -438,7 +431,6 @@ mod tests {
NgramTokenizer::all_ngrams(2, 1);
}
#[test]
fn test_stutterring_iterator_empty() {
let rg: Vec<usize> = vec![0];
@@ -470,4 +462,4 @@ mod tests {
assert_eq!(it.next(), None);
}
}
}