Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2026-01-08 01:52:54 +00:00)

Compare commits: 35 commits
| Author | SHA1 | Date |
|---|---|---|
| | 94c73363e4 | |
| | 96f194635f | |
| | 444662485f | |
| | 943c25d0f8 | |
| | 5c0b2a4579 | |
| | 9870a9258d | |
| | 7102b363f5 | |
| | 66b4615e4e | |
| | da46913839 | |
| | 3df037961f | |
| | 8ffae47854 | |
| | 1a90a1f3b0 | |
| | dac50c6aeb | |
| | 31b22c5acc | |
| | 8e50921363 | |
| | 96a4f503ec | |
| | 9df288b0c9 | |
| | b7c2d0de97 | |
| | 62445e0ec8 | |
| | a228825462 | |
| | d3eabd14bc | |
| | c967031d21 | |
| | d823163d52 | |
| | c4f59f202d | |
| | acd29b535d | |
| | 2cd31bcda2 | |
| | 99870de55c | |
| | cad2d91845 | |
| | 79f3cd6cf4 | |
| | e3abb4481b | |
| | bfa61d2f2f | |
| | 6c0e621fdb | |
| | a8cc5208f1 | |
| | 83eb0d0cb7 | |
| | ee6e273365 | |
.travis.yml (10 changed lines)

@@ -29,7 +29,7 @@ addons:
 matrix:
 include:
 # Android
-- env: TARGET=aarch64-linux-android
+- env: TARGET=aarch64-linux-android DISABLE_TESTS=1
 #- env: TARGET=arm-linux-androideabi DISABLE_TESTS=1
 #- env: TARGET=armv7-linux-androideabi DISABLE_TESTS=1
 #- env: TARGET=i686-linux-android DISABLE_TESTS=1
@@ -61,6 +61,9 @@ before_script:
 script:
 - bash ci/script.sh
 
+after_success:
+- cargo doc-upload
+
 before_deploy:
 - sh ci/before_deploy.sh
 
@@ -68,6 +71,11 @@ cache: cargo
 before_cache:
 # Travis can't cache files that are not readable by "others"
 - chmod -R a+r $HOME/.cargo
+- find ./target/debug -type f -maxdepth 1 -delete
+- rm -f ./target/.rustc_info.json
+- rm -fr ./target/debug/{deps,.fingerprint}/tantivy*
+- rm -r target/debug/examples/
+- ls -1 examples/ | sed -e 's/\.rs$//' | xargs -I "{}" find target/* -name "*{}*" -type f -delete
 
 #branches:
 # only:
CHANGELOG.md (67 changed lines)

@@ -1,3 +1,41 @@
+Tantivy 0.10.0
+=====================
+
+*Tantivy 0.10.0 index format is compatible with the index format in 0.9.0.*
+
+- Added an ASCII folding filter (@drusellers)
+- Bugfix in `query.count` in presence of deletes (@pmasurel)
+
+Minor
+---------
+- Small simplification of the code.
+  Calling .freq() or .doc() when .advance() has never been called
+  on segment postings should panic from now on.
+- Tokens exceeding `u16::max_value() - 4` chars are discarded silently instead of panicking.
+- Fast fields are now preloaded when the `SegmentReader` is created.
+- `IndexMeta` is now public. (@hntd187)
+- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `
+  Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term`
+  only require a read lock. (@pmasurel)
+- Introducing `Opstamp` as an expressive type alias for `u64`. (@petr-tik)
+- Stamper now relies on `AtomicU64` on all platforms (@petr-tik)
+
+## How to update?
+
+Your existing indexes are usable as is, but you may need some
+trivial updates.
+
+### Fast fields
+
+Fast fields used to be accessed directly from the `SegmentReader`.
+The API changed: you are now required to acquire your fast field reader via
+`segment_reader.fast_fields()` and use one of the typed methods:
+- `.u64()`, `.i64()` if your field is single-valued;
+- `.u64s()`, `.i64s()` if your field is multi-valued;
+- `.bytes()` if your field is a bytes fast field.
+
+
+
 Tantivy 0.9.0
 =====================
 *0.9.0 index format is not compatible with the
@@ -15,6 +53,35 @@ previous index format.*
 for int fields. (@fulmicoton)
 - Added DateTime field (@barrotsteindev)
 - Added IndexReader. By default, index is reloaded automatically upon new commits (@fulmicoton)
+- SIMD linear search within blocks (@fulmicoton)
+
+## How to update?
+
+tantivy 0.9 brought some API breaking changes.
+To update from tantivy 0.8, you will need to go through the following steps.
+
+- `schema::INT_INDEXED` and `schema::INT_STORED` should be replaced by `schema::INDEXED` and `schema::STORED`.
+- The index does not hold the pool of searchers anymore. You are required to create an intermediary object called
+  `IndexReader` for this.
+
+```rust
+// Create the reader. You typically need to create 1 reader for the entire
+// lifetime of your program.
+let reader = index.reader()?;
+
+// Acquiring a searcher (previously `index.searcher()`) is now written:
+let searcher = reader.searcher();
+
+// With the default setting of the reader, you are not required to
+// call `index.load_searchers()` anymore.
+//
+// The IndexReader will pick up that change automatically, regardless
+// of whether the update was done in a different process or not.
+// If this behavior is not wanted, you can create your reader with
+// `ReloadPolicy::Manual`, and manually decide when to reload the index
+// by calling `reader.reload()?`.
+
+```
 
 
 Tantivy 0.8.2
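To make the 0.10 fast-field migration described in the changelog above concrete, here is a minimal sketch of the new access path. It assumes a single-valued `u64` fast field; the helper function and its name are illustrative only and not part of the changelog.

```rust
use tantivy::schema::Field;
use tantivy::{DocId, SegmentReader};

// Hypothetical helper showing the tantivy 0.10 access path described above.
// 0.9:  segment_reader.fast_field_reader::<u64>(field)?
// 0.10: segment_reader.fast_fields().u64(field), which returns an Option.
fn read_u64_fast_field(segment_reader: &SegmentReader, field: Field, doc: DocId) -> Option<u64> {
    // None is returned if `field` is not a single-valued u64 fast field.
    let reader = segment_reader.fast_fields().u64(field)?;
    Some(reader.get(doc))
}
```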
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.9.0"
+version = "0.10.0-dev"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -23,7 +23,7 @@ snap = {version="0.2"}
 atomicwrites = {version="0.2.2", optional=true}
 tempfile = "3.0"
 log = "0.4"
-combine = "3"
+combine = ">=3.6.0,<4.0.0"
 tempdir = "0.3"
 serde = "1.0"
 serde_derive = "1.0"
README.md (21 changed lines)

@@ -17,6 +17,7 @@
 [](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/6)
 [](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/7)
 
+[](https://www.patreon.com/fulmicoton)
 
 
 **Tantivy** is a **full text search engine library** written in rust.
@@ -27,6 +28,14 @@ to build such a search engine.
 
 Tantivy is, in fact, strongly inspired by Lucene's design.
 
+# Benchmark
+
+Tantivy is typically faster than Lucene, but the results will depend on
+the nature of the queries in your workload.
+
+The following [benchmark](https://tantivy-search.github.io/bench/) breaks down
+performance for different types of queries / collections.
+
 # Features
 
 - Full-text search
@@ -87,6 +96,14 @@ To check out and run tests, you can simply run :
 Some tests will not run with just `cargo test` because of `fail-rs`.
 To run the tests exhaustively, run `./run-tests.sh`.
 
-# Contribute
+# How can I support this project?
 
-Send me an email (paul.masurel at gmail.com) if you want to contribute to tantivy.
+There are many ways to support this project.
+
+- If you use tantivy, tell us about your experience on [gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
+- Report bugs
+- Write a blog post
+- Complete documentation
+- Contribute code (you can join [our gitter](https://gitter.im/tantivy-search/tantivy))
+- Talk about tantivy around you
+- Drop a word on [](https://saythanks.io/to/fulmicoton) or even [](https://www.patreon.com/fulmicoton)
@@ -18,8 +18,8 @@ use tantivy::fastfield::FastFieldReader;
 use tantivy::query::QueryParser;
 use tantivy::schema::Field;
 use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
-use tantivy::Index;
 use tantivy::SegmentReader;
+use tantivy::{Index, TantivyError};
 
 #[derive(Default)]
 struct Stats {
@@ -75,9 +75,18 @@ impl Collector for StatsCollector {
 fn for_segment(
 &self,
 _segment_local_id: u32,
-segment: &SegmentReader,
+segment_reader: &SegmentReader,
 ) -> tantivy::Result<StatsSegmentCollector> {
-let fast_field_reader = segment.fast_field_reader(self.field)?;
+let fast_field_reader = segment_reader
+.fast_fields()
+.u64(self.field)
+.ok_or_else(|| {
+let field_name = segment_reader.schema().get_field_name(self.field);
+TantivyError::SchemaError(format!(
+"Field {:?} is not a u64 fast field.",
+field_name
+))
+})?;
 Ok(StatsSegmentCollector {
 fast_field_reader,
 stats: Stats::default(),
examples/multiple_producer.rs (new file, 107 lines)

@@ -0,0 +1,107 @@
// # Indexing from different threads.
//
// It is fairly common to have to index from different threads.
// Tantivy forbids to create more than one `IndexWriter` at a time.
//
// This `IndexWriter` itself has its own multithreaded layer, so managing your own
// indexing threads will not help. However, it can still be useful for some applications.
//
// For instance, if preparing documents to send to tantivy before indexing is the bottleneck of
// your application, it is reasonable to have multiple threads.
//
// Another very common reason to want to index from multiple threads, is implementing a webserver
// with CRUD capabilities. The server framework will most likely handle requests from
// different threads.
//
// The recommended way to address both of these use cases is to wrap your `IndexWriter` into an
// `Arc<RwLock<IndexWriter>>`.
//
// While this is counterintuitive, adding and deleting documents do not require mutability
// over the `IndexWriter`, so several threads will be able to do this operation concurrently.
//
// The example below does not represent an actual real-life use case (who would spawn a thread to
// index a single document?), but aims at demonstrating the mechanism that makes indexing
// from several threads possible.

extern crate tempdir;

// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::Opstamp;
use tantivy::{Index, IndexWriter};

fn main() -> tantivy::Result<()> {
    // # Defining the schema
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let body = schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema);
    let index_writer: Arc<RwLock<IndexWriter>> = Arc::new(RwLock::new(index.writer(50_000_000)?));

    // # First indexing thread.
    let index_writer_clone_1 = index_writer.clone();
    thread::spawn(move || {
        // we index 100 times the document... for the sake of the example.
        for i in 0..100 {
            let opstamp = {
                // A read lock is sufficient here.
                let index_writer_rlock = index_writer_clone_1.read().unwrap();
                index_writer_rlock.add_document(
                    doc!(
                        title => "Of Mice and Men",
                        body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
                                 bank and runs deep and green. The water is warm too, for it has slipped twinkling \
                                 over the yellow sands in the sunlight before reaching the narrow pool. On one \
                                 side of the river the golden foothill slopes curve up to the strong and rocky \
                                 Gabilan Mountains, but on the valley side the water is lined with trees—willows \
                                 fresh and green with every spring, carrying in their lower leaf junctures the \
                                 debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
                                 limbs and branches that arch over the pool"
                ))
            };
            println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
            thread::sleep(Duration::from_millis(20));
        }
    });

    // # Second indexing thread.
    let index_writer_clone_2 = index_writer.clone();
    // For convenience, tantivy also comes with a macro to
    // reduce the boilerplate above.
    thread::spawn(move || {
        // we index 100 times the document... for the sake of the example.
        for i in 0..100 {
            // A read lock is sufficient here.
            let opstamp = {
                let index_writer_rlock = index_writer_clone_2.read().unwrap();
                index_writer_rlock.add_document(doc!(
                    title => "Manufacturing consent",
                    body => "Some great book description..."
                ))
            };
            println!("add doc {} from thread 2 - opstamp {}", i, opstamp);
            thread::sleep(Duration::from_millis(10));
        }
    });

    // # In the main thread, we commit 10 times, once every 500ms.
    for _ in 0..10 {
        let opstamp: Opstamp = {
            // Committing or rollbacking, on the other hand, requires a write lock. This will block other threads.
            let mut index_writer_wlock = index_writer.write().unwrap();
            index_writer_wlock.commit().unwrap()
        };
        println!("committed with opstamp {}", opstamp);
        thread::sleep(Duration::from_millis(500));
    }

    Ok(())
}
@@ -17,6 +17,7 @@ use Result;
 use Score;
 use SegmentLocalId;
 use SegmentReader;
+use TantivyError;
 
 struct Hit<'a> {
 count: u64,
@@ -264,7 +265,10 @@ impl Collector for FacetCollector {
 _: SegmentLocalId,
 reader: &SegmentReader,
 ) -> Result<FacetSegmentCollector> {
-let facet_reader = reader.facet_reader(self.field)?;
+let field_name = reader.schema().get_field_name(self.field);
+let facet_reader = reader.facet_reader(self.field).ok_or_else(|| {
+TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name))
+})?;
 
 let mut collapse_mapping = Vec::new();
 let mut counts = Vec::new();
@@ -2,6 +2,7 @@ use super::Collector;
 use super::SegmentCollector;
 use collector::Fruit;
 use std::marker::PhantomData;
+use std::ops::Deref;
 use DocId;
 use Result;
 use Score;
@@ -199,7 +200,10 @@ impl<'a> Collector for MultiCollector<'a> {
 }
 
 fn requires_scoring(&self) -> bool {
-self.collector_wrappers.iter().any(|c| c.requires_scoring())
+self.collector_wrappers
+.iter()
+.map(Deref::deref)
+.any(Collector::requires_scoring)
 }
 
 fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> Result<MultiFruit> {
@@ -114,11 +114,15 @@ impl Collector for FastFieldTestCollector {
 fn for_segment(
 &self,
 _: SegmentLocalId,
-reader: &SegmentReader,
+segment_reader: &SegmentReader,
 ) -> Result<FastFieldSegmentCollector> {
+let reader = segment_reader
+.fast_fields()
+.u64(self.field)
+.expect("Requested field is not a fast field.");
 Ok(FastFieldSegmentCollector {
 vals: Vec::new(),
-reader: reader.fast_field_reader(self.field)?,
+reader,
 })
 }
 
@@ -170,11 +174,14 @@ impl Collector for BytesFastFieldTestCollector {
 fn for_segment(
 &self,
 _segment_local_id: u32,
-segment: &SegmentReader,
+segment_reader: &SegmentReader,
 ) -> Result<BytesFastFieldSegmentCollector> {
 Ok(BytesFastFieldSegmentCollector {
 vals: Vec::new(),
-reader: segment.bytes_fast_field_reader(self.field)?,
+reader: segment_reader
+.fast_fields()
+.bytes(self.field)
+.expect("Field is not a bytes fast field."),
 })
 }
 
@@ -191,7 +198,7 @@ impl SegmentCollector for BytesFastFieldSegmentCollector {
 type Fruit = Vec<u8>;
 
 fn collect(&mut self, doc: u32, _score: f32) {
-let data = self.reader.get_val(doc);
+let data = self.reader.get_bytes(doc);
 self.vals.extend(data);
 }
 
@@ -98,11 +98,11 @@ where
 .collect())
 }
 
-pub(crate) fn for_segment(
+pub(crate) fn for_segment<F: PartialOrd>(
 &self,
 segment_id: SegmentLocalId,
 _: &SegmentReader,
-) -> Result<TopSegmentCollector<T>> {
+) -> Result<TopSegmentCollector<F>> {
 Ok(TopSegmentCollector::new(segment_id, self.limit))
 }
 }
@@ -5,10 +5,12 @@ use collector::SegmentCollector;
 use fastfield::FastFieldReader;
 use fastfield::FastValue;
 use schema::Field;
+use std::marker::PhantomData;
 use DocAddress;
 use Result;
 use SegmentLocalId;
 use SegmentReader;
+use TantivyError;
 
 /// The Top Field Collector keeps track of the K documents
 /// sorted by a fast field in the index
@@ -106,8 +108,15 @@ impl<T: FastValue + PartialOrd + Send + Sync + 'static> Collector for TopDocsByF
 reader: &SegmentReader,
 ) -> Result<TopFieldSegmentCollector<T>> {
 let collector = self.collector.for_segment(segment_local_id, reader)?;
-let reader = reader.fast_field_reader(self.field)?;
+let reader = reader.fast_fields().u64(self.field).ok_or_else(|| {
-Ok(TopFieldSegmentCollector { collector, reader })
+let field_name = reader.schema().get_field_name(self.field);
+TantivyError::SchemaError(format!("Failed to find fast field reader {:?}", field_name))
+})?;
+Ok(TopFieldSegmentCollector {
+collector,
+reader,
+_type: PhantomData,
+})
 }
 
 fn requires_scoring(&self) -> bool {
@@ -122,9 +131,10 @@ impl<T: FastValue + PartialOrd + Send + Sync + 'static> Collector for TopDocsByF
 }
 }
 
-pub struct TopFieldSegmentCollector<T: FastValue + PartialOrd> {
+pub struct TopFieldSegmentCollector<T> {
-collector: TopSegmentCollector<T>,
+collector: TopSegmentCollector<u64>,
-reader: FastFieldReader<T>,
+reader: FastFieldReader<u64>,
+_type: PhantomData<T>,
 }
 
 impl<T: FastValue + PartialOrd + Send + Sync + 'static> SegmentCollector
@@ -138,7 +148,11 @@ impl<T: FastValue + PartialOrd + Send + Sync + 'static> SegmentCollector
 }
 
 fn harvest(self) -> Vec<(T, DocAddress)> {
-self.collector.harvest()
+self.collector
+.harvest()
+.into_iter()
+.map(|(val, doc_address)| (T::from_u64(val), doc_address))
+.collect()
 }
 }
 
@@ -235,7 +249,7 @@ mod tests {
 .for_segment(0, segment)
 .map(|_| ())
 .unwrap_err(),
-TantivyError::FastFieldError(_)
+TantivyError::SchemaError(_)
 );
 }
 
@@ -13,7 +13,10 @@ pub use self::serialize::{BinarySerializable, FixedSize};
 pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
 pub use byteorder::LittleEndian as Endianness;
 
-use std::io;
+/// Segment's max doc must be `< MAX_DOC_LIMIT`.
+///
+/// We do not allow segments with more than
+pub const MAX_DOC_LIMIT: u32 = 1 << 31;
 
 /// Computes the number of bits that will be used for bitpacking.
 ///
@@ -52,11 +55,6 @@ pub(crate) fn is_power_of_2(n: usize) -> bool {
 (n > 0) && (n & (n - 1) == 0)
 }
 
-/// Create a default io error given a string.
-pub(crate) fn make_io_err(msg: String) -> io::Error {
-io::Error::new(io::ErrorKind::Other, msg)
-}
-
 /// Has length trait
 pub trait HasLen {
 /// Return length
@@ -134,4 +132,11 @@ pub(crate) mod test {
 assert_eq!(compute_num_bits(256), 9u8);
 assert_eq!(compute_num_bits(5_000_000_000), 33u8);
 }
+
+#[test]
+fn test_max_doc() {
+// this is the first time I write a unit test for a constant.
+assert!(((super::MAX_DOC_LIMIT - 1) as i32) >= 0);
+assert!((super::MAX_DOC_LIMIT as i32) < 0);
+}
 }
@@ -24,6 +24,7 @@ use schema::Schema;
 use serde_json;
 use std::borrow::BorrowMut;
 use std::fmt;
+#[cfg(feature = "mmap")]
 use std::path::Path;
 use std::sync::Arc;
 use tokenizer::BoxedTokenizer;
@@ -339,7 +340,7 @@ impl Index {
 Ok(self
 .searchable_segment_metas()?
 .iter()
-.map(|segment_meta| segment_meta.id())
+.map(SegmentMeta::id)
 .collect())
 }
 }
@@ -355,10 +356,8 @@ mod tests {
 use directory::RAMDirectory;
 use schema::Field;
 use schema::{Schema, INDEXED, TEXT};
-use std::path::PathBuf;
 use std::thread;
 use std::time::Duration;
-use tempdir::TempDir;
 use Index;
 use IndexReader;
 use IndexWriter;
@@ -444,61 +443,69 @@ mod tests {
 test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
 }
 
-#[test]
+#[cfg(feature = "mmap")]
-fn test_index_on_commit_reload_policy_mmap() {
+mod mmap_specific {
-let schema = throw_away_schema();
-let field = schema.get_field("num_likes").unwrap();
-let tempdir = TempDir::new("index").unwrap();
-let tempdir_path = PathBuf::from(tempdir.path());
-let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
-let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-writer.commit().unwrap();
-let reader = index
-.reader_builder()
-.reload_policy(ReloadPolicy::OnCommit)
-.try_into()
-.unwrap();
-assert_eq!(reader.searcher().num_docs(), 0);
-test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
-}
 
-#[test]
+use super::*;
-fn test_index_manual_policy_mmap() {
+use std::path::PathBuf;
-let schema = throw_away_schema();
+use tempdir::TempDir;
-let field = schema.get_field("num_likes").unwrap();
-let index = Index::create_from_tempdir(schema).unwrap();
-let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-writer.commit().unwrap();
-let reader = index
-.reader_builder()
-.reload_policy(ReloadPolicy::Manual)
-.try_into()
-.unwrap();
-assert_eq!(reader.searcher().num_docs(), 0);
-writer.add_document(doc!(field=>1u64));
-writer.commit().unwrap();
-thread::sleep(Duration::from_millis(500));
-assert_eq!(reader.searcher().num_docs(), 0);
-reader.reload().unwrap();
-assert_eq!(reader.searcher().num_docs(), 1);
-}
 
 #[test]
-fn test_index_on_commit_reload_policy_different_directories() {
+fn test_index_on_commit_reload_policy_mmap() {
 let schema = throw_away_schema();
 let field = schema.get_field("num_likes").unwrap();
 let tempdir = TempDir::new("index").unwrap();
 let tempdir_path = PathBuf::from(tempdir.path());
-let write_index = Index::create_in_dir(&tempdir_path, schema).unwrap();
+let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
-let read_index = Index::open_in_dir(&tempdir_path).unwrap();
+let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-let reader = read_index
+writer.commit().unwrap();
-.reader_builder()
+let reader = index
-.reload_policy(ReloadPolicy::OnCommit)
+.reader_builder()
-.try_into()
+.reload_policy(ReloadPolicy::OnCommit)
-.unwrap();
+.try_into()
-assert_eq!(reader.searcher().num_docs(), 0);
+.unwrap();
-let mut writer = write_index.writer_with_num_threads(1, 3_000_000).unwrap();
+assert_eq!(reader.searcher().num_docs(), 0);
 test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
+}
+
+#[test]
+fn test_index_manual_policy_mmap() {
+let schema = throw_away_schema();
+let field = schema.get_field("num_likes").unwrap();
+let index = Index::create_from_tempdir(schema).unwrap();
+let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+writer.commit().unwrap();
+let reader = index
+.reader_builder()
+.reload_policy(ReloadPolicy::Manual)
+.try_into()
+.unwrap();
+assert_eq!(reader.searcher().num_docs(), 0);
+writer.add_document(doc!(field=>1u64));
+writer.commit().unwrap();
+thread::sleep(Duration::from_millis(500));
+assert_eq!(reader.searcher().num_docs(), 0);
+reader.reload().unwrap();
+assert_eq!(reader.searcher().num_docs(), 1);
+}
+
+#[test]
+fn test_index_on_commit_reload_policy_different_directories() {
+let schema = throw_away_schema();
+let field = schema.get_field("num_likes").unwrap();
+let tempdir = TempDir::new("index").unwrap();
+let tempdir_path = PathBuf::from(tempdir.path());
+let write_index = Index::create_in_dir(&tempdir_path, schema).unwrap();
+let read_index = Index::open_in_dir(&tempdir_path).unwrap();
+let reader = read_index
+.reader_builder()
+.reload_policy(ReloadPolicy::OnCommit)
+.try_into()
+.unwrap();
+assert_eq!(reader.searcher().num_docs(), 0);
+let mut writer = write_index.writer_with_num_threads(1, 3_000_000).unwrap();
+test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
+}
 }
 
 fn test_index_on_commit_reload_policy_aux(
@@ -2,6 +2,7 @@ use core::SegmentMeta;
 use schema::Schema;
 use serde_json;
 use std::fmt;
+use Opstamp;
 
 /// Meta information about the `Index`.
 ///
@@ -15,7 +16,7 @@ use std::fmt;
 pub struct IndexMeta {
 pub segments: Vec<SegmentMeta>,
 pub schema: Schema,
-pub opstamp: u64,
+pub opstamp: Opstamp,
 #[serde(skip_serializing_if = "Option::is_none")]
 pub payload: Option<String>,
 }
@@ -59,7 +59,7 @@ impl Searcher {
 ) -> Searcher {
 let store_readers = segment_readers
 .iter()
-.map(|segment_reader| segment_reader.get_store_reader())
+.map(SegmentReader::get_store_reader)
 .collect();
 Searcher {
 schema,
@@ -218,7 +218,7 @@ impl fmt::Debug for Searcher {
 let segment_ids = self
 .segment_readers
 .iter()
-.map(|segment_reader| segment_reader.segment_id())
+.map(SegmentReader::segment_id)
 .collect::<Vec<_>>();
 write!(f, "Searcher({:?})", segment_ids)
 }
@@ -10,6 +10,7 @@ use schema::Schema;
 use std::fmt;
 use std::path::PathBuf;
 use std::result;
+use Opstamp;
 use Result;
 
 /// A segment is a piece of the index.
@@ -50,7 +51,7 @@ impl Segment {
 }
 
 #[doc(hidden)]
-pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: u64) -> Segment {
+pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
 Segment {
 index: self.index,
 meta: self.meta.with_delete_meta(num_deleted_docs, opstamp),
@@ -5,6 +5,7 @@ use serde;
 use std::collections::HashSet;
 use std::fmt;
 use std::path::PathBuf;
+use Opstamp;
 
 lazy_static! {
 static ref INVENTORY: Inventory<InnerSegmentMeta> = { Inventory::new() };
@@ -13,7 +14,7 @@ lazy_static! {
 #[derive(Clone, Debug, Serialize, Deserialize)]
 struct DeleteMeta {
 num_deleted_docs: u32,
-opstamp: u64,
+opstamp: Opstamp,
 }
 
 /// `SegmentMeta` contains simple meta information about a segment.
@@ -136,9 +137,9 @@ impl SegmentMeta {
 self.max_doc() - self.num_deleted_docs()
 }
 
-/// Returns the opstamp of the last delete operation
+/// Returns the `Opstamp` of the last delete operation
 /// taken in account in this segment.
-pub fn delete_opstamp(&self) -> Option<u64> {
+pub fn delete_opstamp(&self) -> Option<Opstamp> {
 self.tracked
 .deletes
 .as_ref()
@@ -152,7 +153,7 @@ impl SegmentMeta {
 }
 
 #[doc(hidden)]
-pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: u64) -> SegmentMeta {
+pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta {
 let delete_meta = DeleteMeta {
 num_deleted_docs,
 opstamp,
@@ -5,14 +5,10 @@ use core::Segment;
 use core::SegmentComponent;
 use core::SegmentId;
 use directory::ReadOnlySource;
-use error::TantivyError;
 use fastfield::DeleteBitSet;
 use fastfield::FacetReader;
-use fastfield::FastFieldReader;
+use fastfield::FastFieldReaders;
-use fastfield::{self, FastFieldNotAvailableError};
-use fastfield::{BytesFastFieldReader, FastValue, MultiValueIntFastFieldReader};
 use fieldnorm::FieldNormReader;
-use schema::Cardinality;
 use schema::Field;
 use schema::FieldType;
 use schema::Schema;
@@ -51,7 +47,7 @@ pub struct SegmentReader {
 postings_composite: CompositeFile,
 positions_composite: CompositeFile,
 positions_idx_composite: CompositeFile,
-fast_fields_composite: CompositeFile,
+fast_fields_readers: Arc<FastFieldReaders>,
 fieldnorms_composite: CompositeFile,
 
 store_source: ReadOnlySource,
@@ -105,93 +101,21 @@ impl SegmentReader {
 ///
 /// # Panics
 /// May panic if the index is corrupted.
-pub fn fast_field_reader<Item: FastValue>(
+pub fn fast_fields(&self) -> &FastFieldReaders {
-&self,
+&self.fast_fields_readers
-field: Field,
-) -> fastfield::Result<FastFieldReader<Item>> {
-let field_entry = self.schema.get_field_entry(field);
-if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::SingleValue)
-{
-self.fast_fields_composite
-.open_read(field)
-.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
-.map(FastFieldReader::open)
-} else {
-Err(FastFieldNotAvailableError::new(field_entry))
-}
-}
-
-pub(crate) fn fast_field_reader_with_idx<Item: FastValue>(
-&self,
-field: Field,
-idx: usize,
-) -> fastfield::Result<FastFieldReader<Item>> {
-if let Some(ff_source) = self.fast_fields_composite.open_read_with_idx(field, idx) {
-Ok(FastFieldReader::open(ff_source))
-} else {
-let field_entry = self.schema.get_field_entry(field);
-Err(FastFieldNotAvailableError::new(field_entry))
-}
-}
-
-/// Accessor to the `MultiValueIntFastFieldReader` associated to a given `Field`.
-/// May panick if the field is not a multivalued fastfield of the type `Item`.
-pub fn multi_fast_field_reader<Item: FastValue>(
-&self,
-field: Field,
-) -> fastfield::Result<MultiValueIntFastFieldReader<Item>> {
-let field_entry = self.schema.get_field_entry(field);
-if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::MultiValues)
-{
-let idx_reader = self.fast_field_reader_with_idx(field, 0)?;
-let vals_reader = self.fast_field_reader_with_idx(field, 1)?;
-Ok(MultiValueIntFastFieldReader::open(idx_reader, vals_reader))
-} else {
-Err(FastFieldNotAvailableError::new(field_entry))
-}
-}
-
-/// Accessor to the `BytesFastFieldReader` associated to a given `Field`.
-pub fn bytes_fast_field_reader(&self, field: Field) -> fastfield::Result<BytesFastFieldReader> {
-let field_entry = self.schema.get_field_entry(field);
-match *field_entry.field_type() {
-FieldType::Bytes => {}
-_ => return Err(FastFieldNotAvailableError::new(field_entry)),
-}
-let idx_reader = self
-.fast_fields_composite
-.open_read_with_idx(field, 0)
-.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
-.map(FastFieldReader::open)?;
-let values = self
-.fast_fields_composite
-.open_read_with_idx(field, 1)
-.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
-Ok(BytesFastFieldReader::open(idx_reader, values))
 }
 
 /// Accessor to the `FacetReader` associated to a given `Field`.
-pub fn facet_reader(&self, field: Field) -> Result<FacetReader> {
+pub fn facet_reader(&self, field: Field) -> Option<FacetReader> {
 let field_entry = self.schema.get_field_entry(field);
 if field_entry.field_type() != &FieldType::HierarchicalFacet {
-return Err(TantivyError::InvalidArgument(format!(
+return None;
-"The field {:?} is not a \
-hierarchical facet.",
-field_entry
-)));
 }
-let term_ords_reader = self.multi_fast_field_reader(field)?;
+let term_ords_reader = self.fast_fields().u64s(field)?;
-let termdict_source = self.termdict_composite.open_read(field).ok_or_else(|| {
+let termdict_source = self.termdict_composite.open_read(field)?;
-TantivyError::InvalidArgument(format!(
-"The field \"{}\" is a hierarchical \
-but this segment does not seem to have the field term \
-dictionary.",
-field_entry.name()
-))
-})?;
 let termdict = TermDictionary::from_source(&termdict_source);
 let facet_reader = FacetReader::new(term_ords_reader, termdict);
-Ok(facet_reader)
+Some(facet_reader)
 }
 
 /// Accessor to the segment's `Field norms`'s reader.
@@ -247,8 +171,12 @@ impl SegmentReader {
 }
 };
 
+let schema = segment.schema();
+
 let fast_fields_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
 let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
+let fast_field_readers =
+Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
 
 let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
 let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
@@ -260,14 +188,13 @@ impl SegmentReader {
 None
 };
 
-let schema = segment.schema();
 Ok(SegmentReader {
 inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
 max_doc: segment.meta().max_doc(),
 num_docs: segment.meta().num_docs(),
 termdict_composite,
 postings_composite,
-fast_fields_composite,
+fast_fields_readers: fast_field_readers,
 fieldnorms_composite,
 segment_id: segment.id(),
 store_source,
@@ -381,12 +308,12 @@ impl SegmentReader {
 self.postings_composite.space_usage(),
 self.positions_composite.space_usage(),
 self.positions_idx_composite.space_usage(),
-self.fast_fields_composite.space_usage(),
+self.fast_fields_readers.space_usage(),
 self.fieldnorms_composite.space_usage(),
 self.get_store_reader().space_usage(),
 self.delete_bitset_opt
 .as_ref()
-.map(|x| x.space_usage())
+.map(DeleteBitSet::space_usage)
 .unwrap_or(0),
 )
 }
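A brief sketch of how callers interact with the reworked `SegmentReader` accessors shown above: every fast-field flavour is now reached through `fast_fields()`, and `facet_reader` signals a missing or mistyped field with `None` instead of an error. The field handles and the helper below are hypothetical, for illustration only.

```rust
use tantivy::schema::Field;
use tantivy::SegmentReader;

// Probe a segment using the two accessor styles introduced above.
// Both return Option, so a wrong field type simply yields None.
fn describe_fields(segment_reader: &SegmentReader, num_likes: Field, category: Field) {
    // Single-valued u64 fast field; use .u64s()/.i64s()/.bytes() for other flavours.
    if segment_reader.fast_fields().u64(num_likes).is_some() {
        println!("`num_likes` is a u64 fast field in this segment");
    }
    // Facet reader: None if `category` is not a hierarchical facet field.
    if segment_reader.facet_reader(category).is_none() {
        println!("`category` is not a facet field in this segment");
    }
}
```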
@@ -48,14 +48,14 @@ impl RetryPolicy {
 ///
 /// It is transparently associated to a lock file, that gets deleted
 /// on `Drop.` The lock is released automatically on `Drop`.
-pub struct DirectoryLock(Box<Drop + Send + 'static>);
+pub struct DirectoryLock(Box<Drop + Send + Sync + 'static>);
 
 struct DirectoryLockGuard {
 directory: Box<Directory>,
 path: PathBuf,
 }
 
-impl<T: Drop + Send + 'static> From<Box<T>> for DirectoryLock {
+impl<T: Drop + Send + Sync + 'static> From<Box<T>> for DirectoryLock {
 fn from(underlying: Box<T>) -> Self {
 DirectoryLock(underlying)
 }
@@ -260,95 +260,98 @@ impl Clone for ManagedDirectory {
 #[cfg(test)]
 mod tests {
 
-use super::*;
 #[cfg(feature = "mmap")]
-use directory::MmapDirectory;
+mod mmap_specific {
-use std::io::Write;
-use std::path::Path;
-use tempdir::TempDir;
 
-lazy_static! {
+use super::super::*;
-static ref TEST_PATH1: &'static Path = Path::new("some_path_for_test");
+use std::path::Path;
-static ref TEST_PATH2: &'static Path = Path::new("some_path_for_test2");
+use tempdir::TempDir;
-}
 
-#[test]
+lazy_static! {
-#[cfg(feature = "mmap")]
+static ref TEST_PATH1: &'static Path = Path::new("some_path_for_test");
-fn test_managed_directory() {
+static ref TEST_PATH2: &'static Path = Path::new("some_path_for_test2");
-let tempdir = TempDir::new("index").unwrap();
+}
-let tempdir_path = PathBuf::from(tempdir.path());
-{
+use directory::MmapDirectory;
-let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
+use std::io::Write;
-let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
+
+#[test]
+fn test_managed_directory() {
+let tempdir = TempDir::new("index").unwrap();
+let tempdir_path = PathBuf::from(tempdir.path());
 {
-let mut write_file = managed_directory.open_write(*TEST_PATH1).unwrap();
+let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
-write_file.flush().unwrap();
+let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
+{
+let mut write_file = managed_directory.open_write(*TEST_PATH1).unwrap();
+write_file.flush().unwrap();
+}
+{
+managed_directory
+.atomic_write(*TEST_PATH2, &vec![0u8, 1u8])
+.unwrap();
+}
+{
+assert!(managed_directory.exists(*TEST_PATH1));
+assert!(managed_directory.exists(*TEST_PATH2));
+}
+{
+let living_files: HashSet<PathBuf> =
+[TEST_PATH1.to_owned()].into_iter().cloned().collect();
+managed_directory.garbage_collect(|| living_files);
+}
+{
+assert!(managed_directory.exists(*TEST_PATH1));
+assert!(!managed_directory.exists(*TEST_PATH2));
+}
 }
 {
-managed_directory
+let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
-.atomic_write(*TEST_PATH2, &vec![0u8, 1u8])
+let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
-.unwrap();
+{
-}
+assert!(managed_directory.exists(*TEST_PATH1));
-{
+assert!(!managed_directory.exists(*TEST_PATH2));
-assert!(managed_directory.exists(*TEST_PATH1));
+}
-assert!(managed_directory.exists(*TEST_PATH2));
+{
-}
+let living_files: HashSet<PathBuf> = HashSet::new();
-{
+managed_directory.garbage_collect(|| living_files);
-let living_files: HashSet<PathBuf> =
+}
-[TEST_PATH1.to_owned()].into_iter().cloned().collect();
+{
-managed_directory.garbage_collect(|| living_files);
+assert!(!managed_directory.exists(*TEST_PATH1));
-}
+assert!(!managed_directory.exists(*TEST_PATH2));
-{
+}
-assert!(managed_directory.exists(*TEST_PATH1));
-assert!(!managed_directory.exists(*TEST_PATH2));
 }
 }
-{
+
+#[test]
+fn test_managed_directory_gc_while_mmapped() {
+let tempdir = TempDir::new("index").unwrap();
+let tempdir_path = PathBuf::from(tempdir.path());
+let living_files = HashSet::new();
+
 let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
 let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
-{
+managed_directory
-assert!(managed_directory.exists(*TEST_PATH1));
+.atomic_write(*TEST_PATH1, &vec![0u8, 1u8])
-assert!(!managed_directory.exists(*TEST_PATH2));
+.unwrap();
-}
-{
-let living_files: HashSet<PathBuf> = HashSet::new();
-managed_directory.garbage_collect(|| living_files);
-}
-{
-assert!(!managed_directory.exists(*TEST_PATH1));
-assert!(!managed_directory.exists(*TEST_PATH2));
-}
-}
-}
-
-#[test]
-#[cfg(feature = "mmap ")]
-fn test_managed_directory_gc_while_mmapped() {
-let tempdir = TempDir::new("index").unwrap();
-let tempdir_path = PathBuf::from(tempdir.path());
-let living_files = HashSet::new();
-
-let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
-let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
-managed_directory
-.atomic_write(*TEST_PATH1, &vec![0u8, 1u8])
-.unwrap();
-assert!(managed_directory.exists(*TEST_PATH1));
-
-let _mmap_read = managed_directory.open_read(*TEST_PATH1).unwrap();
-managed_directory.garbage_collect(|| living_files.clone());
-if cfg!(target_os = "windows") {
-// On Windows, gc should try and fail the file as it is mmapped.
 assert!(managed_directory.exists(*TEST_PATH1));
-// unmap should happen here.
-drop(_mmap_read);
+let _mmap_read = managed_directory.open_read(*TEST_PATH1).unwrap();
-// The file should still be in the list of managed file and
+managed_directory.garbage_collect(|| living_files.clone());
-// eventually be deleted once mmap is released.
+if cfg!(target_os = "windows") {
-managed_directory.garbage_collect(|| living_files);
+// On Windows, gc should try and fail the file as it is mmapped.
-assert!(!managed_directory.exists(*TEST_PATH1));
+assert!(managed_directory.exists(*TEST_PATH1));
-} else {
+// unmap should happen here.
-assert!(!managed_directory.exists(*TEST_PATH1));
+drop(_mmap_read);
+// The file should still be in the list of managed file and
+// eventually be deleted once mmap is released.
+managed_directory.garbage_collect(|| living_files);
+assert!(!managed_directory.exists(*TEST_PATH1));
+} else {
+assert!(!managed_directory.exists(*TEST_PATH1));
+}
 }
+
 }
+
 }
@@ -6,7 +6,6 @@ use self::notify::RawEvent;
 use self::notify::RecursiveMode;
 use self::notify::Watcher;
 use atomicwrites;
-use common::make_io_err;
 use core::META_FILEPATH;
 use directory::error::LockError;
 use directory::error::{DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
@@ -37,6 +36,11 @@ use std::sync::Weak;
 use std::thread;
 use tempdir::TempDir;
 
+/// Create a default io error given a string.
+pub(crate) fn make_io_err(msg: String) -> io::Error {
+io::Error::new(io::ErrorKind::Other, msg)
+}
+
 /// Returns None iff the file exists, can be read, but is empty (and hence
 /// cannot be mmapped)
 fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
@@ -86,7 +86,7 @@ impl InnerDirectory {
 self.fs
 .get(path)
 .ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
-.map(|el| el.clone())
+.map(Clone::clone)
 }
 
 fn delete(&mut self, path: &Path) -> result::Result<(), DeleteError> {
@@ -1,4 +1,5 @@
 use common::BitSet;
+use fastfield::DeleteBitSet;
 use std::borrow::Borrow;
 use std::borrow::BorrowMut;
 use std::cmp::Ordering;
@@ -95,9 +96,23 @@ pub trait DocSet {
     }
 
     /// Returns the number documents matching.
-    ///
     /// Calling this method consumes the `DocSet`.
-    fn count(&mut self) -> u32 {
+    fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
+        let mut count = 0u32;
+        while self.advance() {
+            if !delete_bitset.is_deleted(self.doc()) {
+                count += 1u32;
+            }
+        }
+        count
+    }
+
+    /// Returns the count of documents, deleted or not.
+    /// Calling this method consumes the `DocSet`.
+    ///
+    /// Of course, the result is an upper bound of the result
+    /// given by `count()`.
+    fn count_including_deleted(&mut self) -> u32 {
         let mut count = 0u32;
         while self.advance() {
             count += 1u32;
@@ -127,9 +142,14 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
         unboxed.size_hint()
     }
 
-    fn count(&mut self) -> u32 {
+    fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
         let unboxed: &mut TDocSet = self.borrow_mut();
-        unboxed.count()
+        unboxed.count(delete_bitset)
+    }
+
+    fn count_including_deleted(&mut self) -> u32 {
+        let unboxed: &mut TDocSet = self.borrow_mut();
+        unboxed.count_including_deleted()
     }
 
     fn append_to_bitset(&mut self, bitset: &mut BitSet) {
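Editorial note on the hunk above: `count` now takes the segment's `DeleteBitSet` and only counts live documents, while `count_including_deleted` keeps the old behaviour and is an upper bound. A minimal caller-side sketch, not part of the diff; the `scorer`, `other_scorer` and `segment_reader` bindings are illustrative placeholders:

    // Counting only live documents; consumes the DocSet.
    let live_matches = scorer.count(segment_reader.delete_bitset().unwrap());
    // Upper bound that ignores deletes; also consumes its DocSet.
    let all_matches = other_scorer.count_including_deleted();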
@@ -23,14 +23,14 @@ mod tests {
         index_writer.add_document(doc!(field=>vec![0u8; 1000]));
         assert!(index_writer.commit().is_ok());
         let searcher = index.reader().unwrap().searcher();
-        let reader = searcher.segment_reader(0);
-        let bytes_reader = reader.bytes_fast_field_reader(field).unwrap();
+        let segment_reader = searcher.segment_reader(0);
+        let bytes_reader = segment_reader.fast_fields().bytes(field).unwrap();
 
-        assert_eq!(bytes_reader.get_val(0), &[0u8, 1, 2, 3]);
-        assert!(bytes_reader.get_val(1).is_empty());
-        assert_eq!(bytes_reader.get_val(2), &[255u8]);
-        assert_eq!(bytes_reader.get_val(3), &[1u8, 3, 5, 7, 9]);
+        assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
+        assert!(bytes_reader.get_bytes(1).is_empty());
+        assert_eq!(bytes_reader.get_bytes(2), &[255u8]);
+        assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]);
         let long = vec![0u8; 1000];
-        assert_eq!(bytes_reader.get_val(4), long.as_slice());
+        assert_eq!(bytes_reader.get_bytes(4), long.as_slice());
     }
 }
@@ -14,6 +14,7 @@ use DocId;
 ///
 /// Reading the value for a document is done by reading the start index for it,
 /// and the start index for the next document, and keeping the bytes in between.
+#[derive(Clone)]
 pub struct BytesFastFieldReader {
     idx_reader: FastFieldReader<u64>,
     values: OwningRef<ReadOnlySource, [u8]>,
@@ -28,10 +29,20 @@ impl BytesFastFieldReader {
         BytesFastFieldReader { idx_reader, values }
     }
 
-    /// Returns the bytes associated to the given `doc`
-    pub fn get_val(&self, doc: DocId) -> &[u8] {
+    fn range(&self, doc: DocId) -> (usize, usize) {
         let start = self.idx_reader.get(doc) as usize;
         let stop = self.idx_reader.get(doc + 1) as usize;
+        (start, stop)
+    }
+
+    /// Returns the bytes associated to the given `doc`
+    pub fn get_bytes(&self, doc: DocId) -> &[u8] {
+        let (start, stop) = self.range(doc);
         &self.values[start..stop]
     }
+
+    /// Returns the overall number of bytes in this bytes fast field.
+    pub fn total_num_bytes(&self) -> usize {
+        self.values.len()
+    }
 }
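A hedged sketch of how the renamed accessor is reached from a search, matching the test hunk earlier in this diff; `bytes_field` and the doc ids are placeholders:

    let bytes_reader = searcher
        .segment_reader(0)
        .fast_fields()
        .bytes(bytes_field)
        .expect("not a bytes fast field");
    let first_doc_payload: &[u8] = bytes_reader.get_bytes(0);
    let stored_bytes_overall = bytes_reader.total_num_bytes();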
@@ -53,16 +53,18 @@ impl DeleteBitSet {
         }
     }
 
-    /// Returns whether the document has been marked as deleted.
+    /// Returns true iff the document is still "alive". In other words, if it has not been deleted.
+    pub fn is_alive(&self, doc: DocId) -> bool {
+        !self.is_deleted(doc)
+    }
+
+    /// Returns true iff the document has been marked as deleted.
+    #[inline(always)]
     pub fn is_deleted(&self, doc: DocId) -> bool {
-        if self.len == 0 {
-            false
-        } else {
-            let byte_offset = doc / 8u32;
-            let b: u8 = (*self.data)[byte_offset as usize];
-            let shift = (doc & 7u32) as u8;
-            b & (1u8 << shift) != 0
-        }
+        let byte_offset = doc / 8u32;
+        let b: u8 = (*self.data)[byte_offset as usize];
+        let shift = (doc & 7u32) as u8;
+        b & (1u8 << shift) != 0
     }
 
     /// Summarize total space usage of this bitset.
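A quick worked example of the bit arithmetic kept in `is_deleted` (plain Rust, independent of tantivy):

    // doc 13 lives in byte 13 / 8 = 1, at bit position 13 & 7 = 5,
    // so the method tests data[1] & (1 << 5).
    let doc: u32 = 13;
    assert_eq!(doc / 8, 1);
    assert_eq!(doc & 7, 5);
    assert_eq!(1u8 << (doc & 7), 0b0010_0000);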
@@ -30,6 +30,7 @@ pub use self::error::{FastFieldNotAvailableError, Result};
 pub use self::facet_reader::FacetReader;
 pub use self::multivalued::{MultiValueIntFastFieldReader, MultiValueIntFastFieldWriter};
 pub use self::reader::FastFieldReader;
+pub use self::readers::FastFieldReaders;
 pub use self::serializer::FastFieldSerializer;
 pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
 use common;
@@ -43,6 +44,7 @@ mod error;
 mod facet_reader;
 mod multivalued;
 mod reader;
+mod readers;
 mod serializer;
 mod writer;
 
@@ -78,10 +80,6 @@ impl FastValue for u64 {
         *self
     }
 
-    fn as_u64(&self) -> u64 {
-        *self
-    }
-
     fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
         match *field_type {
             FieldType::U64(ref integer_options) => integer_options.get_fastfield_cardinality(),
@@ -89,6 +87,10 @@ impl FastValue for u64 {
             _ => None,
         }
     }
+
+    fn as_u64(&self) -> u64 {
+        *self
+    }
 }
 
 impl FastValue for i64 {
@@ -37,9 +37,7 @@ mod tests {
         let searcher = index.reader().unwrap().searcher();
         let segment_reader = searcher.segment_reader(0);
         let mut vals = Vec::new();
-        let multi_value_reader = segment_reader
-            .multi_fast_field_reader::<u64>(field)
-            .unwrap();
+        let multi_value_reader = segment_reader.fast_fields().u64s(field).unwrap();
         {
             multi_value_reader.get_vals(2, &mut vals);
             assert_eq!(&vals, &[4u64]);
@@ -198,9 +196,9 @@ mod tests {
         assert!(index_writer.commit().is_ok());
 
         let searcher = index.reader().unwrap().searcher();
-        let reader = searcher.segment_reader(0);
+        let segment_reader = searcher.segment_reader(0);
         let mut vals = Vec::new();
-        let multi_value_reader = reader.multi_fast_field_reader::<i64>(field).unwrap();
+        let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap();
         {
             multi_value_reader.get_vals(2, &mut vals);
             assert_eq!(&vals, &[-4i64]);
@@ -26,6 +26,13 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
         }
     }
 
+    pub(crate) fn into_u64s_reader(self) -> MultiValueIntFastFieldReader<u64> {
+        MultiValueIntFastFieldReader {
+            idx_reader: self.idx_reader,
+            vals_reader: self.vals_reader.into_u64_reader(),
+        }
+    }
+
     /// Returns `(start, stop)`, such that the values associated
     /// to the given document are `start..stop`.
     fn range(&self, doc: DocId) -> (u64, u64) {
@@ -41,13 +48,24 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
         vals.resize(len, Item::default());
         self.vals_reader.get_range_u64(start, &mut vals[..]);
     }
+
+    /// Returns the number of values associated with the document `DocId`.
+    pub fn num_vals(&self, doc: DocId) -> usize {
+        let (start, stop) = self.range(doc);
+        (stop - start) as usize
+    }
+
+    /// Returns the overall number of values in this field .
+    pub fn total_num_vals(&self) -> u64 {
+        self.idx_reader.max_value()
+    }
 }
 
 #[cfg(test)]
 mod tests {
 
     use core::Index;
-    use schema::{Document, Facet, Schema};
+    use schema::{Facet, Schema};
 
     #[test]
     fn test_multifastfield_reader() {
@@ -58,22 +76,12 @@ mod tests {
         let mut index_writer = index
             .writer_with_num_threads(1, 30_000_000)
             .expect("Failed to create index writer.");
-        {
-            let mut doc = Document::new();
-            doc.add_facet(facet_field, "/category/cat2");
-            doc.add_facet(facet_field, "/category/cat1");
-            index_writer.add_document(doc);
-        }
-        {
-            let mut doc = Document::new();
-            doc.add_facet(facet_field, "/category/cat2");
-            index_writer.add_document(doc);
-        }
-        {
-            let mut doc = Document::new();
-            doc.add_facet(facet_field, "/category/cat3");
-            index_writer.add_document(doc);
-        }
+        index_writer.add_document(doc!(
+            facet_field => Facet::from("/category/cat2"),
+            facet_field => Facet::from("/category/cat1"),
+        ));
+        index_writer.add_document(doc!(facet_field => Facet::from("/category/cat2")));
+        index_writer.add_document(doc!(facet_field => Facet::from("/category/cat3")));
         index_writer.commit().expect("Commit failed");
         let searcher = index.reader().unwrap().searcher();
         let segment_reader = searcher.segment_reader(0);
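Usage sketch for the new multi-valued helpers above; `tags_field` and `doc_id` are placeholders and not part of the diff:

    let tags_reader = segment_reader.fast_fields().u64s(tags_field).unwrap();
    let mut vals = Vec::new();
    tags_reader.get_vals(doc_id, &mut vals);                  // values of one document
    assert_eq!(tags_reader.num_vals(doc_id), vals.len());     // per-document count
    let values_in_segment: u64 = tags_reader.total_num_vals(); // across the whole segment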
@@ -50,6 +50,15 @@ impl<Item: FastValue> FastFieldReader<Item> {
         }
     }
 
+    pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> {
+        FastFieldReader {
+            bit_unpacker: self.bit_unpacker,
+            min_value_u64: self.min_value_u64,
+            max_value_u64: self.max_value_u64,
+            _phantom: PhantomData,
+        }
+    }
+
     /// Return the value associated to the given document.
     ///
     /// This accessor should return as fast as possible.
src/fastfield/readers.rs (new file, 191 lines)
@@ -0,0 +1,191 @@
+use common::CompositeFile;
+use fastfield::BytesFastFieldReader;
+use fastfield::MultiValueIntFastFieldReader;
+use fastfield::{FastFieldNotAvailableError, FastFieldReader};
+use schema::{Cardinality, Field, FieldType, Schema};
+use space_usage::PerFieldSpaceUsage;
+use std::collections::HashMap;
+use Result;
+
+/// Provides access to all of the FastFieldReader.
+///
+/// Internally, `FastFieldReaders` have preloaded fast field readers,
+/// and just wraps several `HashMap`.
+pub struct FastFieldReaders {
+    fast_field_i64: HashMap<Field, FastFieldReader<i64>>,
+    fast_field_u64: HashMap<Field, FastFieldReader<u64>>,
+    fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>,
+    fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>,
+    fast_bytes: HashMap<Field, BytesFastFieldReader>,
+    fast_fields_composite: CompositeFile,
+}
+
+enum FastType {
+    I64,
+    U64,
+}
+
+fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> {
+    match field_type {
+        FieldType::U64(options) => options
+            .get_fastfield_cardinality()
+            .map(|cardinality| (FastType::U64, cardinality)),
+        FieldType::I64(options) => options
+            .get_fastfield_cardinality()
+            .map(|cardinality| (FastType::I64, cardinality)),
+        FieldType::HierarchicalFacet => Some((FastType::U64, Cardinality::MultiValues)),
+        _ => None,
+    }
+}
+
+impl FastFieldReaders {
+    pub(crate) fn load_all(
+        schema: &Schema,
+        fast_fields_composite: &CompositeFile,
+    ) -> Result<FastFieldReaders> {
+        let mut fast_field_readers = FastFieldReaders {
+            fast_field_i64: Default::default(),
+            fast_field_u64: Default::default(),
+            fast_field_i64s: Default::default(),
+            fast_field_u64s: Default::default(),
+            fast_bytes: Default::default(),
+            fast_fields_composite: fast_fields_composite.clone(),
+        };
+        for (field_id, field_entry) in schema.fields().iter().enumerate() {
+            let field = Field(field_id as u32);
+            let field_type = field_entry.field_type();
+            if field_type == &FieldType::Bytes {
+                let idx_reader = fast_fields_composite
+                    .open_read_with_idx(field, 0)
+                    .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
+                    .map(FastFieldReader::open)?;
+                let data = fast_fields_composite
+                    .open_read_with_idx(field, 1)
+                    .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
+                fast_field_readers
+                    .fast_bytes
+                    .insert(field, BytesFastFieldReader::open(idx_reader, data));
+            } else if let Some((fast_type, cardinality)) = type_and_cardinality(field_type) {
+                match cardinality {
+                    Cardinality::SingleValue => {
+                        if let Some(fast_field_data) = fast_fields_composite.open_read(field) {
+                            match fast_type {
+                                FastType::U64 => {
+                                    let fast_field_reader = FastFieldReader::open(fast_field_data);
+                                    fast_field_readers
+                                        .fast_field_u64
+                                        .insert(field, fast_field_reader);
+                                }
+                                FastType::I64 => {
+                                    fast_field_readers.fast_field_i64.insert(
+                                        field,
+                                        FastFieldReader::open(fast_field_data.clone()),
+                                    );
+                                }
+                            }
+                        } else {
+                            return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
+                        }
+                    }
+                    Cardinality::MultiValues => {
+                        let idx_opt = fast_fields_composite.open_read_with_idx(field, 0);
+                        let data_opt = fast_fields_composite.open_read_with_idx(field, 1);
+                        if let (Some(fast_field_idx), Some(fast_field_data)) = (idx_opt, data_opt) {
+                            let idx_reader = FastFieldReader::open(fast_field_idx);
+                            match fast_type {
+                                FastType::I64 => {
+                                    let vals_reader = FastFieldReader::open(fast_field_data);
+                                    let multivalued_int_fast_field =
+                                        MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
+                                    fast_field_readers
+                                        .fast_field_i64s
+                                        .insert(field, multivalued_int_fast_field);
+                                }
+                                FastType::U64 => {
+                                    let vals_reader = FastFieldReader::open(fast_field_data);
+                                    let multivalued_int_fast_field =
+                                        MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
+                                    fast_field_readers
+                                        .fast_field_u64s
+                                        .insert(field, multivalued_int_fast_field);
+                                }
+                            }
+                        } else {
+                            return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
+                        }
+                    }
+                }
+            }
+        }
+        Ok(fast_field_readers)
+    }
+
+    pub(crate) fn space_usage(&self) -> PerFieldSpaceUsage {
+        self.fast_fields_composite.space_usage()
+    }
+
+    /// Returns the `u64` fast field reader reader associated to `field`.
+    ///
+    /// If `field` is not a u64 fast field, this method returns `None`.
+    pub fn u64(&self, field: Field) -> Option<FastFieldReader<u64>> {
+        self.fast_field_u64.get(&field).cloned()
+    }
+
+    /// If the field is a u64-fast field return the associated reader.
+    /// If the field is a i64-fast field, return the associated u64 reader. Values are
+    /// mapped from i64 to u64 using a (well the, it is unique) monotonic mapping.
+    ///
+    /// This method is useful when merging segment reader.
+    pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> {
+        if let Some(u64_ff_reader) = self.u64(field) {
+            return Some(u64_ff_reader);
+        }
+        if let Some(i64_ff_reader) = self.i64(field) {
+            return Some(i64_ff_reader.into_u64_reader());
+        }
+        None
+    }
+
+    /// Returns the `i64` fast field reader reader associated to `field`.
+    ///
+    /// If `field` is not a i64 fast field, this method returns `None`.
+    pub fn i64(&self, field: Field) -> Option<FastFieldReader<i64>> {
+        self.fast_field_i64.get(&field).cloned()
+    }
+
+    /// Returns a `u64s` multi-valued fast field reader reader associated to `field`.
+    ///
+    /// If `field` is not a u64 multi-valued fast field, this method returns `None`.
+    pub fn u64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<u64>> {
+        self.fast_field_u64s.get(&field).cloned()
+    }
+
+    /// If the field is a u64s-fast field return the associated reader.
+    /// If the field is a i64s-fast field, return the associated u64s reader. Values are
+    /// mapped from i64 to u64 using a (well the, it is unique) monotonic mapping.
+    ///
+    /// This method is useful when merging segment reader.
+    pub(crate) fn u64s_lenient(&self, field: Field) -> Option<MultiValueIntFastFieldReader<u64>> {
+        if let Some(u64s_ff_reader) = self.u64s(field) {
+            return Some(u64s_ff_reader);
+        }
+        if let Some(i64s_ff_reader) = self.i64s(field) {
+            return Some(i64s_ff_reader.into_u64s_reader());
+        }
+        None
+    }
+
+    /// Returns a `i64s` multi-valued fast field reader reader associated to `field`.
+    ///
+    /// If `field` is not a i64 multi-valued fast field, this method returns `None`.
+    pub fn i64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<i64>> {
+        self.fast_field_i64s.get(&field).cloned()
+    }
+
+    /// Returns the `bytes` fast field reader associated to `field`.
+    ///
+    /// If `field` is not a bytes fast field, returns `None`.
+    pub fn bytes(&self, field: Field) -> Option<BytesFastFieldReader> {
+        self.fast_bytes.get(&field).cloned()
+    }
+}
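The new `FastFieldReaders` struct above is what `SegmentReader::fast_fields()` hands back in the rest of this diff. A hedged usage sketch; the field bindings are placeholders, not part of the change:

    let fast_fields = searcher.segment_reader(0).fast_fields();
    let popularity = fast_fields.u64(popularity_field).expect("declared as a u64 fast field");
    let rating = fast_fields.i64(rating_field);          // None if not an i64 fast field
    let tags = fast_fields.u64s(tags_field);              // multi-valued variant
    let thumbnail = fast_fields.bytes(thumbnail_field);   // bytes fast field
    let popularity_of_doc_0 = popularity.get(0);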
@@ -13,7 +13,6 @@ fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {
 
 #[test]
 #[ignore]
-#[cfg(feature = "mmap")]
 fn test_indexing() {
     let mut schema_builder = Schema::builder();
 
@@ -2,6 +2,7 @@ use super::operation::DeleteOperation;
 use std::mem;
 use std::ops::DerefMut;
 use std::sync::{Arc, RwLock};
+use Opstamp;
 
 // The DeleteQueue is similar in conceptually to a multiple
 // consumer single producer broadcast channel.
@@ -184,7 +185,7 @@ impl DeleteCursor {
     /// queue are consume and the next get will return None.
     /// - the next get will return the first operation with an
     ///   `opstamp >= target_opstamp`.
-    pub fn skip_to(&mut self, target_opstamp: u64) {
+    pub fn skip_to(&mut self, target_opstamp: Opstamp) {
         // TODO Can be optimize as we work with block.
         while self.is_behind_opstamp(target_opstamp) {
             self.advance();
@@ -192,7 +193,7 @@ impl DeleteCursor {
     }
 
     #[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))]
-    fn is_behind_opstamp(&mut self, target_opstamp: u64) -> bool {
+    fn is_behind_opstamp(&mut self, target_opstamp: Opstamp) -> bool {
         self.get()
             .map(|operation| operation.opstamp < target_opstamp)
             .unwrap_or(false)
@@ -1,5 +1,6 @@
 use std::sync::Arc;
 use DocId;
+use Opstamp;
 
 // Doc to opstamp is used to identify which
 // document should be deleted.
@@ -23,7 +24,7 @@ pub enum DocToOpstampMapping {
 }
 
 impl From<Vec<u64>> for DocToOpstampMapping {
-    fn from(opstamps: Vec<u64>) -> DocToOpstampMapping {
+    fn from(opstamps: Vec<Opstamp>) -> DocToOpstampMapping {
         DocToOpstampMapping::WithMap(Arc::new(opstamps))
     }
 }
@@ -35,7 +36,7 @@ impl DocToOpstampMapping {
     //
     // The edge case opstamp = some doc opstamp is in practise
     // never called.
-    pub fn compute_doc_limit(&self, target_opstamp: u64) -> DocId {
+    pub fn compute_doc_limit(&self, target_opstamp: Opstamp) -> DocId {
         match *self {
             DocToOpstampMapping::WithMap(ref doc_opstamps) => {
                 match doc_opstamps.binary_search(&target_opstamp) {
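All of the `u64` to `Opstamp` substitutions in the remaining hunks rely on a crate-level type alias. The exact doc comment is not shown in this diff, but the definition is presumably along these lines:

    /// An Opstamp identifies an add/delete operation; it is a plain u64 under the hood.
    pub type Opstamp = u64;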
@@ -30,6 +30,7 @@ use std::ops::Range;
 use std::sync::Arc;
 use std::thread;
 use std::thread::JoinHandle;
+use Opstamp;
 use Result;
 
 // Size of the margin for the heap. A segment is closed when the remaining memory
@@ -99,7 +100,7 @@ pub struct IndexWriter {
     delete_queue: DeleteQueue,
 
     stamper: Stamper,
-    committed_opstamp: u64,
+    committed_opstamp: Opstamp,
 }
 
 /// Open a new index writer. Attempts to acquire a lockfile.
@@ -177,7 +178,7 @@ pub fn compute_deleted_bitset(
     segment_reader: &SegmentReader,
     delete_cursor: &mut DeleteCursor,
     doc_opstamps: &DocToOpstampMapping,
-    target_opstamp: u64,
+    target_opstamp: Opstamp,
 ) -> Result<bool> {
     let mut might_have_changed = false;
 
@@ -219,7 +220,7 @@ pub fn compute_deleted_bitset(
 pub fn advance_deletes(
     mut segment: Segment,
     segment_entry: &mut SegmentEntry,
-    target_opstamp: u64,
+    target_opstamp: Opstamp,
 ) -> Result<()> {
     {
         if segment_entry.meta().delete_opstamp() == Some(target_opstamp) {
@@ -299,11 +300,11 @@ fn index_documents(
     // the worker thread.
     assert!(num_docs > 0);
 
-    let doc_opstamps: Vec<u64> = segment_writer.finalize()?;
+    let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?;
 
     let segment_meta = SegmentMeta::new(segment_id, num_docs);
 
-    let last_docstamp: u64 = *(doc_opstamps.last().unwrap());
+    let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());
 
     let delete_bitset_opt = if delete_cursor.get().is_some() {
         let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);
@@ -494,7 +495,7 @@ impl IndexWriter {
     /// state as it was after the last commit.
     ///
     /// The opstamp at the last commit is returned.
-    pub fn rollback(&mut self) -> Result<()> {
+    pub fn rollback(&mut self) -> Result<Opstamp> {
         info!("Rolling back to opstamp {}", self.committed_opstamp);
 
         // marks the segment updater as killed. From now on, all
@@ -529,7 +530,7 @@ impl IndexWriter {
         // was dropped with the index_writer.
         for _ in document_receiver.clone() {}
 
-        Ok(())
+        Ok(self.committed_opstamp)
     }
 
     /// Prepares a commit.
@@ -567,7 +568,7 @@ impl IndexWriter {
         info!("Preparing commit");
 
         // this will drop the current document channel
-        // and recreate a new one channels.
+        // and recreate a new one.
         self.recreate_document_channel();
 
         let former_workers_join_handle = mem::replace(&mut self.workers_join_handle, Vec::new());
@@ -601,7 +602,7 @@ impl IndexWriter {
     /// Commit returns the `opstamp` of the last document
     /// that made it in the commit.
     ///
-    pub fn commit(&mut self) -> Result<u64> {
+    pub fn commit(&mut self) -> Result<Opstamp> {
         self.prepare_commit()?.commit()
     }
 
@@ -617,7 +618,7 @@ impl IndexWriter {
     ///
     /// Like adds, the deletion itself will be visible
     /// only after calling `commit()`.
-    pub fn delete_term(&mut self, term: Term) -> u64 {
+    pub fn delete_term(&self, term: Term) -> Opstamp {
         let opstamp = self.stamper.stamp();
         let delete_operation = DeleteOperation { opstamp, term };
         self.delete_queue.push(delete_operation);
@@ -631,7 +632,7 @@ impl IndexWriter {
     ///
     /// This is also the opstamp of the commit that is currently
     /// available for searchers.
-    pub fn commit_opstamp(&self) -> u64 {
+    pub fn commit_opstamp(&self) -> Opstamp {
         self.committed_opstamp
     }
 
@@ -645,7 +646,7 @@ impl IndexWriter {
     ///
     /// Currently it represents the number of documents that
     /// have been added since the creation of the index.
-    pub fn add_document(&mut self, document: Document) -> u64 {
+    pub fn add_document(&self, document: Document) -> Opstamp {
         let opstamp = self.stamper.stamp();
         let add_operation = AddOperation { opstamp, document };
         let send_result = self.operation_sender.send(vec![add_operation]);
@@ -662,7 +663,7 @@ impl IndexWriter {
     /// The total number of stamps generated by this method is `count + 1`;
     /// each operation gets a stamp from the `stamps` iterator and `last_opstamp`
     /// is for the batch itself.
-    fn get_batch_opstamps(&mut self, count: u64) -> (u64, Range<u64>) {
+    fn get_batch_opstamps(&self, count: Opstamp) -> (Opstamp, Range<Opstamp>) {
         let Range { start, end } = self.stamper.stamps(count + 1u64);
         let last_opstamp = end - 1;
         let stamps = Range {
@@ -688,7 +689,7 @@ impl IndexWriter {
     /// Like adds and deletes (see `IndexWriter.add_document` and
     /// `IndexWriter.delete_term`), the changes made by calling `run` will be
     /// visible to readers only after calling `commit()`.
-    pub fn run(&mut self, user_operations: Vec<UserOperation>) -> u64 {
+    pub fn run(&self, user_operations: Vec<UserOperation>) -> Opstamp {
         let count = user_operations.len() as u64;
         if count == 0 {
             return self.stamper.stamp();
@@ -739,7 +740,7 @@ mod tests {
         let mut schema_builder = schema::Schema::builder();
         let text_field = schema_builder.add_text_field("text", schema::TEXT);
         let index = Index::create_in_ram(schema_builder.build());
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         let operations = vec![
             UserOperation::Add(doc!(text_field=>"a")),
             UserOperation::Add(doc!(text_field=>"b")),
@@ -801,7 +802,7 @@ mod tests {
     fn test_empty_operations_group() {
         let schema_builder = schema::Schema::builder();
         let index = Index::create_in_ram(schema_builder.build());
-        let mut index_writer = index.writer(3_000_000).unwrap();
+        let index_writer = index.writer(3_000_000).unwrap();
         let operations1 = vec![];
         let batch_opstamp1 = index_writer.run(operations1);
         assert_eq!(batch_opstamp1, 0u64);
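Because `add_document`, `delete_term` and `run` now take `&self`, a single writer can be shared across threads. A minimal sketch under stated assumptions: the `index`, `text_field` and memory budget are placeholders, the `doc!` macro is assumed to be in scope, and `commit()` still takes `&mut self`, hence the write lock:

    use std::sync::{Arc, RwLock};
    use std::thread;

    let writer = Arc::new(RwLock::new(index.writer(50_000_000)?));

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let writer = Arc::clone(&writer);
            thread::spawn(move || {
                // add_document / delete_term only need `&self`, so a read lock suffices.
                writer.read().unwrap().add_document(doc!(text_field => "hello"));
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }

    // commit() is still `&mut self`, so it goes through the write lock.
    writer.write().unwrap().commit()?;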
@@ -52,7 +52,7 @@ impl MergePolicy for LogMergePolicy {
 
         let mut size_sorted_tuples = segments
             .iter()
-            .map(|x| x.num_docs())
+            .map(SegmentMeta::num_docs)
             .enumerate()
             .collect::<Vec<(usize, u32)>>();
 
@@ -1,5 +1,6 @@
 use census::{Inventory, TrackedObject};
 use std::collections::HashSet;
+use Opstamp;
 use SegmentId;
 
 #[derive(Default)]
@@ -17,8 +18,8 @@ impl MergeOperationInventory {
     }
 }
 
-/// A `MergeOperation` has two role.
-/// It carries all of the information required to describe a merge :
+/// A `MergeOperation` has two roles.
+/// It carries all of the information required to describe a merge:
 /// - `target_opstamp` is the opstamp up to which we want to consume the
 ///   delete queue and reflect their deletes.
 /// - `segment_ids` is the list of segment to be merged.
@@ -35,14 +36,14 @@ pub struct MergeOperation {
 }
 
 struct InnerMergeOperation {
-    target_opstamp: u64,
+    target_opstamp: Opstamp,
     segment_ids: Vec<SegmentId>,
 }
 
 impl MergeOperation {
     pub fn new(
         inventory: &MergeOperationInventory,
-        target_opstamp: u64,
+        target_opstamp: Opstamp,
         segment_ids: Vec<SegmentId>,
     ) -> MergeOperation {
         let inner_merge_operation = InnerMergeOperation {
@@ -54,7 +55,7 @@ impl MergeOperation {
         }
     }
 
-    pub fn target_opstamp(&self) -> u64 {
+    pub fn target_opstamp(&self) -> Opstamp {
         self.inner.target_opstamp
     }
 
@@ -1,7 +1,9 @@
+use common::MAX_DOC_LIMIT;
 use core::Segment;
 use core::SegmentReader;
 use core::SerializableSegment;
 use docset::DocSet;
+use fastfield::BytesFastFieldReader;
 use fastfield::DeleteBitSet;
 use fastfield::FastFieldReader;
 use fastfield::FastFieldSerializer;
@@ -23,6 +25,7 @@ use termdict::TermMerger;
 use termdict::TermOrdinal;
 use DocId;
 use Result;
+use TantivyError;
 
 fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 {
     let mut total_tokens = 0u64;
@@ -70,7 +73,7 @@ fn compute_min_max_val(
         // some deleted documents,
         // we need to recompute the max / min
         (0..max_doc)
-            .filter(|doc_id| !delete_bitset.is_deleted(*doc_id))
+            .filter(|doc_id| delete_bitset.is_alive(*doc_id))
             .map(|doc_id| u64_reader.get(doc_id))
             .minmax()
             .into_option()
@@ -150,6 +153,14 @@ impl IndexMerger {
                 readers.push(reader);
             }
         }
+        if max_doc >= MAX_DOC_LIMIT {
+            let err_msg = format!(
+                "The segment resulting from this merge would have {} docs,\
+                 which exceeds the limit {}.",
+                max_doc, MAX_DOC_LIMIT
+            );
+            return Err(TantivyError::InvalidArgument(err_msg));
+        }
         Ok(IndexMerger {
             schema,
             readers,
@@ -229,7 +240,10 @@ impl IndexMerger {
         let mut max_value = u64::min_value();
 
         for reader in &self.readers {
-            let u64_reader: FastFieldReader<u64> = reader.fast_field_reader(field)?;
+            let u64_reader: FastFieldReader<u64> = reader
+                .fast_fields()
+                .u64_lenient(field)
+                .expect("Failed to find a reader for single fast field. This is a tantivy bug and it should never happen.");
             if let Some((seg_min_val, seg_max_val)) =
                 compute_min_max_val(&u64_reader, reader.max_doc(), reader.delete_bitset())
             {
@@ -272,24 +286,28 @@ impl IndexMerger {
         fast_field_serializer: &mut FastFieldSerializer,
     ) -> Result<()> {
         let mut total_num_vals = 0u64;
+        let mut u64s_readers: Vec<MultiValueIntFastFieldReader<u64>> = Vec::new();
+
         // In the first pass, we compute the total number of vals.
         //
         // This is required by the bitpacker, as it needs to know
         // what should be the bit length use for bitpacking.
         for reader in &self.readers {
-            let idx_reader = reader.fast_field_reader_with_idx::<u64>(field, 0)?;
+            let u64s_reader = reader.fast_fields()
+                .u64s_lenient(field)
+                .expect("Failed to find index for multivalued field. This is a bug in tantivy, please report.");
+
             if let Some(delete_bitset) = reader.delete_bitset() {
                 for doc in 0u32..reader.max_doc() {
-                    if !delete_bitset.is_deleted(doc) {
-                        let start = idx_reader.get(doc);
-                        let end = idx_reader.get(doc + 1);
-                        total_num_vals += end - start;
+                    if delete_bitset.is_alive(doc) {
+                        let num_vals = u64s_reader.num_vals(doc) as u64;
+                        total_num_vals += num_vals;
                     }
                 }
             } else {
-                total_num_vals += idx_reader.max_value();
+                total_num_vals += u64s_reader.total_num_vals();
             }
+            u64s_readers.push(u64s_reader);
         }
 
         // We can now create our `idx` serializer, and in a second pass,
@@ -297,13 +315,10 @@ impl IndexMerger {
         let mut serialize_idx =
             fast_field_serializer.new_u64_fast_field_with_idx(field, 0, total_num_vals, 0)?;
         let mut idx = 0;
-        for reader in &self.readers {
-            let idx_reader = reader.fast_field_reader_with_idx::<u64>(field, 0)?;
-            for doc in reader.doc_ids_alive() {
+        for (segment_reader, u64s_reader) in self.readers.iter().zip(&u64s_readers) {
+            for doc in segment_reader.doc_ids_alive() {
                 serialize_idx.add_val(idx)?;
-                let start = idx_reader.get(doc);
-                let end = idx_reader.get(doc + 1);
-                idx += end - start;
+                idx += u64s_reader.num_vals(doc) as u64;
             }
         }
         serialize_idx.add_val(idx)?;
@@ -334,8 +349,10 @@ impl IndexMerger {
         for (segment_ord, segment_reader) in self.readers.iter().enumerate() {
             let term_ordinal_mapping: &[TermOrdinal] =
                 term_ordinal_mappings.get_segment(segment_ord);
-            let ff_reader: MultiValueIntFastFieldReader<u64> =
-                segment_reader.multi_fast_field_reader(field)?;
+            let ff_reader: MultiValueIntFastFieldReader<u64> = segment_reader
+                .fast_fields()
+                .u64s(field)
+                .expect("Could not find multivalued u64 fast value reader.");
             // TODO optimize if no deletes
             for doc in segment_reader.doc_ids_alive() {
                 ff_reader.get_vals(doc, &mut vals);
@@ -367,6 +384,8 @@ impl IndexMerger {
 
         let mut vals = Vec::with_capacity(100);
 
+        let mut ff_readers = Vec::new();
+
         // Our values are bitpacked and we need to know what should be
         // our bitwidth and our minimum value before serializing any values.
         //
@@ -375,7 +394,10 @@ impl IndexMerger {
         // maximum value and initialize our Serializer.
         for reader in &self.readers {
             let ff_reader: MultiValueIntFastFieldReader<u64> =
-                reader.multi_fast_field_reader(field)?;
+                reader.fast_fields().u64s_lenient(field).expect(
+                    "Failed to find multivalued fast field reader. This is a bug in \
+                     tantivy. Please report.",
+                );
             for doc in reader.doc_ids_alive() {
                 ff_reader.get_vals(doc, &mut vals);
                 for &val in &vals {
@@ -383,6 +405,7 @@ impl IndexMerger {
                     max_value = cmp::max(val, max_value);
                 }
             }
+            ff_readers.push(ff_reader);
             // TODO optimize when no deletes
         }
 
@@ -395,9 +418,7 @@ impl IndexMerger {
         {
             let mut serialize_vals = fast_field_serializer
                 .new_u64_fast_field_with_idx(field, min_value, max_value, 1)?;
-            for reader in &self.readers {
-                let ff_reader: MultiValueIntFastFieldReader<u64> =
-                    reader.multi_fast_field_reader(field)?;
+            for (reader, ff_reader) in self.readers.iter().zip(ff_readers) {
                 // TODO optimize if no deletes
                 for doc in reader.doc_ids_alive() {
                     ff_reader.get_vals(doc, &mut vals);
@@ -416,19 +437,53 @@ impl IndexMerger {
         field: Field,
         fast_field_serializer: &mut FastFieldSerializer,
     ) -> Result<()> {
-        self.write_fast_field_idx(field, fast_field_serializer)?;
+        let mut total_num_vals = 0u64;
+        let mut bytes_readers: Vec<BytesFastFieldReader> = Vec::new();
+
+        for reader in &self.readers {
+            let bytes_reader = reader.fast_fields().bytes(field).expect(
+                "Failed to find bytes fast field reader. This is a bug in tantivy, please report.",
+            );
+            if let Some(delete_bitset) = reader.delete_bitset() {
+                for doc in 0u32..reader.max_doc() {
+                    if delete_bitset.is_alive(doc) {
+                        let num_vals = bytes_reader.get_bytes(doc).len() as u64;
+                        total_num_vals += num_vals;
+                    }
+                }
+            } else {
+                total_num_vals += bytes_reader.total_num_bytes() as u64;
+            }
+            bytes_readers.push(bytes_reader);
+        }
+
+        {
+            // We can now create our `idx` serializer, and in a second pass,
+            // can effectively push the different indexes.
+            let mut serialize_idx =
+                fast_field_serializer.new_u64_fast_field_with_idx(field, 0, total_num_vals, 0)?;
+            let mut idx = 0;
+            for (segment_reader, bytes_reader) in self.readers.iter().zip(&bytes_readers) {
+                for doc in segment_reader.doc_ids_alive() {
+                    serialize_idx.add_val(idx)?;
+                    idx += bytes_reader.get_bytes(doc).len() as u64;
+                }
+            }
+            serialize_idx.add_val(idx)?;
+            serialize_idx.close_field()?;
+        }
+
         let mut serialize_vals = fast_field_serializer.new_bytes_fast_field_with_idx(field, 1)?;
-        for reader in &self.readers {
-            let bytes_reader = reader.bytes_fast_field_reader(field)?;
+        for segment_reader in &self.readers {
+            let bytes_reader = segment_reader.fast_fields().bytes(field)
+                .expect("Failed to find bytes field in fast field reader. This is a bug in tantivy. Please report.");
             // TODO: optimize if no deletes
-            for doc in reader.doc_ids_alive() {
-                let val = bytes_reader.get_val(doc);
+            for doc in segment_reader.doc_ids_alive() {
+                let val = bytes_reader.get_bytes(doc);
                 serialize_vals.write_all(val)?;
             }
         }
        serialize_vals.flush()?;
 
        Ok(())
    }
 
|||||||
|
|
||||||
let score_field_reader = searcher
|
let score_field_reader = searcher
|
||||||
.segment_reader(0)
|
.segment_reader(0)
|
||||||
.fast_field_reader::<u64>(score_field)
|
.fast_fields()
|
||||||
|
.u64(score_field)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(score_field_reader.min_value(), 4000);
|
assert_eq!(score_field_reader.min_value(), 4000);
|
||||||
assert_eq!(score_field_reader.max_value(), 7000);
|
assert_eq!(score_field_reader.max_value(), 7000);
|
||||||
|
|
||||||
let score_field_reader = searcher
|
let score_field_reader = searcher
|
||||||
.segment_reader(1)
|
.segment_reader(1)
|
||||||
.fast_field_reader::<u64>(score_field)
|
.fast_fields()
|
||||||
|
.u64(score_field)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(score_field_reader.min_value(), 1);
|
assert_eq!(score_field_reader.min_value(), 1);
|
||||||
assert_eq!(score_field_reader.max_value(), 3);
|
assert_eq!(score_field_reader.max_value(), 3);
|
||||||
@@ -1027,7 +1084,8 @@ mod tests {
|
|||||||
);
|
);
|
||||||
let score_field_reader = searcher
|
let score_field_reader = searcher
|
||||||
.segment_reader(0)
|
.segment_reader(0)
|
||||||
.fast_field_reader::<u64>(score_field)
|
.fast_fields()
|
||||||
|
.u64(score_field)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(score_field_reader.min_value(), 3);
|
assert_eq!(score_field_reader.min_value(), 3);
|
||||||
assert_eq!(score_field_reader.max_value(), 7000);
|
assert_eq!(score_field_reader.max_value(), 7000);
|
||||||
@@ -1073,7 +1131,8 @@ mod tests {
|
|||||||
);
|
);
|
||||||
let score_field_reader = searcher
|
let score_field_reader = searcher
|
||||||
.segment_reader(0)
|
.segment_reader(0)
|
||||||
.fast_field_reader::<u64>(score_field)
|
.fast_fields()
|
||||||
|
.u64(score_field)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(score_field_reader.min_value(), 3);
|
assert_eq!(score_field_reader.min_value(), 3);
|
||||||
assert_eq!(score_field_reader.max_value(), 7000);
|
assert_eq!(score_field_reader.max_value(), 7000);
|
||||||
@@ -1125,7 +1184,8 @@ mod tests {
|
|||||||
);
|
);
|
||||||
let score_field_reader = searcher
|
let score_field_reader = searcher
|
||||||
.segment_reader(0)
|
.segment_reader(0)
|
||||||
.fast_field_reader::<u64>(score_field)
|
.fast_fields()
|
||||||
|
.u64(score_field)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(score_field_reader.min_value(), 6000);
|
assert_eq!(score_field_reader.min_value(), 6000);
|
||||||
assert_eq!(score_field_reader.max_value(), 7000);
|
assert_eq!(score_field_reader.max_value(), 7000);
|
||||||
@@ -1371,7 +1431,7 @@ mod tests {
|
|||||||
|
|
||||||
{
|
{
|
||||||
let segment = searcher.segment_reader(0u32);
|
let segment = searcher.segment_reader(0u32);
|
||||||
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
|
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
|
||||||
|
|
||||||
ff_reader.get_vals(0, &mut vals);
|
ff_reader.get_vals(0, &mut vals);
|
||||||
assert_eq!(&vals, &[1, 2]);
|
assert_eq!(&vals, &[1, 2]);
|
||||||
@@ -1406,7 +1466,7 @@ mod tests {
|
|||||||
|
|
||||||
{
|
{
|
||||||
let segment = searcher.segment_reader(1u32);
|
let segment = searcher.segment_reader(1u32);
|
||||||
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
|
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
|
||||||
ff_reader.get_vals(0, &mut vals);
|
ff_reader.get_vals(0, &mut vals);
|
||||||
assert_eq!(&vals, &[28, 27]);
|
assert_eq!(&vals, &[28, 27]);
|
||||||
|
|
||||||
@@ -1416,7 +1476,7 @@ mod tests {
|
|||||||
|
|
||||||
{
|
{
|
||||||
let segment = searcher.segment_reader(2u32);
|
let segment = searcher.segment_reader(2u32);
|
||||||
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
|
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
|
||||||
ff_reader.get_vals(0, &mut vals);
|
ff_reader.get_vals(0, &mut vals);
|
||||||
assert_eq!(&vals, &[20]);
|
assert_eq!(&vals, &[20]);
|
||||||
}
|
}
|
||||||
@@ -1449,7 +1509,7 @@ mod tests {
|
|||||||
.collect::<Vec<_>>()
|
.collect::<Vec<_>>()
|
||||||
);
|
);
|
||||||
let segment = searcher.segment_reader(0u32);
|
let segment = searcher.segment_reader(0u32);
|
||||||
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
|
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
|
||||||
|
|
||||||
ff_reader.get_vals(0, &mut vals);
|
ff_reader.get_vals(0, &mut vals);
|
||||||
assert_eq!(&vals, &[1, 2]);
|
assert_eq!(&vals, &[1, 2]);
|
||||||
|
|||||||
@@ -1,17 +1,18 @@
|
|||||||
use schema::Document;
|
use schema::Document;
|
||||||
use schema::Term;
|
use schema::Term;
|
||||||
|
use Opstamp;
|
||||||
|
|
||||||
/// Timestamped Delete operation.
|
/// Timestamped Delete operation.
|
||||||
#[derive(Clone, Eq, PartialEq, Debug)]
|
#[derive(Clone, Eq, PartialEq, Debug)]
|
||||||
pub struct DeleteOperation {
|
pub struct DeleteOperation {
|
||||||
pub opstamp: u64,
|
pub opstamp: Opstamp,
|
||||||
pub term: Term,
|
pub term: Term,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Timestamped Add operation.
|
/// Timestamped Add operation.
|
||||||
#[derive(Eq, PartialEq, Debug)]
|
#[derive(Eq, PartialEq, Debug)]
|
||||||
pub struct AddOperation {
|
pub struct AddOperation {
|
||||||
pub opstamp: u64,
|
pub opstamp: Opstamp,
|
||||||
pub document: Document,
|
pub document: Document,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,15 +1,16 @@
 use super::IndexWriter;
+use Opstamp;
 use Result;
 
 /// A prepared commit
 pub struct PreparedCommit<'a> {
     index_writer: &'a mut IndexWriter,
     payload: Option<String>,
-    opstamp: u64,
+    opstamp: Opstamp,
 }
 
 impl<'a> PreparedCommit<'a> {
-    pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: u64) -> PreparedCommit {
+    pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: Opstamp) -> PreparedCommit {
         PreparedCommit {
             index_writer,
             payload: None,
@@ -17,7 +18,7 @@ impl<'a> PreparedCommit<'a> {
         }
     }
 
-    pub fn opstamp(&self) -> u64 {
+    pub fn opstamp(&self) -> Opstamp {
         self.opstamp
     }
 
@@ -25,11 +26,11 @@ impl<'a> PreparedCommit<'a> {
         self.payload = Some(payload.to_string())
     }
 
-    pub fn abort(self) -> Result<()> {
+    pub fn abort(self) -> Result<Opstamp> {
         self.index_writer.rollback()
     }
 
-    pub fn commit(self) -> Result<u64> {
+    pub fn commit(self) -> Result<Opstamp> {
         info!("committing {}", self.opstamp);
         self.index_writer
             .segment_updater()
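A short sketch of the two-phase commit flow with the new return types. Error handling is elided, `index_writer` is assumed mutable, and the payload setter is assumed to be the `set_payload` method whose body appears in the hunk above:

    let mut prepared = index_writer.prepare_commit()?;
    prepared.set_payload("nightly batch #42");              // payload string is illustrative
    let committed_opstamp: Opstamp = prepared.commit()?;
    // Bailing out now also reports the opstamp rolled back to:
    // let rolled_back_to: Opstamp = index_writer.prepare_commit()?.abort()?;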
@@ -56,7 +56,7 @@ impl SegmentRegister {
             .values()
             .map(|segment_entry| segment_entry.meta().clone())
             .collect();
-        segment_ids.sort_by_key(|meta| meta.id());
+        segment_ids.sort_by_key(SegmentMeta::id);
         segment_ids
     }
 
@@ -36,6 +36,7 @@ use std::sync::Arc;
|
|||||||
use std::sync::RwLock;
|
use std::sync::RwLock;
|
||||||
use std::thread;
|
use std::thread;
|
||||||
use std::thread::JoinHandle;
|
use std::thread::JoinHandle;
|
||||||
|
use Opstamp;
|
||||||
use Result;
|
use Result;
|
||||||
|
|
||||||
/// Save the index meta file.
|
/// Save the index meta file.
|
||||||
@@ -224,7 +225,7 @@ impl SegmentUpdater {
|
|||||||
///
|
///
|
||||||
/// The method returns copies of the segment entries,
|
/// The method returns copies of the segment entries,
|
||||||
/// updated with the delete information.
|
/// updated with the delete information.
|
||||||
fn purge_deletes(&self, target_opstamp: u64) -> Result<Vec<SegmentEntry>> {
|
fn purge_deletes(&self, target_opstamp: Opstamp) -> Result<Vec<SegmentEntry>> {
|
||||||
let mut segment_entries = self.0.segment_manager.segment_entries();
|
let mut segment_entries = self.0.segment_manager.segment_entries();
|
||||||
for segment_entry in &mut segment_entries {
|
for segment_entry in &mut segment_entries {
|
||||||
let segment = self.0.index.segment(segment_entry.meta().clone());
|
let segment = self.0.index.segment(segment_entry.meta().clone());
|
||||||
@@ -233,7 +234,7 @@ impl SegmentUpdater {
|
|||||||
Ok(segment_entries)
|
Ok(segment_entries)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn save_metas(&self, opstamp: u64, commit_message: Option<String>) {
|
pub fn save_metas(&self, opstamp: Opstamp, commit_message: Option<String>) {
|
||||||
if self.is_alive() {
|
if self.is_alive() {
|
||||||
let index = &self.0.index;
|
let index = &self.0.index;
|
||||||
let directory = index.directory();
|
let directory = index.directory();
|
||||||
@@ -280,7 +281,7 @@ impl SegmentUpdater {
|
|||||||
.garbage_collect(|| self.0.segment_manager.list_files());
|
.garbage_collect(|| self.0.segment_manager.list_files());
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn commit(&self, opstamp: u64, payload: Option<String>) -> Result<()> {
|
pub fn commit(&self, opstamp: Opstamp, payload: Option<String>) -> Result<()> {
|
||||||
self.run_async(move |segment_updater| {
|
self.run_async(move |segment_updater| {
|
||||||
if segment_updater.is_alive() {
|
if segment_updater.is_alive() {
|
||||||
let segment_entries = segment_updater
|
let segment_entries = segment_updater
|
||||||
@@ -420,6 +421,7 @@ impl SegmentUpdater {
|
|||||||
})
|
})
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
merge_candidates.extend(committed_merge_candidates.into_iter());
|
merge_candidates.extend(committed_merge_candidates.into_iter());
|
||||||
|
|
||||||
for merge_operation in merge_candidates {
|
for merge_operation in merge_candidates {
|
||||||
match self.start_merge_impl(merge_operation) {
|
match self.start_merge_impl(merge_operation) {
|
||||||
Ok(merge_future) => {
|
Ok(merge_future) => {
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ use fastfield::FastFieldsWriter;
|
|||||||
use fieldnorm::FieldNormsWriter;
|
use fieldnorm::FieldNormsWriter;
|
||||||
use indexer::segment_serializer::SegmentSerializer;
|
use indexer::segment_serializer::SegmentSerializer;
|
||||||
use postings::MultiFieldPostingsWriter;
|
use postings::MultiFieldPostingsWriter;
|
||||||
|
use schema::FieldEntry;
|
||||||
use schema::FieldType;
|
use schema::FieldType;
|
||||||
use schema::Schema;
|
use schema::Schema;
|
||||||
use schema::Term;
|
use schema::Term;
|
||||||
@@ -15,6 +16,7 @@ use tokenizer::BoxedTokenizer;
|
|||||||
use tokenizer::FacetTokenizer;
|
use tokenizer::FacetTokenizer;
|
||||||
use tokenizer::{TokenStream, Tokenizer};
|
use tokenizer::{TokenStream, Tokenizer};
|
||||||
use DocId;
|
use DocId;
|
||||||
|
use Opstamp;
|
||||||
use Result;
|
use Result;
|
||||||
|
|
||||||
/// A `SegmentWriter` is in charge of creating segment index from a
|
/// A `SegmentWriter` is in charge of creating segment index from a
|
||||||
@@ -28,7 +30,7 @@ pub struct SegmentWriter {
|
|||||||
segment_serializer: SegmentSerializer,
|
segment_serializer: SegmentSerializer,
|
||||||
fast_field_writers: FastFieldsWriter,
|
fast_field_writers: FastFieldsWriter,
|
||||||
fieldnorms_writer: FieldNormsWriter,
|
fieldnorms_writer: FieldNormsWriter,
|
||||||
doc_opstamps: Vec<u64>,
|
doc_opstamps: Vec<Opstamp>,
|
||||||
tokenizers: Vec<Option<Box<BoxedTokenizer>>>,
|
tokenizers: Vec<Option<Box<BoxedTokenizer>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -53,7 +55,7 @@ impl SegmentWriter {
|
|||||||
schema
|
schema
|
||||||
.fields()
|
.fields()
|
||||||
.iter()
|
.iter()
|
||||||
.map(|field_entry| field_entry.field_type())
|
.map(FieldEntry::field_type)
|
||||||
.map(|field_type| match *field_type {
|
.map(|field_type| match *field_type {
|
||||||
FieldType::Str(ref text_options) => text_options
|
FieldType::Str(ref text_options) => text_options
|
||||||
.get_indexing_options()
|
.get_indexing_options()
|
||||||
|
|||||||
@@ -1,70 +1,27 @@
|
|||||||
use std::ops::Range;
|
use std::ops::Range;
|
||||||
use std::sync::atomic::Ordering;
|
use std::sync::atomic::{AtomicU64, Ordering};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use Opstamp;
|
||||||
|
|
||||||
// AtomicU64 have not landed in stable.
|
/// Stamper provides Opstamps, which are just auto-increment ids used to label
|
||||||
// For the moment let's just use AtomicUsize on
|
/// an operation.
|
||||||
// x86/64 bit platform, and a mutex on other platform.
|
///
|
||||||
#[cfg(target_arch = "x86_64")]
|
/// Cloning does not "fork" the stamp generation. The stamper actually wraps an `Arc`.
|
||||||
mod archicture_impl {
|
|
||||||
|
|
||||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
|
||||||
|
|
||||||
#[derive(Default)]
|
|
||||||
pub struct AtomicU64Ersatz(AtomicUsize);
|
|
||||||
|
|
||||||
impl AtomicU64Ersatz {
|
|
||||||
pub fn new(first_opstamp: u64) -> AtomicU64Ersatz {
|
|
||||||
AtomicU64Ersatz(AtomicUsize::new(first_opstamp as usize))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
|
|
||||||
self.0.fetch_add(val as usize, order) as u64
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(target_arch = "x86_64"))]
|
|
||||||
mod archicture_impl {
|
|
||||||
|
|
||||||
use std::sync::atomic::Ordering;
|
|
||||||
/// Under other architecture, we rely on a mutex.
|
|
||||||
use std::sync::RwLock;
|
|
||||||
|
|
||||||
#[derive(Default)]
|
|
||||||
pub struct AtomicU64Ersatz(RwLock<u64>);
|
|
||||||
|
|
||||||
impl AtomicU64Ersatz {
|
|
||||||
pub fn new(first_opstamp: u64) -> AtomicU64Ersatz {
|
|
||||||
AtomicU64Ersatz(RwLock::new(first_opstamp))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn fetch_add(&self, incr: u64, _order: Ordering) -> u64 {
|
|
||||||
let mut lock = self.0.write().unwrap();
|
|
||||||
let previous_val = *lock;
|
|
||||||
*lock = previous_val + incr;
|
|
||||||
previous_val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
use self::archicture_impl::AtomicU64Ersatz;
|
|
||||||
|
|
||||||
#[derive(Clone, Default)]
|
#[derive(Clone, Default)]
|
||||||
pub struct Stamper(Arc<AtomicU64Ersatz>);
|
pub struct Stamper(Arc<AtomicU64>);
|
||||||
|
|
||||||
impl Stamper {
|
impl Stamper {
|
||||||
pub fn new(first_opstamp: u64) -> Stamper {
|
pub fn new(first_opstamp: Opstamp) -> Stamper {
|
||||||
Stamper(Arc::new(AtomicU64Ersatz::new(first_opstamp)))
|
Stamper(Arc::new(AtomicU64::new(first_opstamp)))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn stamp(&self) -> u64 {
|
pub fn stamp(&self) -> Opstamp {
|
||||||
self.0.fetch_add(1u64, Ordering::SeqCst) as u64
|
self.0.fetch_add(1u64, Ordering::SeqCst) as u64
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Given a desired count `n`, `stamps` returns an iterator that
|
/// Given a desired count `n`, `stamps` returns an iterator that
|
||||||
/// will supply `n` number of u64 stamps.
|
/// will supply `n` number of u64 stamps.
|
||||||
pub fn stamps(&self, n: u64) -> Range<u64> {
|
pub fn stamps(&self, n: u64) -> Range<Opstamp> {
|
||||||
let start = self.0.fetch_add(n, Ordering::SeqCst);
|
let start = self.0.fetch_add(n, Ordering::SeqCst);
|
||||||
Range {
|
Range {
|
||||||
start,
|
start,
|
||||||
@@ -92,4 +49,5 @@ mod test {
|
|||||||
assert_eq!(stamper.stamps(3u64), (12..15));
|
assert_eq!(stamper.stamps(3u64), (12..15));
|
||||||
assert_eq!(stamper.stamp(), 15u64);
|
assert_eq!(stamper.stamp(), 15u64);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
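Aside: the hunk above drops the `AtomicU64Ersatz` fallback now that `AtomicU64` is stable. A standalone sketch of the resulting pattern (a shared atomic counter behind an `Arc`), runnable outside tantivy:

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

// Minimal sketch of the simplified stamper: cloning shares the counter
// instead of forking it, because the atomic sits behind an `Arc`.
#[derive(Clone, Default)]
struct Stamper(Arc<AtomicU64>);

impl Stamper {
    fn new(first_opstamp: u64) -> Stamper {
        Stamper(Arc::new(AtomicU64::new(first_opstamp)))
    }

    /// Returns the previous value and increments the counter by one.
    fn stamp(&self) -> u64 {
        self.0.fetch_add(1, Ordering::SeqCst)
    }
}

fn main() {
    let stamper = Stamper::new(7);
    let clone = stamper.clone(); // clones share the same counter
    assert_eq!(stamper.stamp(), 7);
    assert_eq!(clone.stamp(), 8);
}
```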
|||||||
37
src/lib.rs
@@ -174,6 +174,7 @@ extern crate downcast_rs;
|
|||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate fail;
|
extern crate fail;
|
||||||
|
|
||||||
|
#[cfg(feature = "mmap")]
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod functional_test;
|
mod functional_test;
|
||||||
|
|
||||||
@@ -225,7 +226,7 @@ mod docset;
|
|||||||
pub use self::docset::{DocSet, SkipResult};
|
pub use self::docset::{DocSet, SkipResult};
|
||||||
|
|
||||||
pub use core::SegmentComponent;
|
pub use core::SegmentComponent;
|
||||||
pub use core::{Index, Searcher, Segment, SegmentId, SegmentMeta};
|
pub use core::{Index, Searcher, Segment, SegmentId, SegmentMeta, IndexMeta};
|
||||||
pub use core::{InvertedIndexReader, SegmentReader};
|
pub use core::{InvertedIndexReader, SegmentReader};
|
||||||
pub use directory::Directory;
|
pub use directory::Directory;
|
||||||
pub use indexer::IndexWriter;
|
pub use indexer::IndexWriter;
|
||||||
@@ -253,6 +254,16 @@ pub mod merge_policy {
|
|||||||
/// as they are added in the segment.
|
/// as they are added in the segment.
|
||||||
pub type DocId = u32;
|
pub type DocId = u32;
|
||||||
|
|
||||||
|
/// A u64 assigned to every operation incrementally
|
||||||
|
///
|
||||||
|
/// All operations modifying the index receive a monotonic Opstamp.
|
||||||
|
/// The resulting state of the index is consistent with the opstamp ordering.
|
||||||
|
///
|
||||||
|
/// For instance, a commit with opstamp `32_423` will reflect all Add and Delete operations
|
||||||
|
/// with an opstamp `<= 32_423`. A delete operation with opstamp n will not affect a document added
|
||||||
|
/// with opstamp `n+1`.
|
||||||
|
pub type Opstamp = u64;
|
||||||
|
|
||||||
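Aside: a minimal, self-contained restatement of the visibility rule documented above, with made-up opstamp values:

```rust
// An operation is reflected by a commit iff its opstamp is <= the commit
// opstamp, and a delete at opstamp `d` only affects documents added at an
// opstamp strictly smaller than `d`.
type Opstamp = u64;

fn visible_in_commit(op: Opstamp, commit: Opstamp) -> bool {
    op <= commit
}

fn delete_affects_add(delete: Opstamp, add: Opstamp) -> bool {
    add < delete
}

fn main() {
    assert!(visible_in_commit(32_423, 32_423));
    assert!(!visible_in_commit(32_424, 32_423));
    assert!(!delete_affects_add(10, 11)); // a delete at 10 misses a doc added at 11
}
```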
/// A f32 that represents the relevance of the document to the query
|
/// A f32 that represents the relevance of the document to the query
|
||||||
///
|
///
|
||||||
/// This is modelled internally as a `f32`. The
|
/// This is modelled internally as a `f32`. The
|
||||||
@@ -875,28 +886,28 @@ mod tests {
|
|||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
let segment_reader: &SegmentReader = searcher.segment_reader(0);
|
let segment_reader: &SegmentReader = searcher.segment_reader(0);
|
||||||
{
|
{
|
||||||
let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(text_field);
|
let fast_field_reader_opt = segment_reader.fast_fields().u64(text_field);
|
||||||
assert!(fast_field_reader_res.is_err());
|
assert!(fast_field_reader_opt.is_none());
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(stored_int_field);
|
let fast_field_reader_opt = segment_reader.fast_fields().u64(stored_int_field);
|
||||||
assert!(fast_field_reader_res.is_err());
|
assert!(fast_field_reader_opt.is_none());
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(fast_field_signed);
|
let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_signed);
|
||||||
assert!(fast_field_reader_res.is_err());
|
assert!(fast_field_reader_opt.is_none());
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let fast_field_reader_res = segment_reader.fast_field_reader::<i64>(fast_field_signed);
|
let fast_field_reader_opt = segment_reader.fast_fields().i64(fast_field_signed);
|
||||||
assert!(fast_field_reader_res.is_ok());
|
assert!(fast_field_reader_opt.is_some());
|
||||||
let fast_field_reader = fast_field_reader_res.unwrap();
|
let fast_field_reader = fast_field_reader_opt.unwrap();
|
||||||
assert_eq!(fast_field_reader.get(0), 4i64)
|
assert_eq!(fast_field_reader.get(0), 4i64)
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
let fast_field_reader_res = segment_reader.fast_field_reader::<i64>(fast_field_signed);
|
let fast_field_reader_opt = segment_reader.fast_fields().i64(fast_field_signed);
|
||||||
assert!(fast_field_reader_res.is_ok());
|
assert!(fast_field_reader_opt.is_some());
|
||||||
let fast_field_reader = fast_field_reader_res.unwrap();
|
let fast_field_reader = fast_field_reader_opt.unwrap();
|
||||||
assert_eq!(fast_field_reader.get(0), 4i64)
|
assert_eq!(fast_field_reader.get(0), 4i64)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
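Aside: the test changes above reflect the new fast field accessors, which return `Option` instead of the old `Result`-based `fast_field_reader::<T>`. A hedged sketch of the single-value variant:

```rust
use tantivy::schema::Field;
use tantivy::SegmentReader;

// Hedged sketch: fetch the i64 fast field value of the first document of a
// segment, or `None` if the field is not declared as an i64 fast field.
fn first_doc_i64(segment: &SegmentReader, field: Field) -> Option<i64> {
    segment
        .fast_fields()
        .i64(field)
        .map(|reader| reader.get(0))
}
```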
|||||||
249
src/postings/block_search.rs
Normal file
@@ -0,0 +1,249 @@
|
|||||||
|
use postings::compression::AlignedBuffer;
|
||||||
|
|
||||||
|
/// This modules define the logic used to search for a doc in a given
|
||||||
|
/// block. (at most 128 docs)
|
||||||
|
///
|
||||||
|
/// Searching within a block is a hotspot when running intersection.
|
||||||
|
/// so it was worth defining it in its own module.
|
||||||
|
|
||||||
|
#[cfg(target_arch = "x86_64")]
|
||||||
|
mod sse2 {
|
||||||
|
use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
|
||||||
|
use std::arch::x86_64::__m128i as DataType;
|
||||||
|
use std::arch::x86_64::_mm_add_epi32 as op_add;
|
||||||
|
use std::arch::x86_64::_mm_cmplt_epi32 as op_lt;
|
||||||
|
use std::arch::x86_64::_mm_load_si128 as op_load; // requires 128-bits alignment
|
||||||
|
use std::arch::x86_64::_mm_set1_epi32 as set1;
|
||||||
|
use std::arch::x86_64::_mm_setzero_si128 as set0;
|
||||||
|
use std::arch::x86_64::_mm_sub_epi32 as op_sub;
|
||||||
|
use std::arch::x86_64::{_mm_cvtsi128_si32, _mm_shuffle_epi32};
|
||||||
|
|
||||||
|
const MASK1: i32 = 78;
|
||||||
|
const MASK2: i32 = 177;
|
||||||
|
|
||||||
|
/// Performs an exhaustive linear search over the whole block.
|
||||||
|
///
|
||||||
|
/// There is no early exit here. We simply count the
|
||||||
|
/// number of elements that are `< target`.
|
||||||
|
pub(crate) fn linear_search_sse2_128(arr: &AlignedBuffer, target: u32) -> usize {
|
||||||
|
unsafe {
|
||||||
|
let ptr = arr as *const AlignedBuffer as *const DataType;
|
||||||
|
let vkey = set1(target as i32);
|
||||||
|
let mut cnt = set0();
|
||||||
|
// We work over 4 `__m128i` at a time.
|
||||||
|
// A single `__m128i` actually contains 4 `u32`.
|
||||||
|
for i in 0..(COMPRESSION_BLOCK_SIZE as isize) / (4 * 4) {
|
||||||
|
let cmp1 = op_lt(op_load(ptr.offset(i * 4)), vkey);
|
||||||
|
let cmp2 = op_lt(op_load(ptr.offset(i * 4 + 1)), vkey);
|
||||||
|
let cmp3 = op_lt(op_load(ptr.offset(i * 4 + 2)), vkey);
|
||||||
|
let cmp4 = op_lt(op_load(ptr.offset(i * 4 + 3)), vkey);
|
||||||
|
let sum = op_add(op_add(cmp1, cmp2), op_add(cmp3, cmp4));
|
||||||
|
cnt = op_sub(cnt, sum);
|
||||||
|
}
|
||||||
|
cnt = op_add(cnt, _mm_shuffle_epi32(cnt, MASK1));
|
||||||
|
cnt = op_add(cnt, _mm_shuffle_epi32(cnt, MASK2));
|
||||||
|
_mm_cvtsi128_si32(cnt) as usize
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod test {
|
||||||
|
use super::linear_search_sse2_128;
|
||||||
|
use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_linear_search_sse2_128_u32() {
|
||||||
|
let mut block = [0u32; COMPRESSION_BLOCK_SIZE];
|
||||||
|
for el in 0u32..128u32 {
|
||||||
|
block[el as usize] = el * 2 + 1 << 18;
|
||||||
|
}
|
||||||
|
let target = block[64] + 1;
|
||||||
|
assert_eq!(linear_search_sse2_128(&AlignedBuffer(block), target), 65);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
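For reference, a scalar restatement of what `linear_search_sse2_128` computes; the SIMD version accumulates the same count sixteen values per iteration and folds the four partial counters with the two shuffles:

```rust
// Count how many of the 128 block entries are strictly smaller than `target`.
fn count_less_than(block: &[u32; 128], target: u32) -> usize {
    block.iter().filter(|&&el| el < target).count()
}

fn main() {
    let mut block = [0u32; 128];
    for (i, slot) in block.iter_mut().enumerate() {
        *slot = (i as u32) * 2;
    }
    assert_eq!(count_less_than(&block, 11), 6); // 0, 2, 4, 6, 8, 10
}
```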
|
/// This `linear search` browses exhaustively through the array,
|
||||||
|
/// as an early exit branch would be very difficult to predict.
|
||||||
|
///
|
||||||
|
/// Coupled with `exponential search` this function is likely
|
||||||
|
/// to be called with the same `len`
|
||||||
|
fn linear_search(arr: &[u32], target: u32) -> usize {
|
||||||
|
arr.iter().map(|&el| if el < target { 1 } else { 0 }).sum()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn exponential_search(arr: &[u32], target: u32) -> (usize, usize) {
|
||||||
|
let end = arr.len();
|
||||||
|
let mut begin = 0;
|
||||||
|
for &pivot in &[1, 3, 7, 15, 31, 63] {
|
||||||
|
if pivot >= end {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if arr[pivot] > target {
|
||||||
|
return (begin, pivot);
|
||||||
|
}
|
||||||
|
begin = pivot;
|
||||||
|
}
|
||||||
|
(begin, end)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn galloping(block_docs: &[u32], target: u32) -> usize {
|
||||||
|
let (start, end) = exponential_search(&block_docs, target);
|
||||||
|
start + linear_search(&block_docs[start..end], target)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tantivy may rely on SIMD instructions to search for a specific document within
|
||||||
|
/// a given block.
|
||||||
|
#[derive(Clone, Copy, PartialEq)]
|
||||||
|
pub enum BlockSearcher {
|
||||||
|
#[cfg(target_arch = "x86_64")]
|
||||||
|
SSE2,
|
||||||
|
Scalar,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl BlockSearcher {
|
||||||
|
/// Search the first index containing an element greater or equal to
|
||||||
|
/// the target.
|
||||||
|
///
|
||||||
|
/// The results should be equivalent to
|
||||||
|
/// ```ignore
|
||||||
|
/// block[..]
|
||||||
|
///     .iter()
|
||||||
|
///     .take_while(|&&val| val < target)
|
||||||
|
///     .count()
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// The `start` argument is just used to hint that the response is
|
||||||
|
/// greater than or equal to `start`. The implementation may or may not use
|
||||||
|
/// it for optimization.
|
||||||
|
///
|
||||||
|
/// # Assumption
|
||||||
|
///
|
||||||
|
/// The array len is > start.
|
||||||
|
/// The block is sorted
|
||||||
|
/// The target is assumed greater or equal to the `arr[start]`.
|
||||||
|
/// The target is assumed smaller or equal to the last element of the block.
|
||||||
|
///
|
||||||
|
/// Currently the scalar implementation starts with an exponential search, and
|
||||||
|
/// then operates a linear search in the result subarray.
|
||||||
|
///
|
||||||
|
/// If SSE2 instructions are available in the `(platform, running CPU)`,
|
||||||
|
/// then we use a different implementation that does an exhaustive linear search over
|
||||||
|
/// the full block whenever the block is full (`len == 128`). It is surprisingly faster, most likely because of the lack
|
||||||
|
/// of branches.
|
||||||
|
pub(crate) fn search_in_block(
|
||||||
|
self,
|
||||||
|
block_docs: &AlignedBuffer,
|
||||||
|
len: usize,
|
||||||
|
start: usize,
|
||||||
|
target: u32,
|
||||||
|
) -> usize {
|
||||||
|
#[cfg(target_arch = "x86_64")]
|
||||||
|
{
|
||||||
|
use postings::compression::COMPRESSION_BLOCK_SIZE;
|
||||||
|
if self == BlockSearcher::SSE2 && len == COMPRESSION_BLOCK_SIZE {
|
||||||
|
return sse2::linear_search_sse2_128(block_docs, target);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
start + galloping(&block_docs.0[start..len], target)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for BlockSearcher {
|
||||||
|
fn default() -> BlockSearcher {
|
||||||
|
#[cfg(target_arch = "x86_64")]
|
||||||
|
{
|
||||||
|
if is_x86_feature_detected!("sse2") {
|
||||||
|
return BlockSearcher::SSE2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
BlockSearcher::Scalar
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::exponential_search;
|
||||||
|
use super::linear_search;
|
||||||
|
use super::BlockSearcher;
|
||||||
|
use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_linear_search() {
|
||||||
|
let len: usize = 50;
|
||||||
|
let arr: Vec<u32> = (0..len).map(|el| 1u32 + (el as u32) * 2).collect();
|
||||||
|
for target in 1..*arr.last().unwrap() {
|
||||||
|
let res = linear_search(&arr[..], target);
|
||||||
|
if res > 0 {
|
||||||
|
assert!(arr[res - 1] < target);
|
||||||
|
}
|
||||||
|
if res < len {
|
||||||
|
assert!(arr[res] >= target);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_exponentiel_search() {
|
||||||
|
assert_eq!(exponential_search(&[1, 2], 0), (0, 1));
|
||||||
|
assert_eq!(exponential_search(&[1, 2], 1), (0, 1));
|
||||||
|
assert_eq!(
|
||||||
|
exponential_search(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 7),
|
||||||
|
(3, 7)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn util_test_search_in_block(block_searcher: BlockSearcher, block: &[u32], target: u32) {
|
||||||
|
let cursor = search_in_block_trivial_but_slow(block, target);
|
||||||
|
assert!(block.len() < COMPRESSION_BLOCK_SIZE);
|
||||||
|
let mut output_buffer = [u32::max_value(); COMPRESSION_BLOCK_SIZE];
|
||||||
|
output_buffer[..block.len()].copy_from_slice(block);
|
||||||
|
for i in 0..cursor {
|
||||||
|
assert_eq!(
|
||||||
|
block_searcher.search_in_block(
|
||||||
|
&AlignedBuffer(output_buffer),
|
||||||
|
block.len(),
|
||||||
|
i,
|
||||||
|
target
|
||||||
|
),
|
||||||
|
cursor
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn util_test_search_in_block_all(block_searcher: BlockSearcher, block: &[u32]) {
|
||||||
|
use std::collections::HashSet;
|
||||||
|
let mut targets = HashSet::new();
|
||||||
|
for (i, val) in block.iter().cloned().enumerate() {
|
||||||
|
if i > 0 {
|
||||||
|
targets.insert(val - 1);
|
||||||
|
}
|
||||||
|
targets.insert(val);
|
||||||
|
}
|
||||||
|
for target in targets {
|
||||||
|
util_test_search_in_block(block_searcher, block, target);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn search_in_block_trivial_but_slow(block: &[u32], target: u32) -> usize {
|
||||||
|
block.iter().take_while(|&&val| val < target).count()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn test_search_in_block_util(block_searcher: BlockSearcher) {
|
||||||
|
for len in 1u32..128u32 {
|
||||||
|
let v: Vec<u32> = (0..len).map(|i| i * 2).collect();
|
||||||
|
util_test_search_in_block_all(block_searcher, &v[..]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_search_in_block_scalar() {
|
||||||
|
test_search_in_block_util(BlockSearcher::Scalar);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(target_arch = "x86_64")]
|
||||||
|
#[test]
|
||||||
|
fn test_search_in_block_sse2() {
|
||||||
|
test_search_in_block_util(BlockSearcher::SSE2);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -43,9 +43,14 @@ impl BlockEncoder {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// We ensure that the output buffer is aligned on 128 bits
|
||||||
|
/// in order to run SSE2 linear search on it.
|
||||||
|
#[repr(align(128))]
|
||||||
|
pub(crate) struct AlignedBuffer(pub [u32; COMPRESSION_BLOCK_SIZE]);
|
||||||
|
|
||||||
pub struct BlockDecoder {
|
pub struct BlockDecoder {
|
||||||
bitpacker: BitPacker4x,
|
bitpacker: BitPacker4x,
|
||||||
pub output: [u32; COMPRESSION_BLOCK_SIZE + 1],
|
output: AlignedBuffer,
|
||||||
pub output_len: usize,
|
pub output_len: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
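Aside: a quick, self-contained check of the alignment guarantee the SSE2 load relies on. `#[repr(align(128))]` requests 128-byte alignment, which more than satisfies the 16-byte (128-bit) alignment required by `_mm_load_si128`; the struct below simply mirrors the declaration from this hunk.

```rust
use std::mem::align_of;

#[allow(dead_code)]
#[repr(align(128))]
struct AlignedBuffer([u32; 128]);

fn main() {
    // 128-byte alignment, comfortably a multiple of the 16 bytes SSE2 needs.
    assert_eq!(align_of::<AlignedBuffer>(), 128);
    assert_eq!(align_of::<AlignedBuffer>() % 16, 0);
}
```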
@@ -55,11 +60,9 @@ impl BlockDecoder {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn with_val(val: u32) -> BlockDecoder {
|
pub fn with_val(val: u32) -> BlockDecoder {
|
||||||
let mut output = [val; COMPRESSION_BLOCK_SIZE + 1];
|
|
||||||
output[COMPRESSION_BLOCK_SIZE] = 0u32;
|
|
||||||
BlockDecoder {
|
BlockDecoder {
|
||||||
bitpacker: BitPacker4x::new(),
|
bitpacker: BitPacker4x::new(),
|
||||||
output,
|
output: AlignedBuffer([val; COMPRESSION_BLOCK_SIZE]),
|
||||||
output_len: 0,
|
output_len: 0,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -72,23 +75,28 @@ impl BlockDecoder {
|
|||||||
) -> usize {
|
) -> usize {
|
||||||
self.output_len = COMPRESSION_BLOCK_SIZE;
|
self.output_len = COMPRESSION_BLOCK_SIZE;
|
||||||
self.bitpacker
|
self.bitpacker
|
||||||
.decompress_sorted(offset, &compressed_data, &mut self.output, num_bits)
|
.decompress_sorted(offset, &compressed_data, &mut self.output.0, num_bits)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn uncompress_block_unsorted(&mut self, compressed_data: &[u8], num_bits: u8) -> usize {
|
pub fn uncompress_block_unsorted(&mut self, compressed_data: &[u8], num_bits: u8) -> usize {
|
||||||
self.output_len = COMPRESSION_BLOCK_SIZE;
|
self.output_len = COMPRESSION_BLOCK_SIZE;
|
||||||
self.bitpacker
|
self.bitpacker
|
||||||
.decompress(&compressed_data, &mut self.output, num_bits)
|
.decompress(&compressed_data, &mut self.output.0, num_bits)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn output_array(&self) -> &[u32] {
|
pub fn output_array(&self) -> &[u32] {
|
||||||
&self.output[..self.output_len]
|
&self.output.0[..self.output_len]
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
|
pub(crate) fn output_aligned(&self) -> (&AlignedBuffer, usize) {
|
||||||
|
(&self.output, self.output_len)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn output(&self, idx: usize) -> u32 {
|
pub fn output(&self, idx: usize) -> u32 {
|
||||||
self.output[idx]
|
self.output.0[idx]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -159,12 +167,12 @@ impl VIntDecoder for BlockDecoder {
|
|||||||
num_els: usize,
|
num_els: usize,
|
||||||
) -> usize {
|
) -> usize {
|
||||||
self.output_len = num_els;
|
self.output_len = num_els;
|
||||||
vint::uncompress_sorted(compressed_data, &mut self.output[..num_els], offset)
|
vint::uncompress_sorted(compressed_data, &mut self.output.0[..num_els], offset)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize {
|
fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize {
|
||||||
self.output_len = num_els;
|
self.output_len = num_els;
|
||||||
vint::uncompress_unsorted(compressed_data, &mut self.output[..num_els])
|
vint::uncompress_unsorted(compressed_data, &mut self.output.0[..num_els])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -2,6 +2,7 @@
|
|||||||
Postings module (also called inverted index)
|
Postings module (also called inverted index)
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
mod block_search;
|
||||||
pub(crate) mod compression;
|
pub(crate) mod compression;
|
||||||
/// Postings module
|
/// Postings module
|
||||||
///
|
///
|
||||||
@@ -16,6 +17,8 @@ mod skip;
|
|||||||
mod stacker;
|
mod stacker;
|
||||||
mod term_info;
|
mod term_info;
|
||||||
|
|
||||||
|
pub(crate) use self::block_search::BlockSearcher;
|
||||||
|
|
||||||
pub(crate) use self::postings_writer::MultiFieldPostingsWriter;
|
pub(crate) use self::postings_writer::MultiFieldPostingsWriter;
|
||||||
pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
|
pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
|
||||||
|
|
||||||
@@ -52,13 +55,15 @@ pub mod tests {
|
|||||||
use fieldnorm::FieldNormReader;
|
use fieldnorm::FieldNormReader;
|
||||||
use indexer::operation::AddOperation;
|
use indexer::operation::AddOperation;
|
||||||
use indexer::SegmentWriter;
|
use indexer::SegmentWriter;
|
||||||
|
use merge_policy::NoMergePolicy;
|
||||||
use query::Scorer;
|
use query::Scorer;
|
||||||
use rand::rngs::StdRng;
|
use rand::rngs::StdRng;
|
||||||
use rand::{Rng, SeedableRng};
|
use rand::{Rng, SeedableRng};
|
||||||
use schema::Field;
|
|
||||||
use schema::IndexRecordOption;
|
|
||||||
use schema::{Document, Schema, Term, INDEXED, STRING, TEXT};
|
use schema::{Document, Schema, Term, INDEXED, STRING, TEXT};
|
||||||
|
use schema::{Field, TextOptions};
|
||||||
|
use schema::{IndexRecordOption, TextFieldIndexing};
|
||||||
use std::iter;
|
use std::iter;
|
||||||
|
use tokenizer::{SimpleTokenizer, MAX_TOKEN_LEN};
|
||||||
use DocId;
|
use DocId;
|
||||||
use Score;
|
use Score;
|
||||||
|
|
||||||
@@ -104,9 +109,7 @@ pub mod tests {
|
|||||||
let searcher = index.reader().unwrap().searcher();
|
let searcher = index.reader().unwrap().searcher();
|
||||||
let inverted_index = searcher.segment_reader(0u32).inverted_index(title);
|
let inverted_index = searcher.segment_reader(0u32).inverted_index(title);
|
||||||
let term = Term::from_field_text(title, "abc");
|
let term = Term::from_field_text(title, "abc");
|
||||||
|
|
||||||
let mut positions = Vec::new();
|
let mut positions = Vec::new();
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut postings = inverted_index
|
let mut postings = inverted_index
|
||||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||||
@@ -159,6 +162,52 @@ pub mod tests {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
pub fn test_drop_token_that_are_too_long() {
|
||||||
|
let ok_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN).collect();
|
||||||
|
let mut exceeding_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN + 1).collect();
|
||||||
|
exceeding_token_text.push_str(" hello");
|
||||||
|
let mut schema_builder = Schema::builder();
|
||||||
|
let text_options = TextOptions::default().set_indexing_options(
|
||||||
|
TextFieldIndexing::default()
|
||||||
|
.set_index_option(IndexRecordOption::WithFreqsAndPositions)
|
||||||
|
.set_tokenizer("simple_no_truncation"),
|
||||||
|
);
|
||||||
|
let text_field = schema_builder.add_text_field("text", text_options);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let index = Index::create_in_ram(schema.clone());
|
||||||
|
index
|
||||||
|
.tokenizers()
|
||||||
|
.register("simple_no_truncation", SimpleTokenizer);
|
||||||
|
let reader = index.reader().unwrap();
|
||||||
|
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||||
|
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||||
|
{
|
||||||
|
index_writer.add_document(doc!(text_field=>exceeding_token_text));
|
||||||
|
index_writer.commit().unwrap();
|
||||||
|
reader.reload().unwrap();
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
let segment_reader = searcher.segment_reader(0u32);
|
||||||
|
let inverted_index = segment_reader.inverted_index(text_field);
|
||||||
|
assert_eq!(inverted_index.terms().num_terms(), 1);
|
||||||
|
let mut bytes = vec![];
|
||||||
|
assert!(inverted_index.terms().ord_to_term(0, &mut bytes));
|
||||||
|
assert_eq!(&bytes, b"hello");
|
||||||
|
}
|
||||||
|
{
|
||||||
|
index_writer.add_document(doc!(text_field=>ok_token_text.clone()));
|
||||||
|
index_writer.commit().unwrap();
|
||||||
|
reader.reload().unwrap();
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
let segment_reader = searcher.segment_reader(1u32);
|
||||||
|
let inverted_index = segment_reader.inverted_index(text_field);
|
||||||
|
assert_eq!(inverted_index.terms().num_terms(), 1);
|
||||||
|
let mut bytes = vec![];
|
||||||
|
assert!(inverted_index.terms().ord_to_term(0, &mut bytes));
|
||||||
|
assert_eq!(&bytes[..], ok_token_text.as_bytes());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
pub fn test_position_and_fieldnorm1() {
|
pub fn test_position_and_fieldnorm1() {
|
||||||
let mut positions = Vec::new();
|
let mut positions = Vec::new();
|
||||||
|
|||||||
@@ -12,8 +12,8 @@ use std::io;
|
|||||||
use std::marker::PhantomData;
|
use std::marker::PhantomData;
|
||||||
use std::ops::DerefMut;
|
use std::ops::DerefMut;
|
||||||
use termdict::TermOrdinal;
|
use termdict::TermOrdinal;
|
||||||
use tokenizer::Token;
|
|
||||||
use tokenizer::TokenStream;
|
use tokenizer::TokenStream;
|
||||||
|
use tokenizer::{Token, MAX_TOKEN_LEN};
|
||||||
use DocId;
|
use DocId;
|
||||||
use Result;
|
use Result;
|
||||||
|
|
||||||
@@ -210,8 +210,18 @@ pub trait PostingsWriter {
|
|||||||
) -> u32 {
|
) -> u32 {
|
||||||
let mut term = Term::for_field(field);
|
let mut term = Term::for_field(field);
|
||||||
let mut sink = |token: &Token| {
|
let mut sink = |token: &Token| {
|
||||||
term.set_text(token.text.as_str());
|
// We skip all tokens whose byte length exceeds `MAX_TOKEN_LEN`.
|
||||||
self.subscribe(term_index, doc_id, token.position as u32, &term, heap);
|
if token.text.len() <= MAX_TOKEN_LEN {
|
||||||
|
term.set_text(token.text.as_str());
|
||||||
|
self.subscribe(term_index, doc_id, token.position as u32, &term, heap);
|
||||||
|
} else {
|
||||||
|
info!(
|
||||||
|
"A token exceeding MAX_TOKEN_LEN ({}>{}) was dropped. Search for \
|
||||||
|
MAX_TOKEN_LEN in the documentation for more information.",
|
||||||
|
token.text.len(),
|
||||||
|
MAX_TOKEN_LEN
|
||||||
|
);
|
||||||
|
}
|
||||||
};
|
};
|
||||||
token_stream.process(&mut sink)
|
token_stream.process(&mut sink)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,9 +4,10 @@ use common::{BinarySerializable, VInt};
|
|||||||
use docset::{DocSet, SkipResult};
|
use docset::{DocSet, SkipResult};
|
||||||
use owned_read::OwnedRead;
|
use owned_read::OwnedRead;
|
||||||
use positions::PositionReader;
|
use positions::PositionReader;
|
||||||
use postings::compression::compressed_block_size;
|
use postings::compression::{compressed_block_size, AlignedBuffer};
|
||||||
use postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE};
|
use postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE};
|
||||||
use postings::serializer::PostingsSerializer;
|
use postings::serializer::PostingsSerializer;
|
||||||
|
use postings::BlockSearcher;
|
||||||
use postings::FreqReadingOption;
|
use postings::FreqReadingOption;
|
||||||
use postings::Postings;
|
use postings::Postings;
|
||||||
use postings::SkipReader;
|
use postings::SkipReader;
|
||||||
@@ -60,6 +61,7 @@ pub struct SegmentPostings {
|
|||||||
block_cursor: BlockSegmentPostings,
|
block_cursor: BlockSegmentPostings,
|
||||||
cur: usize,
|
cur: usize,
|
||||||
position_computer: Option<PositionComputer>,
|
position_computer: Option<PositionComputer>,
|
||||||
|
block_searcher: BlockSearcher,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SegmentPostings {
|
impl SegmentPostings {
|
||||||
@@ -70,6 +72,7 @@ impl SegmentPostings {
|
|||||||
block_cursor: empty_block_cursor,
|
block_cursor: empty_block_cursor,
|
||||||
cur: COMPRESSION_BLOCK_SIZE,
|
cur: COMPRESSION_BLOCK_SIZE,
|
||||||
position_computer: None,
|
position_computer: None,
|
||||||
|
block_searcher: BlockSearcher::default(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -117,42 +120,33 @@ impl SegmentPostings {
|
|||||||
block_cursor: segment_block_postings,
|
block_cursor: segment_block_postings,
|
||||||
cur: COMPRESSION_BLOCK_SIZE, // cursor within the block
|
cur: COMPRESSION_BLOCK_SIZE, // cursor within the block
|
||||||
position_computer: positions_stream_opt.map(PositionComputer::new),
|
position_computer: positions_stream_opt.map(PositionComputer::new),
|
||||||
|
block_searcher: BlockSearcher::default(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn linear_search(arr: &[u32], target: u32) -> usize {
|
|
||||||
arr.iter().map(|&el| if el < target { 1 } else { 0 }).sum()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn exponential_search(arr: &[u32], target: u32) -> (usize, usize) {
|
|
||||||
let end = arr.len();
|
|
||||||
let mut begin = 0;
|
|
||||||
for &pivot in &[1, 3, 7, 15, 31, 63] {
|
|
||||||
if pivot >= end {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if arr[pivot] > target {
|
|
||||||
return (begin, pivot);
|
|
||||||
}
|
|
||||||
begin = pivot;
|
|
||||||
}
|
|
||||||
(begin, end)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Search the first index containing an element greater or equal to the target.
|
|
||||||
///
|
|
||||||
/// # Assumption
|
|
||||||
///
|
|
||||||
/// The array is assumed non empty.
|
|
||||||
/// The target is assumed greater or equal to the first element.
|
|
||||||
/// The target is assumed smaller or equal to the last element.
|
|
||||||
fn search_within_block(block_docs: &[u32], target: u32) -> usize {
|
|
||||||
let (start, end) = exponential_search(block_docs, target);
|
|
||||||
start + linear_search(&block_docs[start..end], target)
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DocSet for SegmentPostings {
|
impl DocSet for SegmentPostings {
|
||||||
|
// goes to the next element.
|
||||||
|
// next needs to be called a first time to point to the correct element.
|
||||||
|
#[inline]
|
||||||
|
fn advance(&mut self) -> bool {
|
||||||
|
if self.position_computer.is_some() && self.cur < COMPRESSION_BLOCK_SIZE {
|
||||||
|
let term_freq = self.term_freq() as usize;
|
||||||
|
if let Some(position_computer) = self.position_computer.as_mut() {
|
||||||
|
position_computer.add_skip(term_freq);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
self.cur += 1;
|
||||||
|
if self.cur >= self.block_cursor.block_len() {
|
||||||
|
self.cur = 0;
|
||||||
|
if !self.block_cursor.advance() {
|
||||||
|
self.cur = COMPRESSION_BLOCK_SIZE;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
fn skip_next(&mut self, target: DocId) -> SkipResult {
|
fn skip_next(&mut self, target: DocId) -> SkipResult {
|
||||||
if !self.advance() {
|
if !self.advance() {
|
||||||
return SkipResult::End;
|
return SkipResult::End;
|
||||||
@@ -175,7 +169,6 @@ impl DocSet for SegmentPostings {
|
|||||||
|
|
||||||
// skip blocks until one that might contain the target
|
// skip blocks until one that might contain the target
|
||||||
// check if we need to go to the next block
|
// check if we need to go to the next block
|
||||||
let need_positions = self.position_computer.is_some();
|
|
||||||
let mut sum_freqs_skipped: u32 = 0;
|
let mut sum_freqs_skipped: u32 = 0;
|
||||||
if !self
|
if !self
|
||||||
.block_cursor
|
.block_cursor
|
||||||
@@ -189,7 +182,7 @@ impl DocSet for SegmentPostings {
|
|||||||
// we are not in the right block.
|
// we are not in the right block.
|
||||||
//
|
//
|
||||||
// First compute all of the freqs skipped from the current block.
|
// First compute all of the freqs skipped from the current block.
|
||||||
if need_positions {
|
if self.position_computer.is_some() {
|
||||||
sum_freqs_skipped = self.block_cursor.freqs()[self.cur..].iter().sum();
|
sum_freqs_skipped = self.block_cursor.freqs()[self.cur..].iter().sum();
|
||||||
match self.block_cursor.skip_to(target) {
|
match self.block_cursor.skip_to(target) {
|
||||||
BlockSegmentPostingsSkipResult::Success(block_skip_freqs) => {
|
BlockSegmentPostingsSkipResult::Success(block_skip_freqs) => {
|
||||||
@@ -208,25 +201,21 @@ impl DocSet for SegmentPostings {
|
|||||||
self.cur = 0;
|
self.cur = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
// we're in the right block now, start with an exponential search
|
let cur = self.cur;
|
||||||
let block_docs = self.block_cursor.docs();
|
|
||||||
let new_cur = self
|
|
||||||
.cur
|
|
||||||
.wrapping_add(search_within_block(&block_docs[self.cur..], target));
|
|
||||||
|
|
||||||
if need_positions {
|
// we're in the right block now, start with an exponential search
|
||||||
sum_freqs_skipped += self.block_cursor.freqs()[self.cur..new_cur]
|
let (output, len) = self.block_cursor.docs_aligned();
|
||||||
.iter()
|
let new_cur = self
|
||||||
.sum::<u32>();
|
.block_searcher
|
||||||
self.position_computer
|
.search_in_block(&output, len, cur, target);
|
||||||
.as_mut()
|
if let Some(position_computer) = self.position_computer.as_mut() {
|
||||||
.unwrap()
|
sum_freqs_skipped += self.block_cursor.freqs()[cur..new_cur].iter().sum::<u32>();
|
||||||
.add_skip(sum_freqs_skipped as usize);
|
position_computer.add_skip(sum_freqs_skipped as usize);
|
||||||
}
|
}
|
||||||
self.cur = new_cur;
|
self.cur = new_cur;
|
||||||
|
|
||||||
// `doc` is now the first element >= `target`
|
// `doc` is now the first element >= `target`
|
||||||
let doc = block_docs[new_cur];
|
let doc = output.0[new_cur];
|
||||||
debug_assert!(doc >= target);
|
debug_assert!(doc >= target);
|
||||||
if doc == target {
|
if doc == target {
|
||||||
SkipResult::Reached
|
SkipResult::Reached
|
||||||
@@ -235,40 +224,25 @@ impl DocSet for SegmentPostings {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// goes to the next element.
|
|
||||||
// next needs to be called a first time to point to the correct element.
|
|
||||||
#[inline]
|
|
||||||
fn advance(&mut self) -> bool {
|
|
||||||
if self.position_computer.is_some() {
|
|
||||||
let term_freq = self.term_freq() as usize;
|
|
||||||
self.position_computer.as_mut().unwrap().add_skip(term_freq);
|
|
||||||
}
|
|
||||||
self.cur += 1;
|
|
||||||
if self.cur >= self.block_cursor.block_len() {
|
|
||||||
self.cur = 0;
|
|
||||||
if !self.block_cursor.advance() {
|
|
||||||
self.cur = COMPRESSION_BLOCK_SIZE;
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
true
|
|
||||||
}
|
|
||||||
|
|
||||||
fn size_hint(&self) -> u32 {
|
|
||||||
self.len() as u32
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the current document's `DocId`.
|
/// Return the current document's `DocId`.
|
||||||
|
///
|
||||||
|
/// # Panics
|
||||||
|
///
|
||||||
|
/// Will panic if called without having called advance before.
|
||||||
#[inline]
|
#[inline]
|
||||||
fn doc(&self) -> DocId {
|
fn doc(&self) -> DocId {
|
||||||
let docs = self.block_cursor.docs();
|
let docs = self.block_cursor.docs();
|
||||||
debug_assert!(
|
debug_assert!(
|
||||||
self.cur < docs.len(),
|
self.cur < docs.len(),
|
||||||
"Have you forgotten to call `.advance()` at least once before calling .doc()."
|
"Have you forgotten to call `.advance()` at least once before calling `.doc()` ."
|
||||||
);
|
);
|
||||||
docs[self.cur]
|
docs[self.cur]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn size_hint(&self) -> u32 {
|
||||||
|
self.len() as u32
|
||||||
|
}
|
||||||
|
|
||||||
fn append_to_bitset(&mut self, bitset: &mut BitSet) {
|
fn append_to_bitset(&mut self, bitset: &mut BitSet) {
|
||||||
// finish the current block
|
// finish the current block
|
||||||
if self.advance() {
|
if self.advance() {
|
||||||
@@ -292,17 +266,33 @@ impl HasLen for SegmentPostings {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Postings for SegmentPostings {
|
impl Postings for SegmentPostings {
|
||||||
|
/// Returns the frequency associated to the current document.
|
||||||
|
/// If the schema is set up so that no frequencies have been encoded,
|
||||||
|
/// this method should always return 1.
|
||||||
|
///
|
||||||
|
/// # Panics
|
||||||
|
///
|
||||||
|
/// Will panic if called without having called advance before.
|
||||||
fn term_freq(&self) -> u32 {
|
fn term_freq(&self) -> u32 {
|
||||||
|
debug_assert!(
|
||||||
|
// Here we do not use the len of `freqs()`
|
||||||
|
// because it is actually ok to request for the freq of doc
|
||||||
|
// even if no frequency were encoded for the field.
|
||||||
|
//
|
||||||
|
// In that case we hit the block just as if the frequency had been
|
||||||
|
// decoded. The block is simply prefilled by the value 1.
|
||||||
|
self.cur < COMPRESSION_BLOCK_SIZE,
|
||||||
|
"Have you forgotten to call `.advance()` at least once before calling \
|
||||||
|
`.term_freq()`."
|
||||||
|
);
|
||||||
self.block_cursor.freq(self.cur)
|
self.block_cursor.freq(self.cur)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn positions_with_offset(&mut self, offset: u32, output: &mut Vec<u32>) {
|
fn positions_with_offset(&mut self, offset: u32, output: &mut Vec<u32>) {
|
||||||
if self.position_computer.is_some() {
|
let term_freq = self.term_freq() as usize;
|
||||||
output.resize(self.term_freq() as usize, 0u32);
|
if let Some(position_comp) = self.position_computer.as_mut() {
|
||||||
self.position_computer
|
output.resize(term_freq, 0u32);
|
||||||
.as_mut()
|
position_comp.positions_with_offset(offset, &mut output[..]);
|
||||||
.unwrap()
|
|
||||||
.positions_with_offset(offset, &mut output[..])
|
|
||||||
} else {
|
} else {
|
||||||
output.clear();
|
output.clear();
|
||||||
}
|
}
|
||||||
@@ -424,6 +414,10 @@ impl BlockSegmentPostings {
|
|||||||
self.doc_decoder.output_array()
|
self.doc_decoder.output_array()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn docs_aligned(&self) -> (&AlignedBuffer, usize) {
|
||||||
|
self.doc_decoder.output_aligned()
|
||||||
|
}
|
||||||
|
|
||||||
/// Return the document at index `idx` of the block.
|
/// Return the document at index `idx` of the block.
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn doc(&self, idx: usize) -> u32 {
|
pub fn doc(&self, idx: usize) -> u32 {
|
||||||
@@ -614,16 +608,13 @@ impl<'b> Streamer<'b> for BlockSegmentPostings {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
|
|
||||||
use super::exponential_search;
|
|
||||||
use super::linear_search;
|
|
||||||
use super::search_within_block;
|
|
||||||
use super::BlockSegmentPostings;
|
use super::BlockSegmentPostings;
|
||||||
use super::BlockSegmentPostingsSkipResult;
|
use super::BlockSegmentPostingsSkipResult;
|
||||||
use super::SegmentPostings;
|
use super::SegmentPostings;
|
||||||
use common::HasLen;
|
use common::HasLen;
|
||||||
use core::Index;
|
use core::Index;
|
||||||
use docset::DocSet;
|
use docset::DocSet;
|
||||||
|
use postings::postings::Postings;
|
||||||
use schema::IndexRecordOption;
|
use schema::IndexRecordOption;
|
||||||
use schema::Schema;
|
use schema::Schema;
|
||||||
use schema::Term;
|
use schema::Term;
|
||||||
@@ -632,21 +623,6 @@ mod tests {
|
|||||||
use DocId;
|
use DocId;
|
||||||
use SkipResult;
|
use SkipResult;
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_linear_search() {
|
|
||||||
let len: usize = 50;
|
|
||||||
let arr: Vec<u32> = (0..len).map(|el| 1u32 + (el as u32) * 2).collect();
|
|
||||||
for target in 1..*arr.last().unwrap() {
|
|
||||||
let res = linear_search(&arr[..], target);
|
|
||||||
if res > 0 {
|
|
||||||
assert!(arr[res - 1] < target);
|
|
||||||
}
|
|
||||||
if res < len {
|
|
||||||
assert!(arr[res] >= target);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_empty_segment_postings() {
|
fn test_empty_segment_postings() {
|
||||||
let mut postings = SegmentPostings::empty();
|
let mut postings = SegmentPostings::empty();
|
||||||
@@ -655,6 +631,18 @@ mod tests {
|
|||||||
assert_eq!(postings.len(), 0);
|
assert_eq!(postings.len(), 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[should_panic(expected = "Have you forgotten to call `.advance()`")]
|
||||||
|
fn test_panic_if_doc_called_before_advance() {
|
||||||
|
SegmentPostings::empty().doc();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[should_panic(expected = "Have you forgotten to call `.advance()`")]
|
||||||
|
fn test_panic_if_freq_called_before_advance() {
|
||||||
|
SegmentPostings::empty().term_freq();
|
||||||
|
}
|
||||||
|
|
||||||
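Aside: the two `#[should_panic]` tests above pin down the calling convention introduced in this release. A hedged sketch of the correct pattern, assuming the `DocSet` and `Postings` traits re-exported at the crate root:

```rust
use tantivy::{DocId, DocSet, Postings};

// `.advance()` must succeed at least once before `.doc()` or `.term_freq()`
// may be read; otherwise both methods now panic.
fn collect_docs_and_freqs<P: Postings>(mut postings: P) -> Vec<(DocId, u32)> {
    let mut out = Vec::new();
    while postings.advance() {
        out.push((postings.doc(), postings.term_freq()));
    }
    out
}
```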
#[test]
|
#[test]
|
||||||
fn test_empty_block_segment_postings() {
|
fn test_empty_block_segment_postings() {
|
||||||
let mut postings = BlockSegmentPostings::empty();
|
let mut postings = BlockSegmentPostings::empty();
|
||||||
@@ -662,56 +650,6 @@ mod tests {
|
|||||||
assert_eq!(postings.doc_freq(), 0);
|
assert_eq!(postings.doc_freq(), 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn search_within_block_trivial_but_slow(block: &[u32], target: u32) -> usize {
|
|
||||||
block
|
|
||||||
.iter()
|
|
||||||
.cloned()
|
|
||||||
.enumerate()
|
|
||||||
.filter(|&(_, ref val)| *val >= target)
|
|
||||||
.next()
|
|
||||||
.unwrap()
|
|
||||||
.0
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_exponentiel_search() {
|
|
||||||
assert_eq!(exponential_search(&[1, 2], 0), (0, 1));
|
|
||||||
assert_eq!(exponential_search(&[1, 2], 1), (0, 1));
|
|
||||||
assert_eq!(
|
|
||||||
exponential_search(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 7),
|
|
||||||
(3, 7)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn util_test_search_within_block(block: &[u32], target: u32) {
|
|
||||||
assert_eq!(
|
|
||||||
search_within_block(block, target),
|
|
||||||
search_within_block_trivial_but_slow(block, target)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn util_test_search_within_block_all(block: &[u32]) {
|
|
||||||
use std::collections::HashSet;
|
|
||||||
let mut targets = HashSet::new();
|
|
||||||
for (i, val) in block.iter().cloned().enumerate() {
|
|
||||||
if i > 0 {
|
|
||||||
targets.insert(val - 1);
|
|
||||||
}
|
|
||||||
targets.insert(val);
|
|
||||||
}
|
|
||||||
for target in targets {
|
|
||||||
util_test_search_within_block(block, target);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_search_within_block() {
|
|
||||||
for len in 1u32..128u32 {
|
|
||||||
let v: Vec<u32> = (0..len).map(|i| i * 2).collect();
|
|
||||||
util_test_search_within_block_all(&v[..]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_block_segment_postings() {
|
fn test_block_segment_postings() {
|
||||||
let mut block_segments = build_block_postings(&(0..100_000).collect::<Vec<u32>>());
|
let mut block_segments = build_block_postings(&(0..100_000).collect::<Vec<u32>>());
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ use termdict::{TermDictionaryBuilder, TermOrdinal};
|
|||||||
use DocId;
|
use DocId;
|
||||||
use Result;
|
use Result;
|
||||||
|
|
||||||
/// `PostingsSerializer` is in charge of serializing
|
/// `InvertedIndexSerializer` is in charge of serializing
|
||||||
/// postings on disk, in the
|
/// postings on disk, in the
|
||||||
/// * `.idx` (inverted index)
|
/// * `.idx` (inverted index)
|
||||||
/// * `.pos` (positions file)
|
/// * `.pos` (positions file)
|
||||||
@@ -54,7 +54,7 @@ pub struct InvertedIndexSerializer {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl InvertedIndexSerializer {
|
impl InvertedIndexSerializer {
|
||||||
/// Open a new `PostingsSerializer` for the given segment
|
/// Open a new `InvertedIndexSerializer` for the given segment
|
||||||
fn create(
|
fn create(
|
||||||
terms_write: CompositeWrite<WritePtr>,
|
terms_write: CompositeWrite<WritePtr>,
|
||||||
postings_write: CompositeWrite<WritePtr>,
|
postings_write: CompositeWrite<WritePtr>,
|
||||||
@@ -175,7 +175,7 @@ impl<'a> FieldSerializer<'a> {
|
|||||||
let positions_idx = self
|
let positions_idx = self
|
||||||
.positions_serializer_opt
|
.positions_serializer_opt
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.map(|positions_serializer| positions_serializer.positions_idx())
|
.map(PositionSerializer::positions_idx)
|
||||||
.unwrap_or(0u64);
|
.unwrap_or(0u64);
|
||||||
TermInfo {
|
TermInfo {
|
||||||
doc_freq: 0,
|
doc_freq: 0,
|
||||||
|
|||||||
@@ -205,4 +205,332 @@ mod tests {
|
|||||||
assert_eq!(score_docs(&boolean_query), vec![0.977973, 0.84699446]);
|
assert_eq!(score_docs(&boolean_query), vec![0.977973, 0.84699446]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
DoC 0
|
||||||
|
{
|
||||||
|
"_index": "test",
|
||||||
|
"_type": "_doc",
|
||||||
|
"_id": "0",
|
||||||
|
"matched": true,
|
||||||
|
"explanation": {
|
||||||
|
"value": 6.2610235,
|
||||||
|
"description": "max of:",
|
||||||
|
"details": [{
|
||||||
|
"value": 6.1969156,
|
||||||
|
"description": "sum of:",
|
||||||
|
"details": [{
|
||||||
|
"value": 6.1969156,
|
||||||
|
"description": "weight(text:оксана in 561) [PerFieldSimilarity], result of:",
|
||||||
|
"details": [{
|
||||||
|
"value": 6.1969156,
|
||||||
|
"description": "score(freq=1.0), product of:",
|
||||||
|
"details": [{
|
||||||
|
"value": 2.2,
|
||||||
|
"description": "boost",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 5.65998,
|
||||||
|
"description": "idf, computed as log(1 + (N - n + 0.5) / (n + 0.5)) from:",
|
||||||
|
"details": [{
|
||||||
|
"value": 3,
|
||||||
|
"description": "n, number of documents containing term",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 1004,
|
||||||
|
"description": "N, total number of documents with field",
|
||||||
|
"details": []
|
||||||
|
}]
|
||||||
|
}, {
|
||||||
|
"value": 0.49766606,
|
||||||
|
"description": "tf, computed as freq / (freq + k1 * (1 - b + b * dl / avgdl)) from:",
|
||||||
|
"details": [{
|
||||||
|
"value": 1.0,
|
||||||
|
"description": "freq, occurrences of term within document",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 1.2,
|
||||||
|
"description": "k1, term saturation parameter",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 0.75,
|
||||||
|
"description": "b, length normalization parameter",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 19.0,
|
||||||
|
"description": "dl, length of field",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 24.105577,
|
||||||
|
"description": "avgdl, average length of field",
|
||||||
|
"details": []
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}, {
|
||||||
|
"value": 6.2610235,
|
||||||
|
"description": "sum of:",
|
||||||
|
"details": [{
|
||||||
|
"value": 6.2610235,
|
||||||
|
"description": "weight(title:оксана in 561) [PerFieldSimilarity], result of:",
|
||||||
|
"details": [{
|
||||||
|
"value": 6.2610235,
|
||||||
|
"description": "score(freq=1.0), product of:",
|
||||||
|
"details": [{
|
||||||
|
"value": 2.2,
|
||||||
|
"description": "boost",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 5.4086657,
|
||||||
|
"description": "idf, computed as log(1 + (N - n + 0.5) / (n + 0.5)) from:",
|
||||||
|
"details": [{
|
||||||
|
"value": 4,
|
||||||
|
"description": "n, number of documents containing term",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 1004,
|
||||||
|
"description": "N, total number of documents with field",
|
||||||
|
"details": []
|
||||||
|
}]
|
||||||
|
}, {
|
||||||
|
"value": 0.52617776,
|
||||||
|
"description": "tf, computed as freq / (freq + k1 * (1 - b + b * dl / avgdl)) from:",
|
||||||
|
"details": [{
|
||||||
|
"value": 1.0,
|
||||||
|
"description": "freq, occurrences of term within document",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 1.2,
|
||||||
|
"description": "k1, term saturation parameter",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 0.75,
|
||||||
|
"description": "b, length normalization parameter",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 4.0,
|
||||||
|
"description": "dl, length of field",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 5.99502,
|
||||||
|
"description": "avgdl, average length of field",
|
||||||
|
"details": []
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
doc 2
|
||||||
|
{
|
||||||
|
"_index": "test",
|
||||||
|
"_type": "_doc",
|
||||||
|
"_id": "2",
|
||||||
|
"matched": true,
|
||||||
|
"explanation": {
|
||||||
|
"value": 11.911896,
|
||||||
|
"description": "max of:",
|
||||||
|
"details": [{
|
||||||
|
"value": 11.911896,
|
||||||
|
"description": "sum of:",
|
||||||
|
"details": [{
|
||||||
|
"value": 5.4068284,
|
||||||
|
"description": "weight(title:оксана in 0) [PerFieldSimilarity], result of:",
|
||||||
|
"details": [{
|
||||||
|
"value": 5.4068284,
|
||||||
|
"description": "score(freq=1.0), product of:",
|
||||||
|
"details": [{
|
||||||
|
"value": 2.2,
|
||||||
|
"description": "boost",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 5.4086657,
|
||||||
|
"description": "idf, computed as log(1 + (N - n + 0.5) / (n + 0.5)) from:",
|
||||||
|
"details": [{
|
||||||
|
"value": 4,
|
||||||
|
"description": "n, number of documents containing term",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 1004,
|
||||||
|
"description": "N, total number of documents with field",
|
||||||
|
"details": []
|
||||||
|
}]
|
||||||
|
}, {
|
||||||
|
"value": 0.45439103,
|
||||||
|
"description": "tf, computed as freq / (freq + k1 * (1 - b + b * dl / avgdl)) from:",
|
||||||
|
"details": [{
|
||||||
|
"value": 1.0,
|
||||||
|
"description": "freq, occurrences of term within document",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 1.2,
|
||||||
|
"description": "k1, term saturation parameter",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 0.75,
|
||||||
|
"description": "b, length normalization parameter",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 6.0,
|
||||||
|
"description": "dl, length of field",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 5.99502,
|
||||||
|
"description": "avgdl, average length of field",
|
||||||
|
"details": []
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}, {
|
||||||
|
"value": 6.505067,
|
||||||
|
"description": "weight(title:лифенко in 0) [PerFieldSimilarity], result of:",
|
||||||
|
"details": [{
|
||||||
|
"value": 6.505067,
|
||||||
|
"description": "score(freq=1.0), product of:",
|
||||||
|
"details": [{
|
||||||
|
"value": 2.2,
|
||||||
|
"description": "boost",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 6.5072775,
|
||||||
|
"description": "idf, computed as log(1 + (N - n + 0.5) / (n + 0.5)) from:",
|
||||||
|
"details": [{
|
||||||
|
"value": 1,
|
||||||
|
"description": "n, number of documents containing term",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 1004,
|
||||||
|
"description": "N, total number of documents with field",
|
||||||
|
"details": []
|
||||||
|
}]
|
||||||
|
}, {
|
||||||
|
"value": 0.45439103,
|
||||||
|
"description": "tf, computed as freq / (freq + k1 * (1 - b + b * dl / avgdl)) from:",
|
||||||
|
"details": [{
|
||||||
|
"value": 1.0,
|
||||||
|
"description": "freq, occurrences of term within document",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 1.2,
|
||||||
|
"description": "k1, term saturation parameter",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 0.75,
|
||||||
|
"description": "b, length normalization parameter",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 6.0,
|
||||||
|
"description": "dl, length of field",
|
||||||
|
"details": []
|
||||||
|
}, {
|
||||||
|
"value": 5.99502,
|
||||||
|
"description": "avgdl, average length of field",
|
||||||
|
"details": []
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*/
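// Aside, not part of the change set above: a minimal sketch re-deriving the figures
// quoted in the explain output, using the formulas stated in its descriptions:
//   idf = ln(1 + (N - n + 0.5) / (n + 0.5))
//   tf  = freq / (freq + k1 * (1 - b + b * dl / avgdl))
// The function name and tolerances below are illustrative only.
#[test]
fn bm25_explain_figures_sketch() {
    // docs containing "оксана" / docs carrying the "title" field
    let (n, big_n) = (4.0_f64, 1004.0_f64);
    let idf = (1.0 + (big_n - n + 0.5) / (n + 0.5)).ln();
    assert!((idf - 5.4086657).abs() < 1e-4);

    let (freq, k1, b, dl, avgdl) = (1.0_f64, 1.2, 0.75, 6.0, 5.99502);
    let tf = freq / (freq + k1 * (1.0 - b + b * dl / avgdl));
    assert!((tf - 0.45439103).abs() < 1e-4);

    // The per-term contribution is boost * idf * tf: 2.2 * 5.4086657 * 0.45439103 ≈ 5.4068284.
    assert!((2.2 * idf * tf - 5.4068284).abs() < 1e-3);
}
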
// motivated by #554
#[test]
fn test_bm25_several_fields() {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let text = schema_builder.add_text_field("text", TEXT);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);

    let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
    index_writer.add_document(doc!(
        // tf = 1 0
        title => "Законы притяжения Оксана Кулакова",
        // tf = 1 0
        text => "Законы притяжения Оксана Кулакова] \n\nТема: Сексуальное искусство, Женственность\nТип товара: Запись вебинара (аудио)\nПродолжительность: 1,5 часа\n\nСсылка на вебинар:\n ",
    ));
    index_writer.add_document(doc!(
        // tf = 1 0
        title => "Любимые русские пироги (Оксана Путан)",
        // tf = 2 0
        text => "http://i95.fastpic.ru/big/2017/0628/9a/615b9c8504d94a3893d7f496ac53539a.jpg \n\nОт издателя\nОксана Путан профессиональный повар, автор кулинарных книг и известный кулинарный блогер. Ее рецепты отличаются практичностью, доступностью и пользуются огромной популярностью в русскоязычном интернете. Это третья книга автора о самом вкусном и ароматном настоящих русских пирогах и выпечке!\nДаже новички на кухне легко готовят по ее рецептам. Оксана описывает процесс приготовления настолько подробно и понятно, что вам остается только наслаждаться готовкой и не тратить время на лишние усилия. Готовьте легко и просто!\n\nhttps://www.ozon.ru/context/detail/id/139872462/"
    ));
    index_writer.add_document(doc!(
        // tf = 1 1
        title => "PDF Мастер Класс \"Морячок\" (Оксана Лифенко)",
        // tf = 0 0
        text => "https://i.ibb.co/pzvHrDN/I3d U T6 Gg TM.jpg\nhttps://i.ibb.co/NFrb6v6/N0ls Z9nwjb U.jpg\nВ описание входит штаны, кофта, берет, матросский воротник. Описание продается в формате PDF, состоит из 12 страниц формата А4 и может быть напечатано на любом принтере.\nОписание предназначено для кукол BJD RealPuki от FairyLand, но может подойти и другим подобным куклам. Также вы можете вязать этот наряд из обычной пряжи, и он подойдет для куколок побольше.\nhttps://vk.com/market 95724412?w=product 95724412_2212"
    ));
    for _ in 0..1_000 {
        index_writer.add_document(doc!(
            title => "a b d e f g",
            text => "maitre corbeau sur un arbre perche tenait dans son bec un fromage Maitre rnard par lodeur alleche lui tint a peu pres ce langage."
        ));
    }
    index_writer.commit().unwrap();
    let reader = index.reader().unwrap();
    let searcher = reader.searcher();
    let query_parser = QueryParser::for_index(&index, vec![title, text]);
    let query = query_parser
        .parse_query("Оксана Лифенко")
        .unwrap();
    let weight = query.weight(&searcher, true).unwrap();
    let mut scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
    // let mut scores = vec![];
    // while
    println!("=====|");
    scorer.advance();
    dbg!("scorer.score()");
    assert!(false);

    // scores.push(scorer.score());
    // assert_eq!(scores, &[0.8017307, 0.72233325, 1.0300813]);
}

// motivated by #554
#[test]
fn test_bm25_several_fields_bbb() {
    let mut schema_builder = Schema::builder();
    let text = schema_builder.add_text_field("text", TEXT);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);

    let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
    index_writer.add_document(doc!(
        text => "Законы притяжения Оксана Кулакова] \n\nТема: Сексуальное искусство, Женственность\nТип товара: Запись вебинара (аудио)\nПродолжительность: 1,5 часа\n\nСсылка на вебинар:\n ",
    ));
    index_writer.add_document(doc!(
        text => "http://i95.fastpic.ru/big/2017/0628/9a/615b9c8504d94a3893d7f496ac53539a.jpg \n\nОт издателя\nОксана Путан профессиональный повар, автор кулинарных книг и известный кулинарный блогер. Ее рецепты отличаются практичностью, доступностью и пользуются огромной популярностью в русскоязычном интернете. Это третья книга автора о самом вкусном и ароматном настоящих русских пирогах и выпечке!\nДаже новички на кухне легко готовят по ее рецептам. Оксана описывает процесс приготовления настолько подробно и понятно, что вам остается только наслаждаться готовкой и не тратить время на лишние усилия. Готовьте легко и просто!\n\nhttps://www.ozon.ru/context/detail/id/139872462/"
    ));
    index_writer.add_document(doc!(
        text => "https://i.ibb.co/pzvHrDN/I3d U T6 Gg TM.jpg\nhttps://i.ibb.co/NFrb6v6/N0ls Z9nwjb U.jpg\nВ описание входит штаны, кофта, берет, матросский воротник. Описание продается в формате PDF, состоит из 12 страниц формата А4 и может быть напечатано на любом принтере.\nОписание предназначено для кукол BJD RealPuki от FairyLand, но может подойти и другим подобным куклам. Также вы можете вязать этот наряд из обычной пряжи, и он подойдет для куколок побольше.\nhttps://vk.com/market 95724412?w=product 95724412_2212"
    ));
    for _ in 0..100 {
        index_writer.add_document(doc!(
            text => "maitre corbeau sur un arbre perche tenait dans son bec un fromage Maitre rnard par lodeur alleche lui tint a peu pres ce langage."
        ));
    }
    index_writer.commit().unwrap();
    let reader = index.reader().unwrap();
    let searcher = reader.searcher();
    let query_parser = QueryParser::for_index(&index, vec![text]);
    let query = query_parser
        .parse_query("Оксана Лифенко")
        .unwrap();
    let weight = query.weight(&searcher, true).unwrap();
    let mut scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
    let mut scores = vec![];
    while scorer.advance() {
        scores.push(scorer.score());
    }
    assert_eq!(scores, &[0.8017307, 0.72233325, 1.0300813]);
    index_writer.commit().unwrap();
}
}
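For context, the same ranking can also be exercised through the public collector API instead of driving the scorer by hand. The sketch below is purely illustrative and assumes the `TopDocs` collector and `Searcher::search` entry point of this tantivy version; it is not part of the commits above:

    // Illustrative sketch only: collect BM25 scores with the TopDocs collector.
    use tantivy::collector::TopDocs;
    use tantivy::query::QueryParser;
    use tantivy::schema::{Schema, TEXT};
    use tantivy::{Document, Index};

    fn top_scores_sketch() -> Vec<f32> {
        let mut schema_builder = Schema::builder();
        let text = schema_builder.add_text_field("text", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        let mut doc = Document::default();
        doc.add_text(text, "Оксана Лифенко");
        writer.add_document(doc);
        writer.commit().unwrap();

        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let query = QueryParser::for_index(&index, vec![text])
            .parse_query("Оксана Лифенко")
            .unwrap();
        // TopDocs yields (score, doc_address) pairs ordered by descending BM25 score.
        let top = searcher.search(&query, &TopDocs::with_limit(10)).unwrap();
        top.into_iter().map(|(score, _)| score).collect()
    }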

@@ -14,41 +14,35 @@ use Score;
 /// specialized implementation if the two
 /// shortest scorers are `TermScorer`s.
 pub fn intersect_scorers(mut scorers: Vec<Box<Scorer>>) -> Box<Scorer> {
+    if scorers.is_empty() {
+        return Box::new(EmptyScorer);
+    }
+    if scorers.len() == 1 {
+        return scorers.pop().unwrap();
+    }
+    // We know that we have at least 2 elements.
     let num_docsets = scorers.len();
     scorers.sort_by(|left, right| right.size_hint().cmp(&left.size_hint()));
-    let rarest_opt = scorers.pop();
-    let second_rarest_opt = scorers.pop();
+    let left = scorers.pop().unwrap();
+    let right = scorers.pop().unwrap();
     scorers.reverse();
-    match (rarest_opt, second_rarest_opt) {
-        (None, None) => Box::new(EmptyScorer),
-        (Some(single_docset), None) => single_docset,
-        (Some(left), Some(right)) => {
-            {
-                let all_term_scorers = [&left, &right]
-                    .iter()
-                    .all(|&scorer| scorer.is::<TermScorer>());
-                if all_term_scorers {
-                    let left = *(left.downcast::<TermScorer>().map_err(|_| ()).unwrap());
-                    let right = *(right.downcast::<TermScorer>().map_err(|_| ()).unwrap());
-                    return Box::new(Intersection {
-                        left,
-                        right,
-                        others: scorers,
-                        num_docsets,
-                    });
-                }
-            }
-            Box::new(Intersection {
-                left,
-                right,
-                others: scorers,
-                num_docsets,
-            })
-        }
-        _ => {
-            unreachable!();
-        }
-    }
+    let all_term_scorers = [&left, &right]
+        .iter()
+        .all(|&scorer| scorer.is::<TermScorer>());
+    if all_term_scorers {
+        return Box::new(Intersection {
+            left: *(left.downcast::<TermScorer>().map_err(|_| ()).unwrap()),
+            right: *(right.downcast::<TermScorer>().map_err(|_| ()).unwrap()),
+            others: scorers,
+            num_docsets,
+        });
+    }
+    Box::new(Intersection {
+        left,
+        right,
+        others: scorers,
+        num_docsets,
+    })
 }

 /// Creates a `DocSet` that iterator through the intersection of two `DocSet`s.

@@ -124,7 +118,6 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
                     return false;
                 }
             }
-
             match left.skip_next(candidate) {
                 SkipResult::Reached => {
                     break;

@@ -140,35 +133,36 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
             }
             // test the remaining scorers;
             for (ord, docset) in self.others.iter_mut().enumerate() {
-                if ord != other_candidate_ord {
-                    // `candidate_ord` is already at the
-                    // right position.
-                    //
-                    // Calling `skip_next` would advance this docset
-                    // and miss it.
-                    match docset.skip_next(candidate) {
-                        SkipResult::Reached => {}
-                        SkipResult::OverStep => {
-                            // this is not in the intersection,
-                            // let's update our candidate.
-                            candidate = docset.doc();
-                            match left.skip_next(candidate) {
-                                SkipResult::Reached => {
-                                    other_candidate_ord = ord;
-                                }
-                                SkipResult::OverStep => {
-                                    candidate = left.doc();
-                                    other_candidate_ord = usize::max_value();
-                                }
-                                SkipResult::End => {
-                                    return false;
-                                }
-                            }
-                            continue 'outer;
-                        }
-                        SkipResult::End => {
-                            return false;
-                        }
-                    }
-                }
+                if ord == other_candidate_ord {
+                    continue;
+                }
+                // `candidate_ord` is already at the
+                // right position.
+                //
+                // Calling `skip_next` would advance this docset
+                // and miss it.
+                match docset.skip_next(candidate) {
+                    SkipResult::Reached => {}
+                    SkipResult::OverStep => {
+                        // this is not in the intersection,
+                        // let's update our candidate.
+                        candidate = docset.doc();
+                        match left.skip_next(candidate) {
+                            SkipResult::Reached => {
+                                other_candidate_ord = ord;
+                            }
+                            SkipResult::OverStep => {
+                                candidate = left.doc();
+                                other_candidate_ord = usize::max_value();
+                            }
+                            SkipResult::End => {
+                                return false;
+                            }
+                        }
+                        continue 'outer;
+                    }
+                    SkipResult::End => {
+                        return false;
+                    }
+                }
             }
         }
     }
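The hunks above refine a leapfrog-style intersection over DocSets. The standalone sketch below (plain sorted slices, no tantivy types) illustrates the same strategy: take a candidate from the rarest list, seek every other list to it, and restart with a new candidate whenever a list oversteps:

    // Illustrative only, not tantivy code.
    fn leapfrog_intersect(mut lists: Vec<&[u32]>) -> Vec<u32> {
        if lists.is_empty() {
            return Vec::new();
        }
        // Rarest list first, mirroring the sort on `size_hint()` above.
        lists.sort_by_key(|l| l.len());
        let mut cursors = vec![0usize; lists.len()];
        let mut out = Vec::new();
        'outer: while cursors[0] < lists[0].len() {
            let mut candidate = lists[0][cursors[0]];
            for i in 1..lists.len() {
                // Seek list i to the first element >= candidate.
                while cursors[i] < lists[i].len() && lists[i][cursors[i]] < candidate {
                    cursors[i] += 1;
                }
                match lists[i].get(cursors[i]) {
                    // One list is exhausted: the intersection cannot grow any further.
                    None => break 'outer,
                    // Reached the candidate exactly: keep checking the remaining lists.
                    Some(&doc) if doc == candidate => {}
                    // Overstepped: adopt the larger value as the new candidate and restart.
                    Some(&doc) => {
                        candidate = doc;
                        while cursors[0] < lists[0].len() && lists[0][cursors[0]] < candidate {
                            cursors[0] += 1;
                        }
                        continue 'outer;
                    }
                }
            }
            out.push(candidate);
            cursors[0] += 1;
        }
        out
    }

For example, calling it with the sorted lists [1, 3, 5, 9], [3, 4, 5, 10] and [0, 3, 5] yields [3, 5].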
@@ -4,6 +4,7 @@ use error::TantivyError;
 use query::bm25::BM25Weight;
 use query::Query;
 use query::Weight;
+use schema::IndexRecordOption;
 use schema::{Field, Term};
 use std::collections::BTreeSet;
 use Result;

@@ -83,7 +84,7 @@ impl Query for PhraseQuery {
         let has_positions = field_entry
             .field_type()
             .get_index_record_option()
-            .map(|index_record_option| index_record_option.has_positions())
+            .map(IndexRecordOption::has_positions)
             .unwrap_or(false);
         if !has_positions {
             let field_name = field_entry.name();
@@ -1,6 +1,7 @@
 #![cfg_attr(feature = "cargo-clippy", allow(clippy::unneeded_field_pattern))]
 #![cfg_attr(feature = "cargo-clippy", allow(clippy::toplevel_ref_arg))]
 
+use super::query_grammar;
 use super::user_input_ast::*;
 use combine::char::*;
 use combine::error::StreamError;

@@ -22,7 +23,7 @@ parser! {
 parser! {
     fn word[I]()(I) -> String
     where [I: Stream<Item = char>] {
-        many1(satisfy(|c: char| c.is_alphanumeric()))
+        many1(satisfy(char::is_alphanumeric))
             .and_then(|s: String| {
                 match s.as_str() {
                     "OR" => Err(StreamErrorFor::<I>::unexpected_static_message("OR")),

@@ -62,7 +63,7 @@ parser! {
     fn negative_number[I]()(I) -> String
     where [I: Stream<Item = char>]
     {
-        (char('-'), many1(satisfy(|c: char| c.is_numeric())))
+        (char('-'), many1(satisfy(char::is_numeric)))
             .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
     }
 }

@@ -184,7 +185,7 @@ parser! {
                 }
             )
         )
-        .map(|el| el.into_dnf())
+        .map(query_grammar::Element::into_dnf)
         .map(|fnd| {
             if fnd.len() == 1 {
                 UserInputAST::and(fnd.into_iter().next().unwrap()) //< safe
@@ -98,4 +98,20 @@ mod tests {
         }
     }
+
+    #[test]
+    fn test_term_query_count_when_there_are_deletes() {
+        let mut schema_builder = Schema::builder();
+        let text_field = schema_builder.add_text_field("text", TEXT);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_with_num_threads(1, 5_000_000).unwrap();
+        index_writer.add_document(doc!(text_field=>"a b"));
+        index_writer.add_document(doc!(text_field=>"a c"));
+        index_writer.delete_term(Term::from_field_text(text_field, "b"));
+        index_writer.commit().unwrap();
+        let term_a = Term::from_field_text(text_field, "a");
+        let term_query = TermQuery::new(term_a, IndexRecordOption::Basic);
+        let reader = index.reader().unwrap();
+        assert_eq!(term_query.count(&*reader.searcher()).unwrap(), 1);
+    }
 }
@@ -39,15 +39,15 @@ impl Weight for TermWeight {
     }
 
     fn count(&self, reader: &SegmentReader) -> Result<u32> {
-        if reader.num_deleted_docs() == 0 {
+        if let Some(delete_bitset) = reader.delete_bitset() {
+            Ok(self.scorer(reader)?.count(delete_bitset))
+        } else {
             let field = self.term.field();
             Ok(reader
                 .inverted_index(field)
                 .get_term_info(&self.term)
                 .map(|term_info| term_info.doc_freq)
                 .unwrap_or(0))
-        } else {
-            Ok(self.scorer(reader)?.count())
         }
     }
 }
@@ -96,7 +96,7 @@ fn refill<TScorer: Scorer, TScoreCombiner: ScoreCombiner>(
 
 impl<TScorer: Scorer, TScoreCombiner: ScoreCombiner> Union<TScorer, TScoreCombiner> {
     fn refill(&mut self) -> bool {
-        if let Some(min_doc) = self.docsets.iter_mut().map(|docset| docset.doc()).min() {
+        if let Some(min_doc) = self.docsets.iter().map(DocSet::doc).min() {
             self.offset = min_doc;
             self.cursor = 0;
             refill(

@@ -145,7 +145,7 @@ where
         }
     }
 
-    fn count(&mut self) -> u32 {
+    fn count_including_deleted(&mut self) -> u32 {
         let mut count = self.bitsets[self.cursor..HORIZON_NUM_TINYBITSETS]
             .iter()
             .map(|bitset| bitset.len())

@@ -163,6 +163,8 @@ where
         count
     }
 
+    // TODO implement `count` efficiently.
+
     fn skip_next(&mut self, target: DocId) -> SkipResult {
         if !self.advance() {
             return SkipResult::End;

@@ -300,7 +302,7 @@ mod tests {
             count += 1;
         }
         assert!(!union_expected.advance());
-        assert_eq!(count, make_union().count());
+        assert_eq!(count, make_union().count_including_deleted());
     }
 
     #[test]
@@ -13,6 +13,11 @@ pub trait Weight: Send + Sync + 'static {
 
     /// Returns the number documents within the given `SegmentReader`.
     fn count(&self, reader: &SegmentReader) -> Result<u32> {
-        Ok(self.scorer(reader)?.count())
+        let mut scorer = self.scorer(reader)?;
+        if let Some(delete_bitset) = reader.delete_bitset() {
+            Ok(scorer.count(delete_bitset))
+        } else {
+            Ok(scorer.count_including_deleted())
+        }
     }
 }
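The counting rule encoded by the new default implementation is easy to state outside of tantivy: without deletes the raw match count is fine, otherwise every matching doc id must be checked against the tombstones. A minimal standalone sketch, using plain slices instead of a real DocSet or delete bitset:

    // Illustrative only: `matches` stands for the doc ids a scorer would emit,
    // `deleted` is a per-doc tombstone mask assumed to cover every doc id in `matches`.
    fn count_including_deleted(matches: &[u32]) -> u32 {
        matches.len() as u32
    }

    fn count_live(matches: &[u32], deleted: &[bool]) -> u32 {
        matches
            .iter()
            .filter(|&&doc| !deleted[doc as usize])
            .count() as u32
    }

With matches [0, 1] and doc 1 deleted, count_live returns 1 while count_including_deleted returns 2, which is exactly the discrepancy the delete-aware count above avoids.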
@@ -128,7 +128,7 @@ impl Document {
         self.field_values
             .iter()
             .filter(|field_value| field_value.field() == field)
-            .map(|field_value| field_value.value())
+            .map(FieldValue::value)
             .collect()
     }
 
@@ -137,7 +137,7 @@ impl Document {
         self.field_values
             .iter()
             .find(|field_value| field_value.field() == field)
-            .map(|field_value| field_value.value())
+            .map(FieldValue::value)
     }
 }
@@ -4,6 +4,7 @@ use schema::{IntOptions, TextOptions};
 
 use schema::Facet;
 use schema::IndexRecordOption;
+use schema::TextFieldIndexing;
 use schema::Value;
 use serde_json::Value as JsonValue;
 
@@ -94,7 +95,7 @@ impl FieldType {
         match *self {
             FieldType::Str(ref text_options) => text_options
                 .get_indexing_options()
-                .map(|indexing_options| indexing_options.index_option()),
+                .map(TextFieldIndexing::index_option),
             FieldType::U64(ref int_options)
             | FieldType::I64(ref int_options)
             | FieldType::Date(ref int_options) => {
@@ -130,7 +130,16 @@ impl SchemaBuilder {
         self.add_field(field_entry)
     }
 
-    /// Adds a fast bytes field to the schema
+    /// Adds a fast bytes field to the schema.
+    ///
+    /// Bytes fields are not searchable and are only used
+    /// as fast fields, to associate any kind of payload
+    /// to a document.
+    ///
+    /// For instance, learning-to-rank often requires access to
+    /// some document features at scoring time.
+    /// These can be serialized and stored as a bytes field to
+    /// get access rapidly when scoring each document.
     pub fn add_bytes_field(&mut self, field_name: &str) -> Field {
         let field_entry = FieldEntry::new_bytes(field_name.to_string());
         self.add_field(field_entry)

@@ -224,7 +233,7 @@ impl Schema {
         let field_name = self.get_field_name(field);
         let values: Vec<Value> = field_values
             .into_iter()
-            .map(|field_val| field_val.value())
+            .map(FieldValue::value)
             .cloned()
             .collect();
         field_map.insert(field_name.to_string(), values);
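As a purely illustrative companion to the new add_bytes_field documentation: a learning-to-rank payload, say two f32 features per document, can be packed into a fixed-width byte string at indexing time and decoded again when scoring. The layout below (two little-endian f32 values) is an assumption made for the example, not something the schema API prescribes:

    // Pack two f32 features into 8 bytes, the kind of payload one might keep in a
    // bytes fast field, and decode them back when scoring a document.
    fn encode_features(popularity: f32, freshness: f32) -> Vec<u8> {
        let mut bytes = Vec::with_capacity(8);
        bytes.extend_from_slice(&popularity.to_le_bytes());
        bytes.extend_from_slice(&freshness.to_le_bytes());
        bytes
    }

    fn decode_features(bytes: &[u8]) -> (f32, f32) {
        let mut buf = [0u8; 4];
        buf.copy_from_slice(&bytes[0..4]);
        let popularity = f32::from_le_bytes(buf);
        buf.copy_from_slice(&bytes[4..8]);
        let freshness = f32::from_le_bytes(buf);
        (popularity, freshness)
    }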
@@ -1,6 +1,7 @@
 use htmlescape::encode_minimal;
 use query::Query;
 use schema::Field;
+use schema::Value;
 use std::cmp::Ordering;
 use std::collections::BTreeMap;
 use std::collections::BTreeSet;

@@ -303,7 +304,7 @@ impl SnippetGenerator {
         let text: String = doc
             .get_all(self.field)
             .into_iter()
-            .flat_map(|val| val.text())
+            .flat_map(Value::text)
             .collect::<Vec<&str>>()
             .join(" ");
         self.snippet(&text)
@@ -227,7 +227,7 @@ pub struct PerFieldSpaceUsage {
 
 impl PerFieldSpaceUsage {
     pub(crate) fn new(fields: HashMap<Field, FieldUsage>) -> PerFieldSpaceUsage {
-        let total = fields.values().map(|x| x.total()).sum();
+        let total = fields.values().map(FieldUsage::total).sum();
         PerFieldSpaceUsage { fields, total }
     }
src/tokenizer/ascii_folding_filter.rs (new file, 4064 lines)
File diff suppressed because it is too large.
@@ -44,18 +44,17 @@
     }
 
     fn advance(&mut self) -> bool {
-        if self.tail.advance() {
-            if self.token_mut().text.is_ascii() {
-                // fast track for ascii.
-                self.token_mut().text.make_ascii_lowercase();
-            } else {
-                to_lowercase_unicode(&mut self.tail.token_mut().text, &mut self.buffer);
-                mem::swap(&mut self.tail.token_mut().text, &mut self.buffer);
-            }
-            true
-        } else {
-            false
+        if !self.tail.advance() {
+            return false;
         }
+        if self.token_mut().text.is_ascii() {
+            // fast track for ascii.
+            self.token_mut().text.make_ascii_lowercase();
+        } else {
+            to_lowercase_unicode(&mut self.tail.token_mut().text, &mut self.buffer);
+            mem::swap(&mut self.tail.token_mut().text, &mut self.buffer);
+        }
+        true
     }
 }
@@ -97,6 +97,8 @@
 //! If you built your schema programmatically, a complete example
 //! could like this for instance.
 //!
+//! Note that tokens with a len greater or equal to [`MAX_TOKEN_LEN`](./constant.MAX_TOKEN_LEN.html) are simply ignored.
+//!
 //! # Example
 //!
 //! ```

@@ -129,6 +131,7 @@
 //! ```
 //!
 mod alphanum_only;
+mod ascii_folding_filter;
 mod facet_tokenizer;
 mod lower_caser;
 mod ngram_tokenizer;

@@ -142,6 +145,7 @@ mod tokenizer;
 mod tokenizer_manager;
 
 pub use self::alphanum_only::AlphaNumOnlyFilter;
+pub use self::ascii_folding_filter::AsciiFoldingFilter;
 pub use self::facet_tokenizer::FacetTokenizer;
 pub use self::lower_caser::LowerCaser;
 pub use self::ngram_tokenizer::NgramTokenizer;

@@ -157,6 +161,13 @@ pub use self::tokenizer::BoxedTokenizer;
 pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer};
 pub use self::tokenizer_manager::TokenizerManager;
 
+/// Maximum authorized len (in bytes) for a token.
+///
+/// Tokenizers are in charge of not emitting tokens larger than this value.
+/// Currently, if a faulty tokenizer implementation emits tokens with a length larger than
+/// `2^16 - 1 - 4`, the token will simply be ignored downstream.
+pub const MAX_TOKEN_LEN: usize = u16::max_value() as usize - 4;
+
 #[cfg(test)]
 pub mod tests {
     use super::{

@@ -228,27 +239,27 @@ pub mod tests {
     fn test_non_en_tokenizer() {
         let tokenizer_manager = TokenizerManager::default();
         tokenizer_manager.register(
-            "es_stem",
+            "el_stem",
             SimpleTokenizer
                 .filter(RemoveLongFilter::limit(40))
                 .filter(LowerCaser)
-                .filter(Stemmer::new(Language::Spanish)),
+                .filter(Stemmer::new(Language::Greek)),
         );
-        let en_tokenizer = tokenizer_manager.get("es_stem").unwrap();
+        let en_tokenizer = tokenizer_manager.get("el_stem").unwrap();
         let mut tokens: Vec<Token> = vec![];
         {
             let mut add_token = |token: &Token| {
                 tokens.push(token.clone());
             };
             en_tokenizer
-                .token_stream("Hola, feliz contribuyente!")
+                .token_stream("Καλημέρα, χαρούμενε φορολογούμενε!")
                 .process(&mut add_token);
         }
 
         assert_eq!(tokens.len(), 3);
-        assert_token(&tokens[0], 0, "hola", 0, 4);
-        assert_token(&tokens[1], 1, "feliz", 6, 11);
-        assert_token(&tokens[2], 2, "contribuyent", 12, 25);
+        assert_token(&tokens[0], 0, "καλημερ", 0, 16);
+        assert_token(&tokens[1], 1, "χαρουμεν", 18, 36);
+        assert_token(&tokens[2], 2, "φορολογουμεν", 37, 63);
     }
 
     #[test]
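A standalone sketch (not the crate's tokenizer code) of the guard described by the MAX_TOKEN_LEN documentation above: a token whose UTF-8 byte length reaches the limit is silently dropped rather than indexed:

    // Illustrative sketch mirroring the documented behaviour for overlong tokens.
    const MAX_TOKEN_LEN: usize = u16::max_value() as usize - 4;

    fn keep_token(token_text: &str) -> bool {
        // `len()` is the UTF-8 byte length, which is what the limit is expressed in.
        token_text.len() < MAX_TOKEN_LEN
    }

    fn filter_tokens<'a>(tokens: Vec<&'a str>) -> Vec<&'a str> {
        tokens.into_iter().filter(|t| keep_token(t)).collect()
    }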
@@ -29,12 +29,9 @@ impl<'a> Tokenizer<'a> for RawTokenizer {
 
 impl TokenStream for RawTokenStream {
     fn advance(&mut self) -> bool {
-        if self.has_token {
-            self.has_token = false;
-            true
-        } else {
-            false
-        }
+        let result = self.has_token;
+        self.has_token = false;
+        result
     }
 
     fn token(&self) -> &Token {
@@ -91,7 +91,6 @@ where
                 return true;
             }
         }
-
         false
     }
 }
@@ -38,23 +38,16 @@ impl<'a> TokenStream for SimpleTokenStream<'a> {
     fn advance(&mut self) -> bool {
         self.token.text.clear();
         self.token.position = self.token.position.wrapping_add(1);
-        loop {
-            match self.chars.next() {
-                Some((offset_from, c)) => {
-                    if c.is_alphanumeric() {
-                        let offset_to = self.search_token_end();
-                        self.token.offset_from = offset_from;
-                        self.token.offset_to = offset_to;
-                        self.token.text.push_str(&self.text[offset_from..offset_to]);
-                        return true;
-                    }
-                }
-                None => {
-                    return false;
-                }
-            }
+        while let Some((offset_from, c)) = self.chars.next() {
+            if c.is_alphanumeric() {
+                let offset_to = self.search_token_end();
+                self.token.offset_from = offset_from;
+                self.token.offset_to = offset_to;
+                self.token.text.push_str(&self.text[offset_from..offset_to]);
+                return true;
+            }
         }
+        false
     }
 
     fn token(&self) -> &Token {
@@ -2,7 +2,6 @@
 
 use super::{Token, TokenFilter, TokenStream};
 use rust_stemmers::{self, Algorithm};
-use std::sync::Arc;
 
 /// Available stemmer languages.
 #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Copy, Clone)]

@@ -57,14 +56,14 @@ impl Language {
 /// Tokens are expected to be lowercased beforehand.
 #[derive(Clone)]
 pub struct Stemmer {
-    stemmer_algorithm: Arc<Algorithm>,
+    stemmer_algorithm: Algorithm,
 }
 
 impl Stemmer {
     /// Creates a new Stemmer `TokenFilter` for a given language algorithm.
     pub fn new(language: Language) -> Stemmer {
         Stemmer {
-            stemmer_algorithm: Arc::new(language.algorithm()),
+            stemmer_algorithm: language.algorithm(),
         }
     }
 }

@@ -83,7 +82,7 @@ where
     type ResultTokenStream = StemmerTokenStream<TailTokenStream>;
 
     fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
-        let inner_stemmer = rust_stemmers::Stemmer::create(Algorithm::English);
+        let inner_stemmer = rust_stemmers::Stemmer::create(self.stemmer_algorithm);
         StemmerTokenStream::wrap(inner_stemmer, token_stream)
     }
 }

@@ -109,15 +108,14 @@ where
     }
 
     fn advance(&mut self) -> bool {
-        if self.tail.advance() {
-            // TODO remove allocation
-            let stemmed_str: String = self.stemmer.stem(&self.token().text).into_owned();
-            self.token_mut().text.clear();
-            self.token_mut().text.push_str(&stemmed_str);
-            true
-        } else {
-            false
+        if !self.tail.advance() {
+            return false;
         }
+        // TODO remove allocation
+        let stemmed_str: String = self.stemmer.stem(&self.token().text).into_owned();
+        self.token_mut().text.clear();
+        self.token_mut().text.push_str(&stemmed_str);
+        true
     }
 }
@@ -104,7 +104,6 @@ where
                 return true;
             }
         }
-
        false
     }
 }
@@ -1,4 +1,5 @@
 use std::collections::HashMap;
+use std::ops::Deref;
 use std::sync::{Arc, RwLock};
 use tokenizer::box_tokenizer;
 use tokenizer::stemmer::Language;

@@ -46,7 +47,8 @@ impl TokenizerManager {
             .read()
             .expect("Acquiring the lock should never fail")
             .get(tokenizer_name)
-            .map(|boxed_tokenizer| boxed_tokenizer.boxed_clone())
+            .map(Deref::deref)
+            .map(BoxedTokenizer::boxed_clone)
     }
 }