Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2025-12-29 21:42:55 +00:00)
Compare commits: limit-rand...issue/922 (31 commits)
CHANGELOG.md (12 changed lines)
@@ -1,6 +1,18 @@
+Tantivy 0.14.0
+=========================
+- Removed the dependency on atomicwrites #833. (Implemented by @pmasurel upon a suggestion and research from @asafigan.)
+- Migrated tantivy's error type from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
+- API Change. Accessing the typed value of a `Schema::Value` now returns an `Option` instead of panicking if the type does not match.
+- Large API change in the Directory API. Tantivy used to assume that all files could somehow be memory mapped. After this change, `Directory` returns a `FileSlice` that can be reduced and eventually read into an `OwnedBytes` object. Long, blocking IO operations are still required, but they no longer span the entire file.
+- Added support for Brotli compression in the DocStore. (@ppodolsky)
+- Added helpers for building intersections and unions in `BooleanQuery`. (@guilload)
+- Bugfix in `Query::explain`.
+- Made it possible to opt out of generating fieldnorm information for indexed fields. This change breaks compatibility, as the meta.json file format is slightly changed. (#922, @pmasurel)
+
Tantivy 0.13.2
===================
Bugfix. Acquiring a facet reader on a segment that does not contain any
doc with this facet returns `None`. (#896)

Tantivy 0.13.1
===================
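The fieldnorms opt-out (#922) is configured per field when building the schema. Below is a minimal sketch, assuming the `set_fieldnorms` setter on `TextFieldIndexing` introduced by this change; the field name and the other options are made up for illustration.

use tantivy::schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions};

fn build_schema_without_fieldnorms() -> Schema {
    // Assumption: `set_fieldnorms(false)` is the opt-out knob added by #922;
    // the rest is the existing schema-building API.
    let indexing = TextFieldIndexing::default()
        .set_tokenizer("default")
        .set_index_option(IndexRecordOption::WithFreqsAndPositions)
        .set_fieldnorms(false); // skip fieldnorm generation for this field
    let text_options = TextOptions::default().set_indexing_options(indexing);
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", text_options);
    schema_builder.build()
}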
Cargo.toml (10 changed lines)
@@ -13,7 +13,7 @@ keywords = ["search", "information", "retrieval"]
edition = "2018"

[dependencies]
-base64 = "0.12"
+base64 = "0.13"
byteorder = "1"
crc32fast = "1"
once_cell = "1"
@@ -21,6 +21,7 @@ regex ={version = "1", default-features = false, features = ["std"]}
tantivy-fst = "0.3"
memmap = {version = "0.7", optional=true}
lz4 = {version="1", optional=true}
+brotli = {version="3.3.0", optional=true}
snap = "1"
tempfile = {version="3", optional=true}
log = "0.4"
@@ -31,9 +32,8 @@ fs2={version="0.4", optional=true}
levenshtein_automata = "0.2"
notify = {version="4", optional=true}
uuid = { version = "0.8", features = ["v4", "serde"] }
-crossbeam = "0.7"
+crossbeam = "0.8"
futures = {version = "0.3", features=["thread-pool"] }
-owning_ref = "0.4"
tantivy-query-grammar = { version="0.14.0-dev", path="./query-grammar" }
stable_deref_trait = "1"
rust-stemmers = "1"
@@ -41,8 +41,7 @@ downcast-rs = "1"
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.4"
fnv = "1"
-owned-read = "0.4"
-failure = "0.1"
+thiserror = "1.0"
htmlescape = "0.3"
fail = "0.4"
murmurhash32 = "0.2"
@@ -75,6 +74,7 @@ overflow-checks = true
[features]
default = ["mmap"]
mmap = ["fs2", "tempfile", "memmap", "notify"]
+brotli-compression = ["brotli"]
lz4-compression = ["lz4"]
failpoints = ["fail/failpoints"]
unstable = [] # useful for benches.
README.md
@@ -5,7 +5,6 @@
[](https://opensource.org/licenses/MIT)
[](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
[](https://crates.io/crates/tantivy)
-[](https://saythanks.io/to/fulmicoton)



@@ -85,7 +84,7 @@ There are many ways to support this project.
- Help with documentation by asking questions or submitting PRs
- Contribute code (you can join [our Gitter](https://gitter.im/tantivy-search/tantivy))
- Talk about Tantivy around you
-- Drop a word on on [](https://saythanks.io/to/fulmicoton) or even [](https://www.patreon.com/fulmicoton)
+- [](https://www.patreon.com/fulmicoton)

# Contributing code
@@ -56,7 +56,7 @@ fn main() -> tantivy::Result<()> {
    );
    let top_docs_by_custom_score =
        TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
-           let mut ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
+           let ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
            let facet_dict = ingredient_reader.facet_dict();

            let query_ords: HashSet<u64> = facets
@@ -45,7 +45,7 @@ fn main() -> tantivy::Result<()> {
    // Inverted index stands for the combination of
    // - the term dictionary
    // - the inverted lists associated to each terms and their positions
-   let inverted_index = segment_reader.inverted_index(title);
+   let inverted_index = segment_reader.inverted_index(title)?;

    // A `Term` is a text token associated with a field.
    // Let's go through all docs containing the term `title:the` and access their position
@@ -58,7 +58,7 @@ fn main() -> tantivy::Result<()> {
    // If you don't need all this information, you may get better performance by decompressing less
    // information.
    if let Some(mut segment_postings) =
-       inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)
+       inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)?
    {
        // this buffer will be used to request for positions
        let mut positions: Vec<u32> = Vec::with_capacity(100);
@@ -106,7 +106,7 @@ fn main() -> tantivy::Result<()> {
    // Inverted index stands for the combination of
    // - the term dictionary
    // - the inverted lists associated to each terms and their positions
-   let inverted_index = segment_reader.inverted_index(title);
+   let inverted_index = segment_reader.inverted_index(title)?;

    // This segment posting object is like a cursor over the documents matching the term.
    // The `IndexRecordOption` arguments tells tantivy we will be interested in both term frequencies
@@ -115,7 +115,7 @@ fn main() -> tantivy::Result<()> {
    // If you don't need all this information, you may get better performance by decompressing less
    // information.
    if let Some(mut block_segment_postings) =
-       inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)
+       inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)?
    {
        loop {
            let docs = block_segment_postings.docs();
@@ -58,10 +58,10 @@ where
        segment_local_id: u32,
        segment_reader: &SegmentReader,
    ) -> crate::Result<Self::Child> {
        let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
        let segment_collector = self
            .collector
            .for_segment(segment_local_id, segment_reader)?;
        let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
        Ok(CustomScoreTopSegmentCollector {
            segment_collector,
            segment_scorer,
src/collector/docset_collector.rs (new file, 61 lines)
@@ -0,0 +1,61 @@
use std::collections::HashSet;

use crate::{DocAddress, DocId, Score};

use super::{Collector, SegmentCollector};

/// Collector that returns the set of `DocAddress` matching the query.
///
/// This collector is mostly useful for tests.
pub struct DocSetCollector;

impl Collector for DocSetCollector {
    type Fruit = HashSet<DocAddress>;
    type Child = DocSetChildCollector;

    fn for_segment(
        &self,
        segment_local_id: crate::SegmentLocalId,
        _segment: &crate::SegmentReader,
    ) -> crate::Result<Self::Child> {
        Ok(DocSetChildCollector {
            segment_local_id,
            docs: HashSet::new(),
        })
    }

    fn requires_scoring(&self) -> bool {
        false
    }

    fn merge_fruits(
        &self,
        segment_fruits: Vec<(u32, HashSet<DocId>)>,
    ) -> crate::Result<Self::Fruit> {
        let len: usize = segment_fruits.iter().map(|(_, docset)| docset.len()).sum();
        let mut result = HashSet::with_capacity(len);
        for (segment_local_id, docs) in segment_fruits {
            for doc in docs {
                result.insert(DocAddress(segment_local_id, doc));
            }
        }
        Ok(result)
    }
}

pub struct DocSetChildCollector {
    segment_local_id: u32,
    docs: HashSet<DocId>,
}

impl SegmentCollector for DocSetChildCollector {
    type Fruit = (u32, HashSet<DocId>);

    fn collect(&mut self, doc: crate::DocId, _score: Score) {
        self.docs.insert(doc);
    }

    fn harvest(self) -> (u32, HashSet<DocId>) {
        (self.segment_local_id, self.docs)
    }
}
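For orientation, here is a minimal usage sketch of the new collector. The `searcher` and `query` values are assumed to be set up as in the examples above.

use std::collections::HashSet;
use tantivy::collector::DocSetCollector;
use tantivy::DocAddress;

// DocSetCollector gathers every matching DocAddress; it requires no scoring.
let matching_docs: HashSet<DocAddress> = searcher.search(&query, &DocSetCollector)?;
for doc_address in &matching_docs {
    println!("matched {:?}", doc_address);
}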
@@ -7,7 +7,6 @@ use crate::DocId;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
-use crate::TantivyError;
use std::cmp::Ordering;
use std::collections::btree_map;
use std::collections::BTreeMap;
@@ -266,10 +265,7 @@ impl Collector for FacetCollector {
        _: SegmentLocalId,
        reader: &SegmentReader,
    ) -> crate::Result<FacetSegmentCollector> {
-       let field_name = reader.schema().get_field_name(self.field);
-       let facet_reader = reader.facet_reader(self.field).ok_or_else(|| {
-           TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name))
-       })?;
+       let facet_reader = reader.facet_reader(self.field)?;

        let mut collapse_mapping = Vec::new();
        let mut counts = Vec::new();
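For context, this is roughly how the facet collector is driven. A sketch only: `searcher`, `query`, and the `ingredient` facet field are assumed to exist, and with this change a schema mismatch now surfaces as an error at search time instead of a panic.

use tantivy::collector::FacetCollector;

let mut facet_collector = FacetCollector::for_field(ingredient);
facet_collector.add_facet("/ingredient");
let facet_counts = searcher.search(&query, &facet_collector)?;
for (facet, count) in facet_counts.get("/ingredient") {
    println!("{}: {}", facet, count);
}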
@@ -1,127 +0,0 @@
|
||||
use std::cmp::Eq;
|
||||
use std::collections::HashMap;
|
||||
use std::hash::Hash;
|
||||
|
||||
use collector::Collector;
|
||||
use fastfield::FastFieldReader;
|
||||
use schema::Field;
|
||||
|
||||
use DocId;
|
||||
use Result;
|
||||
use Score;
|
||||
use SegmentReader;
|
||||
use SegmentLocalId;
|
||||
|
||||
|
||||
/// Facet collector for i64/u64 fast field
|
||||
pub struct IntFacetCollector<T>
|
||||
where
|
||||
T: FastFieldReader,
|
||||
T::ValueType: Eq + Hash,
|
||||
{
|
||||
counters: HashMap<T::ValueType, u64>,
|
||||
field: Field,
|
||||
ff_reader: Option<T>,
|
||||
}
|
||||
|
||||
|
||||
impl<T> IntFacetCollector<T>
|
||||
where
|
||||
T: FastFieldReader,
|
||||
T::ValueType: Eq + Hash,
|
||||
{
|
||||
/// Creates a new facet collector for aggregating a given field.
|
||||
pub fn new(field: Field) -> IntFacetCollector<T> {
|
||||
IntFacetCollector {
|
||||
counters: HashMap::new(),
|
||||
field: field,
|
||||
ff_reader: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<T> Collector for IntFacetCollector<T>
|
||||
where
|
||||
T: FastFieldReader,
|
||||
T::ValueType: Eq + Hash,
|
||||
{
|
||||
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
|
||||
self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn collect(&mut self, doc: DocId, _: Score) {
|
||||
let val = self.ff_reader
|
||||
.as_ref()
|
||||
.expect(
|
||||
"collect() was called before set_segment. \
|
||||
This should never happen.",
|
||||
)
|
||||
.get(doc);
|
||||
*(self.counters.entry(val).or_insert(0)) += 1;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use collector::{chain, IntFacetCollector};
|
||||
use query::QueryParser;
|
||||
use fastfield::{I64FastFieldReader, U64FastFieldReader};
|
||||
use schema::{self, FAST, STRING};
|
||||
use Index;
|
||||
|
||||
#[test]
|
||||
// create 10 documents, set num field value to 0 or 1 for even/odd ones
|
||||
// make sure we have facet counters correctly filled
|
||||
fn test_facet_collector_results() {
|
||||
|
||||
let mut schema_builder = schema::Schema::builder();
|
||||
let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
|
||||
let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
|
||||
let num_field_f64 = schema_builder.add_f64_field("num_f64", FAST);
|
||||
let text_field = schema_builder.add_text_field("text", STRING);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
{
|
||||
for i in 0u64..10u64 {
|
||||
index_writer.add_document(doc!(
|
||||
num_field_i64 => ((i as i64) % 3i64) as i64,
|
||||
num_field_u64 => (i % 2u64) as u64,
|
||||
num_field_f64 => (i % 4u64) as f64,
|
||||
text_field => "text"
|
||||
));
|
||||
}
|
||||
}
|
||||
assert_eq!(index_writer.commit().unwrap(), 10u64);
|
||||
}
|
||||
|
||||
let searcher = index.reader().searcher();
|
||||
let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64);
|
||||
let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64);
|
||||
let mut ffvf_f64: IntFacetCollector<F64FastFieldReader> = IntFacetCollector::new(num_field_f64);
|
||||
|
||||
{
|
||||
// perform the query
|
||||
let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64).push(&mut ffvf_f64);
|
||||
let mut query_parser = QueryParser::for_index(index, vec![text_field]);
|
||||
let query = query_parser.parse_query("text:text").unwrap();
|
||||
query.search(&searcher, &mut facet_collectors).unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(ffvf_u64.counters[&0], 5);
|
||||
assert_eq!(ffvf_u64.counters[&1], 5);
|
||||
assert_eq!(ffvf_i64.counters[&0], 4);
|
||||
assert_eq!(ffvf_i64.counters[&1], 3);
|
||||
assert_eq!(ffvf_f64.counters[&0.0], 3);
|
||||
assert_eq!(ffvf_f64.counters[&2.0], 2);
|
||||
|
||||
}
|
||||
}
|
||||
@@ -111,6 +111,9 @@ mod facet_collector;
pub use self::facet_collector::FacetCollector;
use crate::query::Weight;

+mod docset_collector;
+pub use self::docset_collector::DocSetCollector;
+
/// `Fruit` is the type for the result of our collection.
/// e.g. `usize` for the `Count` collector.
pub trait Fruit: Send + downcast_rs::Downcast {}
@@ -139,7 +142,7 @@ pub trait Collector: Sync + Send {
|
||||
type Fruit: Fruit;
|
||||
|
||||
/// Type of the `SegmentCollector` associated to this collector.
|
||||
type Child: SegmentCollector<Fruit = Self::Fruit>;
|
||||
type Child: SegmentCollector;
|
||||
|
||||
/// `set_segment` is called before beginning to enumerate
|
||||
/// on this segment.
|
||||
@@ -154,7 +157,10 @@ pub trait Collector: Sync + Send {
|
||||
|
||||
/// Combines the fruit associated to the collection of each segments
|
||||
/// into one fruit.
|
||||
fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> crate::Result<Self::Fruit>;
|
||||
fn merge_fruits(
|
||||
&self,
|
||||
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||
) -> crate::Result<Self::Fruit>;
|
||||
|
||||
/// Created a segment collector and
|
||||
fn collect_segment(
|
||||
@@ -224,11 +230,11 @@ where
|
||||
|
||||
fn merge_fruits(
|
||||
&self,
|
||||
children: Vec<(Left::Fruit, Right::Fruit)>,
|
||||
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||
) -> crate::Result<(Left::Fruit, Right::Fruit)> {
|
||||
let mut left_fruits = vec![];
|
||||
let mut right_fruits = vec![];
|
||||
for (left_fruit, right_fruit) in children {
|
||||
for (left_fruit, right_fruit) in segment_fruits {
|
||||
left_fruits.push(left_fruit);
|
||||
right_fruits.push(right_fruit);
|
||||
}
|
||||
@@ -282,7 +288,10 @@ where
|
||||
self.0.requires_scoring() || self.1.requires_scoring() || self.2.requires_scoring()
|
||||
}
|
||||
|
||||
fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
|
||||
fn merge_fruits(
|
||||
&self,
|
||||
children: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||
) -> crate::Result<Self::Fruit> {
|
||||
let mut one_fruits = vec![];
|
||||
let mut two_fruits = vec![];
|
||||
let mut three_fruits = vec![];
|
||||
@@ -349,7 +358,10 @@ where
|
||||
|| self.3.requires_scoring()
|
||||
}
|
||||
|
||||
fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
|
||||
fn merge_fruits(
|
||||
&self,
|
||||
children: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||
) -> crate::Result<Self::Fruit> {
|
||||
let mut one_fruits = vec![];
|
||||
let mut two_fruits = vec![];
|
||||
let mut three_fruits = vec![];
|
||||
|
||||
@@ -34,13 +34,13 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
|
||||
|
||||
fn merge_fruits(
|
||||
&self,
|
||||
children: Vec<<Self as Collector>::Fruit>,
|
||||
children: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||
) -> crate::Result<Box<dyn Fruit>> {
|
||||
let typed_fruit: Vec<TCollector::Fruit> = children
|
||||
let typed_fruit: Vec<<TCollector::Child as SegmentCollector>::Fruit> = children
|
||||
.into_iter()
|
||||
.map(|untyped_fruit| {
|
||||
untyped_fruit
|
||||
.downcast::<TCollector::Fruit>()
|
||||
.downcast::<<TCollector::Child as SegmentCollector>::Fruit>()
|
||||
.map(|boxed_but_typed| *boxed_but_typed)
|
||||
.map_err(|_| {
|
||||
TantivyError::InvalidArgument("Failed to cast child fruit.".to_string())
|
||||
|
||||
@@ -185,12 +185,15 @@ impl Collector for BytesFastFieldTestCollector {
|
||||
_segment_local_id: u32,
|
||||
segment_reader: &SegmentReader,
|
||||
) -> crate::Result<BytesFastFieldSegmentCollector> {
|
||||
let reader = segment_reader
|
||||
.fast_fields()
|
||||
.bytes(self.field)
|
||||
.ok_or_else(|| {
|
||||
crate::TantivyError::InvalidArgument("Field is not a bytes fast field.".to_string())
|
||||
})?;
|
||||
Ok(BytesFastFieldSegmentCollector {
|
||||
vals: Vec::new(),
|
||||
reader: segment_reader
|
||||
.fast_fields()
|
||||
.bytes(self.field)
|
||||
.expect("Field is not a bytes fast field."),
|
||||
reader,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
use super::Collector;
|
||||
use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
|
||||
use crate::collector::top_collector::TopSegmentCollector;
|
||||
use crate::collector::top_collector::{ComparableDoc, TopCollector};
|
||||
use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
|
||||
use crate::collector::{
|
||||
@@ -14,8 +12,71 @@ use crate::DocId;
|
||||
use crate::Score;
|
||||
use crate::SegmentLocalId;
|
||||
use crate::SegmentReader;
|
||||
use std::collections::BinaryHeap;
|
||||
use crate::{collector::custom_score_top_collector::CustomScoreTopCollector, fastfield::FastValue};
|
||||
use crate::{collector::top_collector::TopSegmentCollector, TantivyError};
|
||||
use std::fmt;
|
||||
use std::{collections::BinaryHeap, marker::PhantomData};
|
||||
|
||||
struct FastFieldConvertCollector<
|
||||
TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
|
||||
TFastValue: FastValue,
|
||||
> {
|
||||
pub collector: TCollector,
|
||||
pub field: Field,
|
||||
pub fast_value: std::marker::PhantomData<TFastValue>,
|
||||
}
|
||||
|
||||
impl<TCollector, TFastValue> Collector for FastFieldConvertCollector<TCollector, TFastValue>
|
||||
where
|
||||
TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
|
||||
TFastValue: FastValue + 'static,
|
||||
{
|
||||
type Fruit = Vec<(TFastValue, DocAddress)>;
|
||||
|
||||
type Child = TCollector::Child;
|
||||
|
||||
fn for_segment(
|
||||
&self,
|
||||
segment_local_id: crate::SegmentLocalId,
|
||||
segment: &SegmentReader,
|
||||
) -> crate::Result<Self::Child> {
|
||||
let schema = segment.schema();
|
||||
let field_entry = schema.get_field_entry(self.field);
|
||||
if !field_entry.is_fast() {
|
||||
return Err(TantivyError::SchemaError(format!(
|
||||
"Field {:?} is not a fast field.",
|
||||
field_entry.name()
|
||||
)));
|
||||
}
|
||||
let schema_type = TFastValue::to_type();
|
||||
let requested_type = field_entry.field_type().value_type();
|
||||
if schema_type != requested_type {
|
||||
return Err(TantivyError::SchemaError(format!(
|
||||
"Field {:?} is of type {:?}!={:?}",
|
||||
field_entry.name(),
|
||||
schema_type,
|
||||
requested_type
|
||||
)));
|
||||
}
|
||||
self.collector.for_segment(segment_local_id, segment)
|
||||
}
|
||||
|
||||
fn requires_scoring(&self) -> bool {
|
||||
self.collector.requires_scoring()
|
||||
}
|
||||
|
||||
fn merge_fruits(
|
||||
&self,
|
||||
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||
) -> crate::Result<Self::Fruit> {
|
||||
let raw_result = self.collector.merge_fruits(segment_fruits)?;
|
||||
let transformed_result = raw_result
|
||||
.into_iter()
|
||||
.map(|(score, doc_address)| (TFastValue::from_u64(score), doc_address))
|
||||
.collect::<Vec<_>>();
|
||||
Ok(transformed_result)
|
||||
}
|
||||
}
|
||||
|
||||
/// The `TopDocs` collector keeps track of the top `K` documents
|
||||
/// sorted by their score.
|
||||
@@ -73,7 +134,7 @@ struct ScorerByFastFieldReader {
|
||||
|
||||
impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
|
||||
fn score(&mut self, doc: DocId) -> u64 {
|
||||
self.ff_reader.get_u64(u64::from(doc))
|
||||
self.ff_reader.get(doc)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,10 +148,10 @@ impl CustomScorer<u64> for ScorerByField {
|
||||
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
|
||||
let ff_reader = segment_reader
|
||||
.fast_fields()
|
||||
.u64(self.field)
|
||||
.u64_lenient(self.field)
|
||||
.ok_or_else(|| {
|
||||
crate::TantivyError::SchemaError(format!(
|
||||
"Field requested ({:?}) is not a i64/u64 fast field.",
|
||||
"Field requested ({:?}) is not a fast field.",
|
||||
self.field
|
||||
))
|
||||
})?;
|
||||
@@ -112,6 +173,8 @@ impl TopDocs {
|
||||
/// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
|
||||
/// Lucene's TopDocsCollector.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use tantivy::collector::TopDocs;
|
||||
/// use tantivy::query::QueryParser;
|
||||
@@ -148,6 +211,14 @@ impl TopDocs {
|
||||
|
||||
/// Set top-K to rank documents by a given fast field.
|
||||
///
|
||||
/// If the field is not a fast field or does not exist, this method returns successfully (it is not aware of any schema).
|
||||
/// An error will be returned at the moment of search.
|
||||
///
|
||||
/// If the field is a FAST field but not a u64 field, the search will return successfully, but it will
/// return a monotonic u64 representation (i.e. the order is still correct) of the requested field type.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// # use tantivy::schema::{Schema, FAST, TEXT};
|
||||
/// # use tantivy::{doc, Index, DocAddress};
|
||||
@@ -169,7 +240,7 @@ impl TopDocs {
|
||||
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
|
||||
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
|
||||
/// # assert!(index_writer.commit().is_ok());
|
||||
/// # let reader = index.reader().unwrap();
|
||||
/// # let reader = index.reader()?;
|
||||
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
|
||||
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
|
||||
/// # assert_eq!(top_docs,
|
||||
@@ -177,25 +248,20 @@ impl TopDocs {
|
||||
/// # (80u64, DocAddress(0u32, 3))]);
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
///
|
||||
///
|
||||
/// /// Searches the document matching the given query, and
|
||||
/// /// collects the top 10 documents, order by the u64-`field`
|
||||
/// /// given in argument.
|
||||
/// ///
|
||||
/// /// `field` is required to be a FAST field.
|
||||
/// fn docs_sorted_by_rating(searcher: &Searcher,
|
||||
/// query: &dyn Query,
|
||||
/// sort_by_field: Field)
|
||||
/// rating_field: Field)
|
||||
/// -> tantivy::Result<Vec<(u64, DocAddress)>> {
|
||||
///
|
||||
/// // This is where we build our topdocs collector
|
||||
/// //
|
||||
/// // Note the generics parameter that needs to match the
|
||||
/// // type `sort_by_field`.
|
||||
/// let top_docs_by_rating = TopDocs
|
||||
/// // Note the `rating_field` needs to be a FAST field here.
|
||||
/// let top_books_by_rating = TopDocs
|
||||
/// ::with_limit(10)
|
||||
/// .order_by_u64_field(sort_by_field);
|
||||
/// .order_by_u64_field(rating_field);
|
||||
///
|
||||
/// // ... and here are our documents. Note this is a simple vec.
|
||||
/// // The `u64` in the pair is the value of our fast field for
|
||||
@@ -205,21 +271,105 @@ impl TopDocs {
|
||||
/// // length of 10, or less if not enough documents matched the
|
||||
/// // query.
|
||||
/// let resulting_docs: Vec<(u64, DocAddress)> =
|
||||
/// searcher.search(query, &top_docs_by_rating)?;
|
||||
/// searcher.search(query, &top_books_by_rating)?;
|
||||
///
|
||||
/// Ok(resulting_docs)
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if the field requested is not a fast field.
|
||||
///
|
||||
/// # See also
|
||||
///
|
||||
/// To comfortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
|
||||
/// [.order_by_fast_field(...)](#method.order_by_fast_field) method.
|
||||
pub fn order_by_u64_field(
|
||||
self,
|
||||
field: Field,
|
||||
) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
|
||||
self.custom_score(ScorerByField { field })
|
||||
CustomScoreTopCollector::new(ScorerByField { field }, self.0.into_tscore())
|
||||
}
|
||||
|
||||
/// Set top-K to rank documents by a given fast field.
|
||||
///
|
||||
/// If the field is not a fast field, or its field type does not match the generic type, this method does not panic,
|
||||
/// but an explicit error will be returned at the moment of collection.
|
||||
///
|
||||
/// Note that this method is generic. The requested fast field type will often be
/// inferred in your code by the Rust compiler.
|
||||
///
|
||||
/// Implementation-wise, for performance reasons, tantivy will manipulate the u64 representation of your fast
|
||||
/// field until the last moment.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// # use tantivy::schema::{Schema, FAST, TEXT};
|
||||
/// # use tantivy::{doc, Index, DocAddress};
|
||||
/// # use tantivy::query::{Query, AllQuery};
|
||||
/// use tantivy::Searcher;
|
||||
/// use tantivy::collector::TopDocs;
|
||||
/// use tantivy::schema::Field;
|
||||
///
|
||||
/// # fn main() -> tantivy::Result<()> {
|
||||
/// # let mut schema_builder = Schema::builder();
|
||||
/// # let title = schema_builder.add_text_field("company", TEXT);
|
||||
/// # let rating = schema_builder.add_i64_field("revenue", FAST);
|
||||
/// # let schema = schema_builder.build();
|
||||
/// #
|
||||
/// # let index = Index::create_in_ram(schema);
|
||||
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
|
||||
/// # index_writer.add_document(doc!(title => "MadCow Inc.", rating => 92_000_000i64));
|
||||
/// # index_writer.add_document(doc!(title => "Zozo Cow KKK", rating => 119_000_000i64));
|
||||
/// # index_writer.add_document(doc!(title => "Declining Cow", rating => -63_000_000i64));
|
||||
/// # assert!(index_writer.commit().is_ok());
|
||||
/// # let reader = index.reader()?;
|
||||
/// # let top_docs = docs_sorted_by_revenue(&reader.searcher(), &AllQuery, rating)?;
|
||||
/// # assert_eq!(top_docs,
|
||||
/// # vec![(119_000_000i64, DocAddress(0, 1)),
|
||||
/// # (92_000_000i64, DocAddress(0, 0))]);
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// /// Searches the document matching the given query, and
|
||||
/// /// collects the top 10 documents, order by the u64-`field`
|
||||
/// /// given in argument.
|
||||
/// fn docs_sorted_by_revenue(searcher: &Searcher,
|
||||
/// query: &dyn Query,
|
||||
/// revenue_field: Field)
|
||||
/// -> tantivy::Result<Vec<(i64, DocAddress)>> {
|
||||
///
|
||||
/// // This is where we build our topdocs collector
|
||||
/// //
|
||||
/// // Note the generics parameter that needs to match the
|
||||
/// // type `sort_by_field`. revenue_field here is a FAST i64 field.
|
||||
/// let top_company_by_revenue = TopDocs
|
||||
/// ::with_limit(2)
|
||||
/// .order_by_fast_field(revenue_field);
|
||||
///
|
||||
/// // ... and here are our documents. Note this is a simple vec.
|
||||
/// // The `i64` in the pair is the value of our fast field for
|
||||
/// // each documents.
|
||||
/// //
|
||||
/// // The vec is sorted decreasingly by `sort_by_field`, and has a
|
||||
/// // length of 10, or less if not enough documents matched the
|
||||
/// // query.
|
||||
/// let resulting_docs: Vec<(i64, DocAddress)> =
|
||||
/// searcher.search(query, &top_company_by_revenue)?;
|
||||
///
|
||||
/// Ok(resulting_docs)
|
||||
/// }
|
||||
/// ```
|
||||
pub fn order_by_fast_field<TFastValue>(
|
||||
self,
|
||||
fast_field: Field,
|
||||
) -> impl Collector<Fruit = Vec<(TFastValue, DocAddress)>>
|
||||
where
|
||||
TFastValue: FastValue + 'static,
|
||||
{
|
||||
let u64_collector = self.order_by_u64_field(fast_field);
|
||||
FastFieldConvertCollector {
|
||||
collector: u64_collector,
|
||||
field: fast_field,
|
||||
fast_value: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Ranks the documents using a custom score.
|
||||
@@ -722,6 +872,94 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_field_collector_datetime() -> crate::Result<()> {
|
||||
use std::str::FromStr;
|
||||
let mut schema_builder = Schema::builder();
|
||||
let name = schema_builder.add_text_field("name", TEXT);
|
||||
let birthday = schema_builder.add_date_field("birthday", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let pr_birthday = crate::DateTime::from_str("1898-04-09T00:00:00+00:00")?;
|
||||
index_writer.add_document(doc!(
|
||||
name => "Paul Robeson",
|
||||
birthday => pr_birthday
|
||||
));
|
||||
let mr_birthday = crate::DateTime::from_str("1947-11-08T00:00:00+00:00")?;
|
||||
index_writer.add_document(doc!(
|
||||
name => "Minnie Riperton",
|
||||
birthday => mr_birthday
|
||||
));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let top_collector = TopDocs::with_limit(3).order_by_fast_field(birthday);
|
||||
let top_docs: Vec<(crate::DateTime, DocAddress)> =
|
||||
searcher.search(&AllQuery, &top_collector)?;
|
||||
assert_eq!(
|
||||
&top_docs[..],
|
||||
&[
|
||||
(mr_birthday, DocAddress(0, 1)),
|
||||
(pr_birthday, DocAddress(0, 0)),
|
||||
]
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_field_collector_i64() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let city = schema_builder.add_text_field("city", TEXT);
|
||||
let altitude = schema_builder.add_i64_field("altitude", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(
|
||||
city => "georgetown",
|
||||
altitude => -1i64,
|
||||
));
|
||||
index_writer.add_document(doc!(
|
||||
city => "tokyo",
|
||||
altitude => 40i64,
|
||||
));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
|
||||
let top_docs: Vec<(i64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
|
||||
assert_eq!(
|
||||
&top_docs[..],
|
||||
&[(40i64, DocAddress(0, 1)), (-1i64, DocAddress(0, 0)),]
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_field_collector_f64() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let city = schema_builder.add_text_field("city", TEXT);
|
||||
let altitude = schema_builder.add_f64_field("altitude", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(
|
||||
city => "georgetown",
|
||||
altitude => -1.0f64,
|
||||
));
|
||||
index_writer.add_document(doc!(
|
||||
city => "tokyo",
|
||||
altitude => 40f64,
|
||||
));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
|
||||
let top_docs: Vec<(f64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
|
||||
assert_eq!(
|
||||
&top_docs[..],
|
||||
&[(40f64, DocAddress(0, 1)), (-1.0f64, DocAddress(0, 0)),]
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_field_does_not_exist() {
|
||||
@@ -744,29 +982,41 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_field_not_fast_field() {
|
||||
fn test_field_not_fast_field() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let title = schema_builder.add_text_field(TITLE, TEXT);
|
||||
let size = schema_builder.add_u64_field(SIZE, STORED);
|
||||
let schema = schema_builder.build();
|
||||
let (index, _) = index("beer", title, schema, |index_writer| {
|
||||
index_writer.add_document(doc!(
|
||||
title => "bottle of beer",
|
||||
size => 12u64,
|
||||
));
|
||||
});
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(size=>1u64));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let segment = searcher.segment_reader(0);
|
||||
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
|
||||
let err = top_collector.for_segment(0, segment);
|
||||
if let Err(crate::TantivyError::SchemaError(msg)) = err {
|
||||
assert_eq!(
|
||||
msg,
|
||||
"Field requested (Field(1)) is not a i64/u64 fast field."
|
||||
);
|
||||
} else {
|
||||
assert!(false);
|
||||
}
|
||||
let err = top_collector.for_segment(0, segment).err().unwrap();
|
||||
assert!(
|
||||
matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field requested (Field(0)) is not a fast field.")
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_field_wrong_type() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let size = schema_builder.add_u64_field(SIZE, STORED);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(size=>1u64));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let segment = searcher.segment_reader(0);
|
||||
let top_collector = TopDocs::with_limit(4).order_by_fast_field::<i64>(size);
|
||||
let err = top_collector.for_segment(0, segment).err().unwrap();
|
||||
assert!(
|
||||
matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field \"size\" is not a fast field.")
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -820,7 +1070,6 @@ mod tests {
|
||||
mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
|
||||
) -> (Index, Box<dyn Query>) {
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
|
||||
doc_adder(&mut index_writer);
|
||||
index_writer.commit().unwrap();
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
|
||||
use std::io;
|
||||
use std::ops::Deref;
|
||||
|
||||
use crate::directory::OwnedBytes;
|
||||
|
||||
pub(crate) struct BitPacker {
|
||||
mini_buffer: u64,
|
||||
@@ -60,20 +61,14 @@ impl BitPacker {
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BitUnpacker<Data>
|
||||
where
|
||||
Data: Deref<Target = [u8]>,
|
||||
{
|
||||
pub struct BitUnpacker {
|
||||
num_bits: u64,
|
||||
mask: u64,
|
||||
data: Data,
|
||||
data: OwnedBytes,
|
||||
}
|
||||
|
||||
impl<Data> BitUnpacker<Data>
|
||||
where
|
||||
Data: Deref<Target = [u8]>,
|
||||
{
|
||||
pub fn new(data: Data, num_bits: u8) -> BitUnpacker<Data> {
|
||||
impl BitUnpacker {
|
||||
pub fn new(data: OwnedBytes, num_bits: u8) -> BitUnpacker {
|
||||
let mask: u64 = if num_bits == 64 {
|
||||
!0u64
|
||||
} else {
|
||||
@@ -90,7 +85,7 @@ where
|
||||
if self.num_bits == 0 {
|
||||
return 0u64;
|
||||
}
|
||||
let data: &[u8] = &*self.data;
|
||||
let data: &[u8] = self.data.as_slice();
|
||||
let num_bits = self.num_bits;
|
||||
let mask = self.mask;
|
||||
let addr_in_bits = idx * num_bits;
|
||||
@@ -109,8 +104,9 @@ where
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::{BitPacker, BitUnpacker};
|
||||
use crate::directory::OwnedBytes;
|
||||
|
||||
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
|
||||
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>) {
|
||||
let mut data = Vec::new();
|
||||
let mut bitpacker = BitPacker::new();
|
||||
let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
|
||||
@@ -122,7 +118,7 @@ mod test {
|
||||
}
|
||||
bitpacker.close(&mut data).unwrap();
|
||||
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
|
||||
let bitunpacker = BitUnpacker::new(data, num_bits);
|
||||
let bitunpacker = BitUnpacker::new(OwnedBytes::new(data), num_bits);
|
||||
(bitunpacker, vals)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,14 +1,15 @@
|
||||
use crate::common::BinarySerializable;
|
||||
use crate::common::CountingWriter;
|
||||
use crate::common::VInt;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::{TerminatingWrite, WritePtr};
|
||||
use crate::schema::Field;
|
||||
use crate::space_usage::FieldUsage;
|
||||
use crate::space_usage::PerFieldSpaceUsage;
|
||||
use std::collections::HashMap;
|
||||
use std::io::Write;
|
||||
use std::io::{self, Read};
|
||||
use std::io::{self, Read, Write};
|
||||
|
||||
use super::HasLen;
|
||||
|
||||
#[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
|
||||
pub struct FileAddr {
|
||||
@@ -103,25 +104,26 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
|
||||
/// for each field.
|
||||
#[derive(Clone)]
|
||||
pub struct CompositeFile {
|
||||
data: ReadOnlySource,
|
||||
data: FileSlice,
|
||||
offsets_index: HashMap<FileAddr, (usize, usize)>,
|
||||
}
|
||||
|
||||
impl CompositeFile {
|
||||
/// Opens a composite file stored in a given
|
||||
/// `ReadOnlySource`.
|
||||
pub fn open(data: &ReadOnlySource) -> io::Result<CompositeFile> {
|
||||
/// `FileSlice`.
|
||||
pub fn open(data: &FileSlice) -> io::Result<CompositeFile> {
|
||||
let end = data.len();
|
||||
let footer_len_data = data.slice_from(end - 4);
|
||||
let footer_len_data = data.slice_from(end - 4).read_bytes()?;
|
||||
let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize;
|
||||
let footer_start = end - 4 - footer_len;
|
||||
let footer_data = data.slice(footer_start, footer_start + footer_len);
|
||||
let footer_data = data
|
||||
.slice(footer_start, footer_start + footer_len)
|
||||
.read_bytes()?;
|
||||
let mut footer_buffer = footer_data.as_slice();
|
||||
let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize;
|
||||
|
||||
let mut file_addrs = vec![];
|
||||
let mut offsets = vec![];
|
||||
|
||||
let mut field_index = HashMap::new();
|
||||
|
||||
let mut offset = 0;
|
||||
@@ -150,19 +152,19 @@ impl CompositeFile {
|
||||
pub fn empty() -> CompositeFile {
|
||||
CompositeFile {
|
||||
offsets_index: HashMap::new(),
|
||||
data: ReadOnlySource::empty(),
|
||||
data: FileSlice::empty(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the `ReadOnlySource` associated
|
||||
/// Returns the `FileSlice` associated
|
||||
/// to a given `Field` and stored in a `CompositeFile`.
|
||||
pub fn open_read(&self, field: Field) -> Option<ReadOnlySource> {
|
||||
pub fn open_read(&self, field: Field) -> Option<FileSlice> {
|
||||
self.open_read_with_idx(field, 0)
|
||||
}
|
||||
|
||||
/// Returns the `ReadOnlySource` associated
|
||||
/// Returns the `FileSlice` associated
|
||||
/// to a given `Field` and stored in a `CompositeFile`.
|
||||
pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<ReadOnlySource> {
|
||||
pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<FileSlice> {
|
||||
self.offsets_index
|
||||
.get(&FileAddr { field, idx })
|
||||
.map(|&(from, to)| self.data.slice(from, to))
|
||||
@@ -192,46 +194,44 @@ mod test {
|
||||
use std::path::Path;
|
||||
|
||||
#[test]
|
||||
fn test_composite_file() {
|
||||
fn test_composite_file() -> crate::Result<()> {
|
||||
let path = Path::new("test_path");
|
||||
let mut directory = RAMDirectory::create();
|
||||
let directory = RAMDirectory::create();
|
||||
{
|
||||
let w = directory.open_write(path).unwrap();
|
||||
let mut composite_write = CompositeWrite::wrap(w);
|
||||
{
|
||||
let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
|
||||
VInt(32431123u64).serialize(&mut write_0).unwrap();
|
||||
write_0.flush().unwrap();
|
||||
}
|
||||
|
||||
{
|
||||
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
|
||||
VInt(2).serialize(&mut write_4).unwrap();
|
||||
write_4.flush().unwrap();
|
||||
}
|
||||
composite_write.close().unwrap();
|
||||
let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
|
||||
VInt(32431123u64).serialize(&mut write_0)?;
|
||||
write_0.flush()?;
|
||||
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
|
||||
VInt(2).serialize(&mut write_4)?;
|
||||
write_4.flush()?;
|
||||
composite_write.close()?;
|
||||
}
|
||||
{
|
||||
let r = directory.open_read(path).unwrap();
|
||||
let composite_file = CompositeFile::open(&r).unwrap();
|
||||
let r = directory.open_read(path)?;
|
||||
let composite_file = CompositeFile::open(&r)?;
|
||||
{
|
||||
let file0 = composite_file
|
||||
.open_read(Field::from_field_id(0u32))
|
||||
.unwrap();
|
||||
.unwrap()
|
||||
.read_bytes()?;
|
||||
let mut file0_buf = file0.as_slice();
|
||||
let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
|
||||
let payload_0 = VInt::deserialize(&mut file0_buf)?.0;
|
||||
assert_eq!(file0_buf.len(), 0);
|
||||
assert_eq!(payload_0, 32431123u64);
|
||||
}
|
||||
{
|
||||
let file4 = composite_file
|
||||
.open_read(Field::from_field_id(4u32))
|
||||
.unwrap();
|
||||
.unwrap()
|
||||
.read_bytes()?;
|
||||
let mut file4_buf = file4.as_slice();
|
||||
let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
|
||||
let payload_4 = VInt::deserialize(&mut file4_buf)?.0;
|
||||
assert_eq!(file4_buf.len(), 0);
|
||||
assert_eq!(payload_4, 2u64);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,7 +21,6 @@ use crate::schema::FieldType;
|
||||
use crate::schema::Schema;
|
||||
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
|
||||
use crate::IndexWriter;
|
||||
use std::borrow::BorrowMut;
|
||||
use std::collections::HashSet;
|
||||
use std::fmt;
|
||||
|
||||
@@ -57,7 +56,9 @@ pub struct Index {
|
||||
}
|
||||
|
||||
impl Index {
|
||||
/// Examines the director to see if it contains an index
|
||||
/// Examines the directory to see if it contains an index.
|
||||
///
|
||||
/// Effectively, it only checks for the presence of the `meta.json` file.
|
||||
pub fn exists<Dir: Directory>(dir: &Dir) -> bool {
|
||||
dir.exists(&META_FILEPATH)
|
||||
}
|
||||
@@ -140,7 +141,9 @@ impl Index {
|
||||
Index::create(mmap_directory, schema)
|
||||
}
|
||||
|
||||
/// Creates a new index given an implementation of the trait `Directory`
|
||||
/// Creates a new index given an implementation of the trait `Directory`.
|
||||
///
|
||||
/// If a directory previously existed, it will be erased.
|
||||
pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
|
||||
let directory = ManagedDirectory::wrap(dir)?;
|
||||
Index::from_directory(directory, schema)
|
||||
@@ -149,8 +152,8 @@ impl Index {
|
||||
/// Create a new index from a directory.
|
||||
///
|
||||
/// This will overwrite existing meta.json
|
||||
fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
|
||||
save_new_metas(schema.clone(), directory.borrow_mut())?;
|
||||
fn from_directory(directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
|
||||
save_new_metas(schema.clone(), &directory)?;
|
||||
let metas = IndexMeta::with_schema(schema);
|
||||
Index::create_from_metas(directory, &metas, SegmentMetaInventory::default())
|
||||
}
|
||||
|
||||
@@ -301,7 +301,7 @@ mod tests {
|
||||
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#
|
||||
r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default","fieldnorms":true},"stored":false}}],"opstamp":0}"#
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
use std::io;
|
||||
|
||||
use crate::common::BinarySerializable;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::positions::PositionReader;
|
||||
use crate::postings::TermInfo;
|
||||
use crate::postings::{BlockSegmentPostings, SegmentPostings};
|
||||
@@ -14,7 +16,7 @@ use crate::termdict::TermDictionary;
|
||||
///
|
||||
/// It is safe to delete the segment associated to
|
||||
/// an `InvertedIndexReader`. As long as it is open,
|
||||
/// the `ReadOnlySource` it is relying on should
|
||||
/// the `FileSlice` it is relying on should
|
||||
/// stay available.
|
||||
///
|
||||
///
|
||||
@@ -22,9 +24,9 @@ use crate::termdict::TermDictionary;
|
||||
/// the `SegmentReader`'s [`.inverted_index(...)`] method
|
||||
pub struct InvertedIndexReader {
|
||||
termdict: TermDictionary,
|
||||
postings_source: ReadOnlySource,
|
||||
positions_source: ReadOnlySource,
|
||||
positions_idx_source: ReadOnlySource,
|
||||
postings_file_slice: FileSlice,
|
||||
positions_file_slice: FileSlice,
|
||||
positions_idx_file_slice: FileSlice,
|
||||
record_option: IndexRecordOption,
|
||||
total_num_tokens: u64,
|
||||
}
|
||||
@@ -33,22 +35,21 @@ impl InvertedIndexReader {
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
|
||||
pub(crate) fn new(
|
||||
termdict: TermDictionary,
|
||||
postings_source: ReadOnlySource,
|
||||
positions_source: ReadOnlySource,
|
||||
positions_idx_source: ReadOnlySource,
|
||||
postings_file_slice: FileSlice,
|
||||
positions_file_slice: FileSlice,
|
||||
positions_idx_file_slice: FileSlice,
|
||||
record_option: IndexRecordOption,
|
||||
) -> InvertedIndexReader {
|
||||
let total_num_tokens_data = postings_source.slice(0, 8);
|
||||
let mut total_num_tokens_cursor = total_num_tokens_data.as_slice();
|
||||
let total_num_tokens = u64::deserialize(&mut total_num_tokens_cursor).unwrap_or(0u64);
|
||||
InvertedIndexReader {
|
||||
) -> io::Result<InvertedIndexReader> {
|
||||
let (total_num_tokens_slice, postings_body) = postings_file_slice.split(8);
|
||||
let total_num_tokens = u64::deserialize(&mut total_num_tokens_slice.read_bytes()?)?;
|
||||
Ok(InvertedIndexReader {
|
||||
termdict,
|
||||
postings_source: postings_source.slice_from(8),
|
||||
positions_source,
|
||||
positions_idx_source,
|
||||
postings_file_slice: postings_body,
|
||||
positions_file_slice,
|
||||
positions_idx_file_slice,
|
||||
record_option,
|
||||
total_num_tokens,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates an empty `InvertedIndexReader` object, which
|
||||
@@ -56,9 +57,9 @@ impl InvertedIndexReader {
|
||||
pub fn empty(record_option: IndexRecordOption) -> InvertedIndexReader {
|
||||
InvertedIndexReader {
|
||||
termdict: TermDictionary::empty(),
|
||||
postings_source: ReadOnlySource::empty(),
|
||||
positions_source: ReadOnlySource::empty(),
|
||||
positions_idx_source: ReadOnlySource::empty(),
|
||||
postings_file_slice: FileSlice::empty(),
|
||||
positions_file_slice: FileSlice::empty(),
|
||||
positions_idx_file_slice: FileSlice::empty(),
|
||||
record_option,
|
||||
total_num_tokens: 0u64,
|
||||
}
|
||||
@@ -88,11 +89,12 @@ impl InvertedIndexReader {
|
||||
&self,
|
||||
term_info: &TermInfo,
|
||||
block_postings: &mut BlockSegmentPostings,
|
||||
) {
|
||||
let offset = term_info.postings_offset as usize;
|
||||
let end_source = self.postings_source.len();
|
||||
let postings_slice = self.postings_source.slice(offset, end_source);
|
||||
block_postings.reset(term_info.doc_freq, postings_slice);
|
||||
) -> io::Result<()> {
|
||||
let postings_slice = self
|
||||
.postings_file_slice
|
||||
.slice_from(term_info.postings_offset as usize);
|
||||
block_postings.reset(term_info.doc_freq, postings_slice.read_bytes()?);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns a block postings given a `Term`.
|
||||
@@ -103,9 +105,11 @@ impl InvertedIndexReader {
|
||||
&self,
|
||||
term: &Term,
|
||||
option: IndexRecordOption,
|
||||
) -> Option<BlockSegmentPostings> {
|
||||
self.get_term_info(term)
|
||||
) -> io::Result<Option<BlockSegmentPostings>> {
|
||||
Ok(self
|
||||
.get_term_info(term)
|
||||
.map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option))
|
||||
.transpose()?)
|
||||
}
|
||||
|
||||
/// Returns a block postings given a `term_info`.
|
||||
@@ -116,10 +120,10 @@ impl InvertedIndexReader {
|
||||
&self,
|
||||
term_info: &TermInfo,
|
||||
requested_option: IndexRecordOption,
|
||||
) -> BlockSegmentPostings {
|
||||
) -> io::Result<BlockSegmentPostings> {
|
||||
let offset = term_info.postings_offset as usize;
|
||||
let postings_data = self.postings_source.slice_from(offset);
|
||||
BlockSegmentPostings::from_data(
|
||||
let postings_data = self.postings_file_slice.slice_from(offset);
|
||||
BlockSegmentPostings::open(
|
||||
term_info.doc_freq,
|
||||
postings_data,
|
||||
self.record_option,
|
||||
@@ -135,20 +139,23 @@ impl InvertedIndexReader {
|
||||
&self,
|
||||
term_info: &TermInfo,
|
||||
option: IndexRecordOption,
|
||||
) -> SegmentPostings {
|
||||
let block_postings = self.read_block_postings_from_terminfo(term_info, option);
|
||||
) -> io::Result<SegmentPostings> {
|
||||
let block_postings = self.read_block_postings_from_terminfo(term_info, option)?;
|
||||
let position_stream = {
|
||||
if option.has_positions() {
|
||||
let position_reader = self.positions_source.clone();
|
||||
let skip_reader = self.positions_idx_source.clone();
|
||||
let position_reader = self.positions_file_slice.clone();
|
||||
let skip_reader = self.positions_idx_file_slice.clone();
|
||||
let position_reader =
|
||||
PositionReader::new(position_reader, skip_reader, term_info.positions_idx);
|
||||
PositionReader::new(position_reader, skip_reader, term_info.positions_idx)?;
|
||||
Some(position_reader)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
SegmentPostings::from_block_postings(block_postings, position_stream)
|
||||
Ok(SegmentPostings::from_block_postings(
|
||||
block_postings,
|
||||
position_stream,
|
||||
))
|
||||
}
|
||||
|
||||
/// Returns the total number of tokens recorded for all documents
|
||||
@@ -167,24 +174,31 @@ impl InvertedIndexReader {
|
||||
/// For instance, requesting `IndexRecordOption::Freq` for a
|
||||
/// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
|
||||
/// with `DocId`s and frequencies.
|
||||
pub fn read_postings(&self, term: &Term, option: IndexRecordOption) -> Option<SegmentPostings> {
|
||||
pub fn read_postings(
|
||||
&self,
|
||||
term: &Term,
|
||||
option: IndexRecordOption,
|
||||
) -> io::Result<Option<SegmentPostings>> {
|
||||
self.get_term_info(term)
|
||||
.map(move |term_info| self.read_postings_from_terminfo(&term_info, option))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
pub(crate) fn read_postings_no_deletes(
|
||||
&self,
|
||||
term: &Term,
|
||||
option: IndexRecordOption,
|
||||
) -> Option<SegmentPostings> {
|
||||
) -> io::Result<Option<SegmentPostings>> {
|
||||
self.get_term_info(term)
|
||||
.map(|term_info| self.read_postings_from_terminfo(&term_info, option))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
/// Returns the number of documents containing the term.
|
||||
pub fn doc_freq(&self, term: &Term) -> u32 {
|
||||
self.get_term_info(term)
|
||||
pub fn doc_freq(&self, term: &Term) -> io::Result<u32> {
|
||||
Ok(self
|
||||
.get_term_info(term)
|
||||
.map(|term_info| term_info.doc_freq)
|
||||
.unwrap_or(0u32)
|
||||
.unwrap_or(0u32))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,8 +11,8 @@ use crate::store::StoreReader;
|
||||
use crate::termdict::TermMerger;
|
||||
use crate::DocAddress;
|
||||
use crate::Index;
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
use std::{fmt, io};
|
||||
|
||||
/// Holds a list of `SegmentReader`s ready for search.
|
||||
///
|
||||
@@ -32,17 +32,17 @@ impl Searcher {
|
||||
schema: Schema,
|
||||
index: Index,
|
||||
segment_readers: Vec<SegmentReader>,
|
||||
) -> Searcher {
|
||||
let store_readers = segment_readers
|
||||
) -> io::Result<Searcher> {
|
||||
let store_readers: Vec<StoreReader> = segment_readers
|
||||
.iter()
|
||||
.map(SegmentReader::get_store_reader)
|
||||
.collect();
|
||||
Searcher {
|
||||
.collect::<io::Result<Vec<_>>>()?;
|
||||
Ok(Searcher {
|
||||
schema,
|
||||
index,
|
||||
segment_readers,
|
||||
store_readers,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the `Index` associated to the `Searcher`
|
||||
@@ -75,13 +75,14 @@ impl Searcher {
|
||||
|
||||
/// Return the overall number of documents containing
|
||||
/// the given term.
|
||||
pub fn doc_freq(&self, term: &Term) -> u64 {
|
||||
self.segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| {
|
||||
u64::from(segment_reader.inverted_index(term.field()).doc_freq(term))
|
||||
})
|
||||
.sum::<u64>()
|
||||
pub fn doc_freq(&self, term: &Term) -> crate::Result<u64> {
|
||||
let mut total_doc_freq = 0;
|
||||
for segment_reader in &self.segment_readers {
|
||||
let inverted_index = segment_reader.inverted_index(term.field())?;
|
||||
let doc_freq = inverted_index.doc_freq(term)?;
|
||||
total_doc_freq += u64::from(doc_freq);
|
||||
}
|
||||
Ok(total_doc_freq)
|
||||
}
|
||||
|
||||
/// Return the list of segment readers
|
||||
@@ -148,22 +149,22 @@ impl Searcher {
|
||||
}
|
||||
|
||||
/// Return the field searcher associated to a `Field`.
|
||||
pub fn field(&self, field: Field) -> FieldSearcher {
|
||||
let inv_index_readers = self
|
||||
pub fn field(&self, field: Field) -> crate::Result<FieldSearcher> {
|
||||
let inv_index_readers: Vec<Arc<InvertedIndexReader>> = self
|
||||
.segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| segment_reader.inverted_index(field))
|
||||
.collect::<Vec<_>>();
|
||||
FieldSearcher::new(inv_index_readers)
|
||||
.collect::<crate::Result<Vec<_>>>()?;
|
||||
Ok(FieldSearcher::new(inv_index_readers))
|
||||
}
|
||||
|
||||
/// Summarize total space usage of this searcher.
|
||||
pub fn space_usage(&self) -> SearcherSpaceUsage {
|
||||
pub fn space_usage(&self) -> io::Result<SearcherSpaceUsage> {
|
||||
let mut space_usage = SearcherSpaceUsage::new();
|
||||
for segment_reader in self.segment_readers.iter() {
|
||||
space_usage.add_segment(segment_reader.space_usage());
|
||||
for segment_reader in &self.segment_readers {
|
||||
space_usage.add_segment(segment_reader.space_usage()?);
|
||||
}
|
||||
space_usage
|
||||
Ok(space_usage)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ use crate::core::SegmentId;
|
||||
use crate::core::SegmentMeta;
|
||||
use crate::directory::error::{OpenReadError, OpenWriteError};
|
||||
use crate::directory::Directory;
|
||||
use crate::directory::{ReadOnlySource, WritePtr};
|
||||
use crate::directory::{FileSlice, WritePtr};
|
||||
use crate::indexer::segment_serializer::SegmentSerializer;
|
||||
use crate::schema::Schema;
|
||||
use crate::Opstamp;
|
||||
@@ -78,10 +78,9 @@ impl Segment {
|
||||
}
|
||||
|
||||
/// Open one of the component files for a *regular* read.
|
||||
pub fn open_read(&self, component: SegmentComponent) -> Result<ReadOnlySource, OpenReadError> {
|
||||
pub fn open_read(&self, component: SegmentComponent) -> Result<FileSlice, OpenReadError> {
|
||||
let path = self.relative_path(component);
|
||||
let source = self.index.directory().open_read(&path)?;
|
||||
Ok(source)
|
||||
self.index.directory().open_read(&path)
|
||||
}
|
||||
|
||||
/// Open one of the component files for a *regular* write.
|
||||
|
||||
@@ -20,7 +20,7 @@ pub enum SegmentComponent {
|
||||
/// Dictionary associating `Term`s to `TermInfo`s which is
|
||||
/// simply an address into the `postings` file and the `positions` file.
|
||||
TERMS,
|
||||
/// Row-oriented, LZ4-compressed storage of the documents.
|
||||
/// Row-oriented, compressed storage of the documents.
|
||||
/// Accessing a document from the store is relatively slow, as it
|
||||
/// requires to decompress the entire block it belongs to.
|
||||
STORE,
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
use crate::common::CompositeFile;
|
||||
use crate::common::HasLen;
|
||||
use crate::core::InvertedIndexReader;
|
||||
use crate::core::Segment;
|
||||
use crate::core::SegmentComponent;
|
||||
use crate::core::SegmentId;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::fastfield::DeleteBitSet;
|
||||
use crate::fastfield::FacetReader;
|
||||
use crate::fastfield::FastFieldReaders;
|
||||
@@ -16,11 +15,12 @@ use crate::space_usage::SegmentSpaceUsage;
|
||||
use crate::store::StoreReader;
|
||||
use crate::termdict::TermDictionary;
|
||||
use crate::DocId;
|
||||
use crate::{common::CompositeFile, error::DataCorruption};
|
||||
use fail::fail_point;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
use std::sync::RwLock;
|
||||
use std::{collections::HashMap, io};
|
||||
|
||||
/// Entry point to access all of the datastructures of the `Segment`
|
||||
///
|
||||
@@ -50,7 +50,7 @@ pub struct SegmentReader {
|
||||
fast_fields_readers: Arc<FastFieldReaders>,
|
||||
fieldnorm_readers: FieldNormReaders,
|
||||
|
||||
store_source: ReadOnlySource,
|
||||
store_file: FileSlice,
|
||||
delete_bitset_opt: Option<DeleteBitSet>,
|
||||
schema: Schema,
|
||||
}
|
||||
@@ -106,16 +106,26 @@ impl SegmentReader {
|
||||
}
|
||||
|
||||
/// Accessor to the `FacetReader` associated to a given `Field`.
|
||||
pub fn facet_reader(&self, field: Field) -> Option<FacetReader> {
|
||||
pub fn facet_reader(&self, field: Field) -> crate::Result<FacetReader> {
|
||||
let field_entry = self.schema.get_field_entry(field);
|
||||
if field_entry.field_type() != &FieldType::HierarchicalFacet {
|
||||
return None;
|
||||
return Err(crate::TantivyError::InvalidArgument(format!(
|
||||
"Field {:?} is not a facet field.",
|
||||
field_entry.name()
|
||||
)));
|
||||
}
|
||||
let term_ords_reader = self.fast_fields().u64s(field)?;
|
||||
let termdict_source = self.termdict_composite.open_read(field)?;
|
||||
let termdict = TermDictionary::from_source(&termdict_source);
|
||||
let facet_reader = FacetReader::new(term_ords_reader, termdict);
|
||||
Some(facet_reader)
|
||||
let term_ords_reader = self.fast_fields().u64s(field).ok_or_else(|| {
|
||||
DataCorruption::comment_only(format!(
|
||||
"Cannot find data for hierarchical facet {:?}",
|
||||
field_entry.name()
|
||||
))
|
||||
})?;
|
||||
let termdict = self
|
||||
.termdict_composite
|
||||
.open_read(field)
|
||||
.map(TermDictionary::open)
|
||||
.unwrap_or_else(|| Ok(TermDictionary::empty()))?;
|
||||
Ok(FacetReader::new(term_ords_reader, termdict))
|
||||
}
|
||||
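A hedged caller sketch (the field name is hypothetical): `facet_reader` now returns a `Result`, so asking for facets on a non-facet field surfaces as an error instead of a silent `None`.

```rust
use tantivy::schema::Field;
use tantivy::SegmentReader;

// Hypothetical helper: fails if `facet_field` is not a hierarchical facet
// or if its fast-field / term-dictionary data is missing.
fn open_facets(segment_reader: &SegmentReader, facet_field: Field) -> tantivy::Result<()> {
    let _facet_reader = segment_reader.facet_reader(facet_field)?;
    Ok(())
}
```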
|
||||
/// Accessor to the segment's `Field norms`'s reader.
|
||||
@@ -126,10 +136,10 @@ impl SegmentReader {
|
||||
/// They are simply stored as a fast field, serialized in
|
||||
/// the `.fieldnorm` file of the segment.
|
||||
pub fn get_fieldnorms_reader(&self, field: Field) -> crate::Result<FieldNormReader> {
|
||||
self.fieldnorm_readers.get_field(field).ok_or_else(|| {
|
||||
self.fieldnorm_readers.get_field(field)?.ok_or_else(|| {
|
||||
let field_name = self.schema.get_field_name(field);
|
||||
let err_msg = format!(
|
||||
"Field norm not found for field {:?}. Was it market as indexed during indexing.",
|
||||
"Field norm not found for field {:?}. Was it marked as indexed during indexing?",
|
||||
field_name
|
||||
);
|
||||
crate::TantivyError::SchemaError(err_msg)
|
||||
@@ -137,33 +147,33 @@ impl SegmentReader {
|
||||
}
|
||||
|
||||
/// Accessor to the segment's `StoreReader`.
|
||||
pub fn get_store_reader(&self) -> StoreReader {
|
||||
StoreReader::from_source(self.store_source.clone())
|
||||
pub fn get_store_reader(&self) -> io::Result<StoreReader> {
|
||||
StoreReader::open(self.store_file.clone())
|
||||
}
|
||||
|
||||
/// Open a new segment for reading.
|
||||
pub fn open(segment: &Segment) -> crate::Result<SegmentReader> {
|
||||
let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
|
||||
let termdict_composite = CompositeFile::open(&termdict_source)?;
|
||||
let termdict_file = segment.open_read(SegmentComponent::TERMS)?;
|
||||
let termdict_composite = CompositeFile::open(&termdict_file)?;
|
||||
|
||||
let store_source = segment.open_read(SegmentComponent::STORE)?;
|
||||
let store_file = segment.open_read(SegmentComponent::STORE)?;
|
||||
|
||||
fail_point!("SegmentReader::open#middle");
|
||||
|
||||
let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
|
||||
let postings_composite = CompositeFile::open(&postings_source)?;
|
||||
let postings_file = segment.open_read(SegmentComponent::POSTINGS)?;
|
||||
let postings_composite = CompositeFile::open(&postings_file)?;
|
||||
|
||||
let positions_composite = {
|
||||
if let Ok(source) = segment.open_read(SegmentComponent::POSITIONS) {
|
||||
CompositeFile::open(&source)?
|
||||
if let Ok(positions_file) = segment.open_read(SegmentComponent::POSITIONS) {
|
||||
CompositeFile::open(&positions_file)?
|
||||
} else {
|
||||
CompositeFile::empty()
|
||||
}
|
||||
};
|
||||
|
||||
let positions_idx_composite = {
|
||||
if let Ok(source) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
|
||||
CompositeFile::open(&source)?
|
||||
if let Ok(positions_skip_file) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
|
||||
CompositeFile::open(&positions_skip_file)?
|
||||
} else {
|
||||
CompositeFile::empty()
|
||||
}
|
||||
@@ -181,13 +191,14 @@ impl SegmentReader {
|
||||
|
||||
let delete_bitset_opt = if segment.meta().has_deletes() {
|
||||
let delete_data = segment.open_read(SegmentComponent::DELETE)?;
|
||||
Some(DeleteBitSet::open(delete_data))
|
||||
let delete_bitset = DeleteBitSet::open(delete_data)?;
|
||||
Some(delete_bitset)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(SegmentReader {
|
||||
inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
|
||||
inv_idx_reader_cache: Default::default(),
|
||||
max_doc: segment.meta().max_doc(),
|
||||
num_docs: segment.meta().num_docs(),
|
||||
termdict_composite,
|
||||
@@ -195,7 +206,7 @@ impl SegmentReader {
|
||||
fast_fields_readers: fast_field_readers,
|
||||
fieldnorm_readers,
|
||||
segment_id: segment.id(),
|
||||
store_source,
|
||||
store_file,
|
||||
delete_bitset_opt,
|
||||
positions_composite,
|
||||
positions_idx_composite,
|
||||
@@ -215,14 +226,14 @@ impl SegmentReader {
|
||||
/// is returned.
|
||||
/// Similarly if the field is marked as indexed but no term has been indexed for the given
|
||||
/// index, an empty `InvertedIndexReader` is returned (but no warning is logged).
|
||||
pub fn inverted_index(&self, field: Field) -> Arc<InvertedIndexReader> {
|
||||
pub fn inverted_index(&self, field: Field) -> crate::Result<Arc<InvertedIndexReader>> {
|
||||
if let Some(inv_idx_reader) = self
|
||||
.inv_idx_reader_cache
|
||||
.read()
|
||||
.expect("Lock poisoned. This should never happen")
|
||||
.get(&field)
|
||||
{
|
||||
return Arc::clone(inv_idx_reader);
|
||||
return Ok(Arc::clone(inv_idx_reader));
|
||||
}
|
||||
let field_entry = self.schema.get_field_entry(field);
|
||||
let field_type = field_entry.field_type();
|
||||
@@ -232,41 +243,42 @@ impl SegmentReader {
|
||||
warn!("Field {:?} does not seem indexed.", field_entry.name());
|
||||
}
|
||||
|
||||
let postings_source_opt = self.postings_composite.open_read(field);
|
||||
let postings_file_opt = self.postings_composite.open_read(field);
|
||||
|
||||
if postings_source_opt.is_none() || record_option_opt.is_none() {
|
||||
if postings_file_opt.is_none() || record_option_opt.is_none() {
|
||||
// no documents in the segment contained this field.
|
||||
// As a result, no data is associated to the inverted index.
|
||||
//
|
||||
// Returns an empty inverted index.
|
||||
let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic);
|
||||
return Arc::new(InvertedIndexReader::empty(record_option));
|
||||
return Ok(Arc::new(InvertedIndexReader::empty(record_option)));
|
||||
}
|
||||
|
||||
let record_option = record_option_opt.unwrap();
|
||||
let postings_source = postings_source_opt.unwrap();
|
||||
let postings_file = postings_file_opt.unwrap();
|
||||
|
||||
let termdict_source = self.termdict_composite.open_read(field).expect(
|
||||
"Failed to open field term dictionary in composite file. Is the field indexed?",
|
||||
);
|
||||
let termdict_file: FileSlice = self.termdict_composite.open_read(field)
|
||||
.ok_or_else(||
|
||||
DataCorruption::comment_only(format!("Failed to open field {:?}'s term dictionary in the composite file. Has the schema been modified?", field_entry.name()))
|
||||
)?;
|
||||
|
||||
let positions_source = self
|
||||
let positions_file = self
|
||||
.positions_composite
|
||||
.open_read(field)
|
||||
.expect("Index corrupted. Failed to open field positions in composite file.");
|
||||
|
||||
let positions_idx_source = self
|
||||
let positions_idx_file = self
|
||||
.positions_idx_composite
|
||||
.open_read(field)
|
||||
.expect("Index corrupted. Failed to open field positions in composite file.");
|
||||
|
||||
let inv_idx_reader = Arc::new(InvertedIndexReader::new(
|
||||
TermDictionary::from_source(&termdict_source),
|
||||
postings_source,
|
||||
positions_source,
|
||||
positions_idx_source,
|
||||
TermDictionary::open(termdict_file)?,
|
||||
postings_file,
|
||||
positions_file,
|
||||
positions_idx_file,
|
||||
record_option,
|
||||
));
|
||||
)?);
|
||||
|
||||
// by releasing the lock in between, we may end up opening the inverting index
|
||||
// twice, but this is fine.
|
||||
@@ -275,7 +287,7 @@ impl SegmentReader {
|
||||
.expect("Field reader cache lock poisoned. This should never happen.")
|
||||
.insert(field, Arc::clone(&inv_idx_reader));
|
||||
|
||||
inv_idx_reader
|
||||
Ok(inv_idx_reader)
|
||||
}
|
||||
|
||||
/// Returns the segment id
|
||||
@@ -303,8 +315,8 @@ impl SegmentReader {
|
||||
}
|
||||
|
||||
/// Summarize total space usage of this segment.
|
||||
pub fn space_usage(&self) -> SegmentSpaceUsage {
|
||||
SegmentSpaceUsage::new(
|
||||
pub fn space_usage(&self) -> io::Result<SegmentSpaceUsage> {
|
||||
Ok(SegmentSpaceUsage::new(
|
||||
self.num_docs(),
|
||||
self.termdict_composite.space_usage(),
|
||||
self.postings_composite.space_usage(),
|
||||
@@ -312,12 +324,12 @@ impl SegmentReader {
|
||||
self.positions_idx_composite.space_usage(),
|
||||
self.fast_fields_readers.space_usage(),
|
||||
self.fieldnorm_readers.space_usage(),
|
||||
self.get_store_reader().space_usage(),
|
||||
self.get_store_reader()?.space_usage(),
|
||||
self.delete_bitset_opt
|
||||
.as_ref()
|
||||
.map(DeleteBitSet::space_usage)
|
||||
.unwrap_or(0),
|
||||
)
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -334,7 +346,7 @@ mod test {
|
||||
use crate::DocId;
|
||||
|
||||
#[test]
|
||||
fn test_alive_docs_iterator() {
|
||||
fn test_alive_docs_iterator() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_text_field("name", TEXT | STORED);
|
||||
let schema = schema_builder.build();
|
||||
@@ -342,26 +354,26 @@ mod test {
|
||||
let name = schema.get_field("name").unwrap();
|
||||
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(name => "tantivy"));
|
||||
index_writer.add_document(doc!(name => "horse"));
|
||||
index_writer.add_document(doc!(name => "jockey"));
|
||||
index_writer.add_document(doc!(name => "cap"));
|
||||
|
||||
// we should now have one segment with two docs
|
||||
index_writer.commit().unwrap();
|
||||
index_writer.commit()?;
|
||||
}
|
||||
|
||||
{
|
||||
let mut index_writer2 = index.writer(50_000_000).unwrap();
|
||||
let mut index_writer2 = index.writer(50_000_000)?;
|
||||
index_writer2.delete_term(Term::from_field_text(name, "horse"));
|
||||
index_writer2.delete_term(Term::from_field_text(name, "cap"));
|
||||
|
||||
// ok, now we should have a deleted doc
|
||||
index_writer2.commit().unwrap();
|
||||
index_writer2.commit()?;
|
||||
}
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let searcher = index.reader()?.searcher();
|
||||
let docs: Vec<DocId> = searcher.segment_reader(0).doc_ids_alive().collect();
|
||||
assert_eq!(vec![0u32, 2u32], docs);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ use crate::directory::error::LockError;
|
||||
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
|
||||
use crate::directory::WatchCallback;
|
||||
use crate::directory::WatchHandle;
|
||||
use crate::directory::{ReadOnlySource, WritePtr};
|
||||
use crate::directory::{FileSlice, WritePtr};
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
@@ -11,7 +11,6 @@ use std::marker::Send;
|
||||
use std::marker::Sync;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::result;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -80,7 +79,7 @@ fn try_acquire_lock(
|
||||
) -> Result<DirectoryLock, TryAcquireLockError> {
|
||||
let mut write = directory.open_write(filepath).map_err(|e| match e {
|
||||
OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
|
||||
OpenWriteError::IOError(io_error) => TryAcquireLockError::IOError(io_error.into()),
|
||||
OpenWriteError::IOError { io_error, .. } => TryAcquireLockError::IOError(io_error),
|
||||
})?;
|
||||
write.flush().map_err(TryAcquireLockError::IOError)?;
|
||||
Ok(DirectoryLock::from(Box::new(DirectoryLockGuard {
|
||||
@@ -117,19 +116,19 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
|
||||
/// change.
|
||||
///
|
||||
/// Specifically, subsequent writes or flushes should
|
||||
/// have no effect on the returned `ReadOnlySource` object.
|
||||
/// have no effect on the returned `FileSlice` object.
|
||||
///
|
||||
/// You should only use this to read files created with [Directory::open_write].
|
||||
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
|
||||
fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError>;
|
||||
|
||||
/// Removes a file
|
||||
///
|
||||
/// Removing a file will not affect an eventual
|
||||
/// existing ReadOnlySource pointing to it.
|
||||
/// existing FileSlice pointing to it.
|
||||
///
|
||||
/// Removing a nonexistent file yields a
|
||||
/// `DeleteError::DoesNotExist`.
|
||||
fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;
|
||||
fn delete(&self, path: &Path) -> Result<(), DeleteError>;
|
||||
|
||||
/// Returns true iff the file exists
|
||||
fn exists(&self, path: &Path) -> bool;
|
||||
@@ -139,7 +138,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
|
||||
///
|
||||
/// Right after this call, the file should be created
|
||||
/// and any subsequent call to `open_read` for the
|
||||
/// same path should return a `ReadOnlySource`.
|
||||
/// same path should return a `FileSlice`.
|
||||
///
|
||||
/// Write operations may be aggressively buffered.
|
||||
/// The client of this trait is responsible for calling flush
|
||||
@@ -153,7 +152,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
|
||||
/// was not called.
|
||||
///
|
||||
/// The file may not previously exist.
|
||||
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;
|
||||
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError>;
|
||||
|
||||
/// Reads the full content file that has been written using
|
||||
/// atomic_write.
|
||||
@@ -169,7 +168,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
|
||||
/// a partially written file.
|
||||
///
|
||||
/// The file may or may not previously exist.
|
||||
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()>;
|
||||
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()>;
|
||||
|
||||
/// Acquire a lock in the given directory.
|
||||
///
|
||||
|
||||
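As a quick illustration (the path and helper name are made up), the switch from `&mut self` to `&self` lets a shared directory handle be written through without a mutable borrow:

```rust
use std::io;
use std::path::Path;
use tantivy::Directory;

// Hypothetical helper, only meant to show the relaxed receiver on write methods.
fn write_marker<D: Directory>(dir: &D) -> io::Result<()> {
    dir.atomic_write(Path::new("marker.json"), b"{}")
}
```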
@@ -1,162 +1,67 @@
|
||||
use crate::Version;
|
||||
use std::error::Error as StdError;
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Error while trying to acquire a directory lock.
|
||||
#[derive(Debug, Fail)]
|
||||
#[derive(Debug, Error)]
|
||||
pub enum LockError {
|
||||
/// Failed to acquire a lock as it is already held by another
|
||||
/// client.
|
||||
/// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
|
||||
/// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
|
||||
#[fail(
|
||||
display = "Could not acquire lock as it is already held, possibly by a different process."
|
||||
)]
|
||||
#[error("Could not acquire lock as it is already held, possibly by a different process.")]
|
||||
LockBusy,
|
||||
/// Trying to acquire a lock failed with an `IOError`
|
||||
#[fail(display = "Failed to acquire the lock due to an io:Error.")]
|
||||
#[error("Failed to acquire the lock due to an io:Error.")]
|
||||
IOError(io::Error),
|
||||
}
|
||||
|
||||
/// General IO error with an optional path to the offending file.
|
||||
#[derive(Debug)]
|
||||
pub struct IOError {
|
||||
path: Option<PathBuf>,
|
||||
err: io::Error,
|
||||
}
|
||||
|
||||
impl Into<io::Error> for IOError {
|
||||
fn into(self) -> io::Error {
|
||||
self.err
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for IOError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self.path {
|
||||
Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err),
|
||||
None => write!(f, "io error occurred: '{}'", self.err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StdError for IOError {
|
||||
fn description(&self) -> &str {
|
||||
"io error occurred"
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&dyn StdError> {
|
||||
Some(&self.err)
|
||||
}
|
||||
}
|
||||
|
||||
impl IOError {
|
||||
pub(crate) fn with_path(path: PathBuf, err: io::Error) -> Self {
|
||||
IOError {
|
||||
path: Some(path),
|
||||
err,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for IOError {
|
||||
fn from(err: io::Error) -> IOError {
|
||||
IOError { path: None, err }
|
||||
}
|
||||
}
|
||||
|
||||
/// Error that may occur when opening a directory
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Error)]
|
||||
pub enum OpenDirectoryError {
|
||||
/// The underlying directory does not exist.
|
||||
#[error("Directory does not exist: '{0}'.")]
|
||||
DoesNotExist(PathBuf),
|
||||
/// The path exists but is not a directory.
|
||||
#[error("Path exists but is not a directory: '{0}'.")]
|
||||
NotADirectory(PathBuf),
|
||||
/// Failed to create a temp directory.
|
||||
#[error("Failed to create a temporary directory: '{0}'.")]
|
||||
FailedToCreateTempDir(io::Error),
|
||||
/// IoError
|
||||
IoError(io::Error),
|
||||
}
|
||||
|
||||
impl From<io::Error> for OpenDirectoryError {
|
||||
fn from(io_err: io::Error) -> Self {
|
||||
OpenDirectoryError::IoError(io_err)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for OpenDirectoryError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
OpenDirectoryError::DoesNotExist(ref path) => {
|
||||
write!(f, "the underlying directory '{:?}' does not exist", path)
|
||||
}
|
||||
OpenDirectoryError::NotADirectory(ref path) => {
|
||||
write!(f, "the path '{:?}' exists but is not a directory", path)
|
||||
}
|
||||
OpenDirectoryError::IoError(ref err) => write!(
|
||||
f,
|
||||
"IOError while trying to open/create the directory. {:?}",
|
||||
err
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StdError for OpenDirectoryError {
|
||||
fn description(&self) -> &str {
|
||||
"error occurred while opening a directory"
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&dyn StdError> {
|
||||
None
|
||||
}
|
||||
#[error("IOError '{io_error:?}' while create directory in: '{directory_path:?}'.")]
|
||||
IoError {
|
||||
/// underlying io Error.
|
||||
io_error: io::Error,
|
||||
/// directory we tried to open.
|
||||
directory_path: PathBuf,
|
||||
},
|
||||
}
|
||||
|
||||
/// Error that may occur when starting to write in a file
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Error)]
|
||||
pub enum OpenWriteError {
|
||||
/// Our directory is WORM, writing an existing file is forbidden.
|
||||
/// Checkout the `Directory` documentation.
|
||||
#[error("File already exists: '{0}'")]
|
||||
FileAlreadyExists(PathBuf),
|
||||
/// Any kind of IO error that happens when
|
||||
/// writing in the underlying IO device.
|
||||
IOError(IOError),
|
||||
#[error("IOError '{io_error:?}' while opening file for write: '{filepath}'.")]
|
||||
IOError {
|
||||
/// The underlying `io::Error`.
|
||||
io_error: io::Error,
|
||||
/// File path of the file that tantivy failed to open for write.
|
||||
filepath: PathBuf,
|
||||
},
|
||||
}
|
||||
|
||||
impl From<IOError> for OpenWriteError {
|
||||
fn from(err: IOError) -> OpenWriteError {
|
||||
OpenWriteError::IOError(err)
|
||||
impl OpenWriteError {
|
||||
pub(crate) fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
|
||||
Self::IOError { io_error, filepath }
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for OpenWriteError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
OpenWriteError::FileAlreadyExists(ref path) => {
|
||||
write!(f, "the file '{:?}' already exists", path)
|
||||
}
|
||||
OpenWriteError::IOError(ref err) => write!(
|
||||
f,
|
||||
"an io error occurred while opening a file for writing: '{}'",
|
||||
err
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StdError for OpenWriteError {
|
||||
fn description(&self) -> &str {
|
||||
"error occurred while opening a file for writing"
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&dyn StdError> {
|
||||
match *self {
|
||||
OpenWriteError::FileAlreadyExists(_) => None,
|
||||
OpenWriteError::IOError(ref err) => Some(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Type of index incompatibility between the library and the index found on disk
|
||||
/// Used to catch and provide a hint to solve this incompatibility issue
|
||||
pub enum Incompatibility {
|
||||
@@ -217,55 +122,46 @@ impl fmt::Debug for Incompatibility {
|
||||
}
|
||||
|
||||
/// Error that may occur when trying to open a file for reading
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Error)]
|
||||
pub enum OpenReadError {
|
||||
/// The file does not exist.
|
||||
#[error("Files does not exists: {0:?}")]
|
||||
FileDoesNotExist(PathBuf),
|
||||
/// Any kind of IO error that happens when
|
||||
/// interacting with the underlying IO device.
|
||||
IOError(IOError),
|
||||
/// This library doesn't support the index version found on disk
|
||||
/// Any kind of io::Error.
|
||||
#[error(
|
||||
"IOError: '{io_error:?}' happened while opening the following file for Read: {filepath}."
|
||||
)]
|
||||
IOError {
|
||||
/// The underlying `io::Error`.
|
||||
io_error: io::Error,
|
||||
/// File path of the file that tantivy failed to open for read.
|
||||
filepath: PathBuf,
|
||||
},
|
||||
/// This library does not support the index version found in file footer.
|
||||
#[error("Index version unsupported: {0:?}")]
|
||||
IncompatibleIndex(Incompatibility),
|
||||
}
|
||||
|
||||
impl From<IOError> for OpenReadError {
|
||||
fn from(err: IOError) -> OpenReadError {
|
||||
OpenReadError::IOError(err)
|
||||
impl OpenReadError {
|
||||
pub(crate) fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
|
||||
Self::IOError { io_error, filepath }
|
||||
}
|
||||
}
|
||||
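A small, hedged sketch of matching on the new struct-like `IOError` variant, which now carries both the underlying `io::Error` and the offending path:

```rust
use tantivy::directory::error::OpenReadError;

// Illustrative only: turns any OpenReadError into a human-readable line.
fn describe(err: &OpenReadError) -> String {
    match err {
        OpenReadError::FileDoesNotExist(path) => format!("missing file: {:?}", path),
        OpenReadError::IOError { io_error, filepath } => {
            format!("io error on {:?}: {}", filepath, io_error)
        }
        OpenReadError::IncompatibleIndex(incompatibility) => {
            format!("incompatible index: {:?}", incompatibility)
        }
    }
}
```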
|
||||
impl fmt::Display for OpenReadError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
OpenReadError::FileDoesNotExist(ref path) => {
|
||||
write!(f, "the file '{:?}' does not exist", path)
|
||||
}
|
||||
OpenReadError::IOError(ref err) => write!(
|
||||
f,
|
||||
"an io error occurred while opening a file for reading: '{}'",
|
||||
err
|
||||
),
|
||||
OpenReadError::IncompatibleIndex(ref footer) => {
|
||||
write!(f, "Incompatible index format: {:?}", footer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Error that may occur when trying to delete a file
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Error)]
|
||||
pub enum DeleteError {
|
||||
/// The file does not exist.
|
||||
#[error("File does not exists: '{0}'.")]
|
||||
FileDoesNotExist(PathBuf),
|
||||
/// Any kind of IO error that happens when
|
||||
/// interacting with the underlying IO device.
|
||||
IOError(IOError),
|
||||
}
|
||||
|
||||
impl From<IOError> for DeleteError {
|
||||
fn from(err: IOError) -> DeleteError {
|
||||
DeleteError::IOError(err)
|
||||
}
|
||||
#[error("The following IO error happened while deleting file '{filepath}': '{io_error:?}'.")]
|
||||
IOError {
|
||||
/// The underlying `io::Error`.
|
||||
io_error: io::Error,
|
||||
/// File path of the file that tantivy failed to delete.
|
||||
filepath: PathBuf,
|
||||
},
|
||||
}
|
||||
|
||||
impl From<Incompatibility> for OpenReadError {
|
||||
@@ -273,29 +169,3 @@ impl From<Incompatibility> for OpenReadError {
|
||||
OpenReadError::IncompatibleIndex(incompatibility)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for DeleteError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
DeleteError::FileDoesNotExist(ref path) => {
|
||||
write!(f, "the file '{:?}' does not exist", path)
|
||||
}
|
||||
DeleteError::IOError(ref err) => {
|
||||
write!(f, "an io error occurred while deleting a file: '{}'", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StdError for DeleteError {
|
||||
fn description(&self) -> &str {
|
||||
"error occurred while deleting a file"
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&dyn StdError> {
|
||||
match *self {
|
||||
DeleteError::FileDoesNotExist(_) => None,
|
||||
DeleteError::IOError(ref err) => Some(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
src/directory/file_slice.rs (new file, 237 lines)
@@ -0,0 +1,237 @@
|
||||
use stable_deref_trait::StableDeref;
|
||||
|
||||
use crate::common::HasLen;
|
||||
use crate::directory::OwnedBytes;
|
||||
use std::sync::Arc;
|
||||
use std::{io, ops::Deref};
|
||||
|
||||
pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
|
||||
|
||||
/// Objects that represent file sections in tantivy.
|
||||
///
|
||||
/// By contract, whatever happens to the directory file, as long as a FileHandle
|
||||
/// is alive, the data associated with it cannot be altered or destroyed.
|
||||
///
|
||||
/// The underlying behavior is therefore specific to the `Directory` that created it.
|
||||
/// Despite its name, a `FileSlice` may or may not directly map to an actual file
|
||||
/// on the filesystem.
|
||||
pub trait FileHandle: 'static + Send + Sync + HasLen {
|
||||
/// Reads a slice of bytes.
|
||||
///
|
||||
/// This method may panic if the range requested is invalid.
|
||||
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes>;
|
||||
}
|
||||
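A hedged sketch of a custom `FileHandle` backed by in-memory bytes, using the crate-internal paths that appear in this diff (the type name is made up):

```rust
use crate::common::HasLen;
use crate::directory::{FileHandle, OwnedBytes};
use std::io;

// Hypothetical handle that keeps its data as `OwnedBytes`.
struct InMemoryFile(OwnedBytes);

impl HasLen for InMemoryFile {
    fn len(&self) -> usize {
        self.0.len()
    }
}

impl FileHandle for InMemoryFile {
    fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
        // `OwnedBytes::slice` is cheap: both slices share the same owner.
        Ok(self.0.slice(from, to))
    }
}
```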
|
||||
impl FileHandle for &'static [u8] {
|
||||
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
|
||||
let bytes = &self[from..to];
|
||||
Ok(OwnedBytes::new(bytes))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Deref<Target = [u8]>> HasLen for T {
|
||||
fn len(&self) -> usize {
|
||||
self.as_ref().len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<B> From<B> for FileSlice
|
||||
where
|
||||
B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync,
|
||||
{
|
||||
fn from(bytes: B) -> FileSlice {
|
||||
FileSlice::new(OwnedBytes::new(bytes))
|
||||
}
|
||||
}
|
||||
|
||||
/// Logical slice of a read-only file in tantivy.
|
||||
///
|
||||
/// It can be cloned and sliced cheaply.
|
||||
///
|
||||
#[derive(Clone)]
|
||||
pub struct FileSlice {
|
||||
data: Arc<Box<dyn FileHandle>>,
|
||||
start: usize,
|
||||
stop: usize,
|
||||
}
|
||||
|
||||
impl FileSlice {
|
||||
/// Wraps a FileHandle.
|
||||
pub fn new<D>(data: D) -> Self
|
||||
where
|
||||
D: FileHandle,
|
||||
{
|
||||
let len = data.len();
|
||||
FileSlice {
|
||||
data: Arc::new(Box::new(data)),
|
||||
start: 0,
|
||||
stop: len,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a fileslice that is just a view over a slice of the data.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if `to < from` or if `to` exceeds the filesize.
|
||||
pub fn slice(&self, from: usize, to: usize) -> FileSlice {
|
||||
assert!(to <= self.len());
|
||||
assert!(to >= from);
|
||||
FileSlice {
|
||||
data: self.data.clone(),
|
||||
start: self.start + from,
|
||||
stop: self.start + to,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates an empty FileSlice
|
||||
pub fn empty() -> FileSlice {
|
||||
const EMPTY_SLICE: &[u8] = &[];
|
||||
FileSlice::from(EMPTY_SLICE)
|
||||
}
|
||||
|
||||
/// Returns an `OwnedBytes` with all of the data in the `FileSlice`.
|
||||
///
|
||||
/// The behavior is strongly dependent on the implementation of the underlying
|
||||
/// `Directory` and the `FileHandle` it creates.
|
||||
/// In particular, it is up to the `Directory` implementation
|
||||
/// to handle caching if needed.
|
||||
pub fn read_bytes(&self) -> io::Result<OwnedBytes> {
|
||||
self.data.read_bytes(self.start, self.stop)
|
||||
}
|
||||
|
||||
/// Reads a specific slice of data.
|
||||
///
|
||||
/// This is equivalent to running `file_slice.slice(from, to).read_bytes()`.
|
||||
pub fn read_bytes_slice(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
|
||||
assert!(from <= to);
|
||||
assert!(
|
||||
self.start + to <= self.stop,
|
||||
"`to` exceeds the fileslice length"
|
||||
);
|
||||
self.data.read_bytes(self.start + from, self.start + to)
|
||||
}
|
||||
|
||||
/// Splits the FileSlice at the given offset and returns two file slices:
|
||||
/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
|
||||
///
|
||||
/// This operation is cheap and must not copy any underlying data.
|
||||
pub fn split(self, left_len: usize) -> (FileSlice, FileSlice) {
|
||||
let left = self.slice_to(left_len);
|
||||
let right = self.slice_from(left_len);
|
||||
(left, right)
|
||||
}
|
||||
|
||||
/// Splits the file slice at the given offset and returns two file slices:
|
||||
/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
|
||||
pub fn split_from_end(self, right_len: usize) -> (FileSlice, FileSlice) {
|
||||
let left_len = self.len() - right_len;
|
||||
self.split(left_len)
|
||||
}
|
||||
|
||||
/// Like `.slice(...)` but enforcing only the `from`
|
||||
/// boundary.
|
||||
///
|
||||
/// Equivalent to `.slice(from_offset, self.len())`
|
||||
pub fn slice_from(&self, from_offset: usize) -> FileSlice {
|
||||
self.slice(from_offset, self.len())
|
||||
}
|
||||
|
||||
/// Like `.slice(...)` but enforcing only the `to`
|
||||
/// boundary.
|
||||
///
|
||||
/// Equivalent to `.slice(0, to_offset)`
|
||||
pub fn slice_to(&self, to_offset: usize) -> FileSlice {
|
||||
self.slice(0, to_offset)
|
||||
}
|
||||
}
|
||||
|
||||
impl HasLen for FileSlice {
|
||||
fn len(&self) -> usize {
|
||||
self.stop - self.start
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{FileHandle, FileSlice};
|
||||
use crate::common::HasLen;
|
||||
use std::io;
|
||||
|
||||
#[test]
|
||||
fn test_file_slice() -> io::Result<()> {
|
||||
let file_slice = FileSlice::new(b"abcdef".as_ref());
|
||||
assert_eq!(file_slice.len(), 6);
|
||||
assert_eq!(file_slice.slice_from(2).read_bytes()?.as_slice(), b"cdef");
|
||||
assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab");
|
||||
assert_eq!(
|
||||
file_slice
|
||||
.slice_from(1)
|
||||
.slice_to(2)
|
||||
.read_bytes()?
|
||||
.as_slice(),
|
||||
b"bc"
|
||||
);
|
||||
{
|
||||
let (left, right) = file_slice.clone().split(0);
|
||||
assert_eq!(left.read_bytes()?.as_slice(), b"");
|
||||
assert_eq!(right.read_bytes()?.as_slice(), b"abcdef");
|
||||
}
|
||||
{
|
||||
let (left, right) = file_slice.clone().split(2);
|
||||
assert_eq!(left.read_bytes()?.as_slice(), b"ab");
|
||||
assert_eq!(right.read_bytes()?.as_slice(), b"cdef");
|
||||
}
|
||||
{
|
||||
let (left, right) = file_slice.clone().split_from_end(0);
|
||||
assert_eq!(left.read_bytes()?.as_slice(), b"abcdef");
|
||||
assert_eq!(right.read_bytes()?.as_slice(), b"");
|
||||
}
|
||||
{
|
||||
let (left, right) = file_slice.clone().split_from_end(2);
|
||||
assert_eq!(left.read_bytes()?.as_slice(), b"abcd");
|
||||
assert_eq!(right.read_bytes()?.as_slice(), b"ef");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_file_slice_trait_slice_len() {
|
||||
let blop: &'static [u8] = b"abc";
|
||||
let owned_bytes: Box<dyn FileHandle> = Box::new(blop);
|
||||
assert_eq!(owned_bytes.len(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_slice_simple_read() -> io::Result<()> {
|
||||
let slice = FileSlice::new(&b"abcdef"[..]);
|
||||
assert_eq!(slice.len(), 6);
|
||||
assert_eq!(slice.read_bytes()?.as_ref(), b"abcdef");
|
||||
assert_eq!(slice.slice(1, 4).read_bytes()?.as_ref(), b"bcd");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_slice_read_slice() -> io::Result<()> {
|
||||
let slice_deref = FileSlice::new(&b"abcdef"[..]);
|
||||
assert_eq!(slice_deref.read_bytes_slice(1, 4)?.as_ref(), b"bcd");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "assertion failed: from <= to")]
|
||||
fn test_slice_read_slice_invalid_range() {
|
||||
let slice_deref = FileSlice::new(&b"abcdef"[..]);
|
||||
assert_eq!(slice_deref.read_bytes_slice(1, 0).unwrap().as_ref(), b"bcd");
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "`to` exceeds the fileslice length")]
|
||||
fn test_slice_read_slice_invalid_range_exceeds() {
|
||||
let slice_deref = FileSlice::new(&b"abcdef"[..]);
|
||||
assert_eq!(
|
||||
slice_deref.read_bytes_slice(0, 10).unwrap().as_ref(),
|
||||
b"bcd"
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,9 +1,8 @@
|
||||
use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt};
|
||||
use crate::common::{BinarySerializable, CountingWriter, FixedSize, HasLen, VInt};
|
||||
use crate::directory::error::Incompatibility;
|
||||
use crate::directory::read_only_source::ReadOnlySource;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::{AntiCallToken, TerminatingWrite};
|
||||
use crate::Version;
|
||||
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
|
||||
use crc32fast::Hasher;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
@@ -64,26 +63,26 @@ impl Footer {
|
||||
let mut counting_write = CountingWriter::wrap(&mut write);
|
||||
self.serialize(&mut counting_write)?;
|
||||
let written_len = counting_write.written_bytes();
|
||||
write.write_u32::<LittleEndian>(written_len as u32)?;
|
||||
(written_len as u32).serialize(write)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
|
||||
if source.len() < 4 {
|
||||
pub fn extract_footer(file: FileSlice) -> io::Result<(Footer, FileSlice)> {
|
||||
if file.len() < 4 {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
format!(
|
||||
"File corrupted. The file is smaller than 4 bytes (len={}).",
|
||||
source.len()
|
||||
file.len()
|
||||
),
|
||||
));
|
||||
}
|
||||
let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES);
|
||||
let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize;
|
||||
let body_len = body_footer.len() - footer_len;
|
||||
let (body, footer_data) = body_footer.split(body_len);
|
||||
let mut cursor = footer_data.as_slice();
|
||||
let footer = Footer::deserialize(&mut cursor)?;
|
||||
let (body_footer, footer_len_file) = file.split_from_end(u32::SIZE_IN_BYTES);
|
||||
let mut footer_len_bytes = footer_len_file.read_bytes()?;
|
||||
let footer_len = u32::deserialize(&mut footer_len_bytes)? as usize;
|
||||
let (body, footer) = body_footer.split_from_end(footer_len);
|
||||
let mut footer_bytes = footer.read_bytes()?;
|
||||
let footer = Footer::deserialize(&mut footer_bytes)?;
|
||||
Ok((footer, body))
|
||||
}
|
||||
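A hedged sketch of the on-disk layout this implies, write side: the serialized footer is appended after the body, followed by its own length as a little-endian `u32`, so `extract_footer` can peel it back off from the end.

```rust
// Illustrative only; the real write path goes through `BinarySerializable`.
fn append_footer(body: &mut Vec<u8>, footer_bytes: &[u8]) {
    body.extend_from_slice(footer_bytes);
    body.extend_from_slice(&(footer_bytes.len() as u32).to_le_bytes());
}
```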
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
use crate::core::{MANAGED_FILEPATH, META_FILEPATH};
|
||||
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
|
||||
use crate::directory::error::{DeleteError, LockError, OpenReadError, OpenWriteError};
|
||||
use crate::directory::footer::{Footer, FooterProxy};
|
||||
use crate::directory::DirectoryLock;
|
||||
use crate::directory::GarbageCollectionResult;
|
||||
use crate::directory::Lock;
|
||||
use crate::directory::META_LOCK;
|
||||
use crate::directory::{ReadOnlySource, WritePtr};
|
||||
use crate::directory::{FileSlice, WritePtr};
|
||||
use crate::directory::{WatchCallback, WatchHandle};
|
||||
use crate::error::DataCorruption;
|
||||
use crate::Directory;
|
||||
@@ -53,7 +53,7 @@ struct MetaInformation {
|
||||
/// Saves the file containing the list of existing files
|
||||
/// that were created by tantivy.
|
||||
fn save_managed_paths(
|
||||
directory: &mut dyn Directory,
|
||||
directory: &dyn Directory,
|
||||
wlock: &RwLockWriteGuard<'_, MetaInformation>,
|
||||
) -> io::Result<()> {
|
||||
let mut w = serde_json::to_vec(&wlock.managed_paths)?;
|
||||
@@ -86,7 +86,7 @@ impl ManagedDirectory {
|
||||
directory: Box::new(directory),
|
||||
meta_informations: Arc::default(),
|
||||
}),
|
||||
Err(OpenReadError::IOError(e)) => Err(From::from(e)),
|
||||
io_err @ Err(OpenReadError::IOError { .. }) => Err(io_err.err().unwrap().into()),
|
||||
Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
|
||||
// For the moment, this should never happen `meta.json`
|
||||
// do not have any footer and cannot detect incompatibility.
|
||||
@@ -168,7 +168,7 @@ impl ManagedDirectory {
|
||||
DeleteError::FileDoesNotExist(_) => {
|
||||
deleted_files.push(file_to_delete.clone());
|
||||
}
|
||||
DeleteError::IOError(_) => {
|
||||
DeleteError::IOError { .. } => {
|
||||
failed_to_delete_files.push(file_to_delete.clone());
|
||||
if !cfg!(target_os = "windows") {
|
||||
// On windows, delete is expected to fail if the file
|
||||
@@ -212,7 +212,7 @@ impl ManagedDirectory {
|
||||
/// Files starting with "." are reserved for locks.
|
||||
/// They are not managed and cannot be subjected
|
||||
/// to garbage collection.
|
||||
fn register_file_as_managed(&mut self, filepath: &Path) -> io::Result<()> {
|
||||
fn register_file_as_managed(&self, filepath: &Path) -> io::Result<()> {
|
||||
// Files starting by "." (e.g. lock files) are not managed.
|
||||
if !is_managed(filepath) {
|
||||
return Ok(());
|
||||
@@ -223,7 +223,7 @@ impl ManagedDirectory {
|
||||
.expect("Managed file lock poisoned");
|
||||
let has_changed = meta_wlock.managed_paths.insert(filepath.to_owned());
|
||||
if has_changed {
|
||||
save_managed_paths(self.directory.as_mut(), &meta_wlock)?;
|
||||
save_managed_paths(self.directory.as_ref(), &meta_wlock)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -231,10 +231,19 @@ impl ManagedDirectory {
|
||||
/// Verify checksum of a managed file
|
||||
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
|
||||
let reader = self.directory.open_read(path)?;
|
||||
let (footer, data) = Footer::extract_footer(reader)
|
||||
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
|
||||
let (footer, data) =
|
||||
Footer::extract_footer(reader).map_err(|io_error| OpenReadError::IOError {
|
||||
io_error,
|
||||
filepath: path.to_path_buf(),
|
||||
})?;
|
||||
let bytes = data
|
||||
.read_bytes()
|
||||
.map_err(|io_error| OpenReadError::IOError {
|
||||
filepath: path.to_path_buf(),
|
||||
io_error,
|
||||
})?;
|
||||
let mut hasher = Hasher::new();
|
||||
hasher.update(data.as_slice());
|
||||
hasher.update(bytes.as_slice());
|
||||
let crc = hasher.finalize();
|
||||
Ok(footer
|
||||
.versioned_footer
|
||||
@@ -245,7 +254,6 @@ impl ManagedDirectory {
|
||||
|
||||
/// List files for which checksum does not match content
|
||||
pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
|
||||
let mut hashset = HashSet::new();
|
||||
let mut managed_paths = self
|
||||
.meta_informations
|
||||
.read()
|
||||
@@ -255,27 +263,28 @@ impl ManagedDirectory {
|
||||
|
||||
managed_paths.remove(*META_FILEPATH);
|
||||
|
||||
for path in managed_paths.into_iter() {
|
||||
let mut damaged_files = HashSet::new();
|
||||
for path in managed_paths {
|
||||
if !self.validate_checksum(&path)? {
|
||||
hashset.insert(path);
|
||||
damaged_files.insert(path);
|
||||
}
|
||||
}
|
||||
Ok(hashset)
|
||||
Ok(damaged_files)
|
||||
}
|
||||
}
|
||||
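Usage sketch (directory setup elided; the full flow is exercised in the `test_checksum` test further down), assuming `ManagedDirectory` is re-exported from `tantivy::directory`: scan for managed files whose CRC32 footer no longer matches their content.

```rust
use tantivy::directory::error::OpenReadError;
use tantivy::directory::ManagedDirectory;

// Hypothetical helper: prints every file whose checksum validation failed.
fn report_damage(dir: &ManagedDirectory) -> Result<(), OpenReadError> {
    for path in dir.list_damaged()? {
        eprintln!("checksum mismatch: {:?}", path);
    }
    Ok(())
}
```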
|
||||
impl Directory for ManagedDirectory {
|
||||
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
|
||||
let read_only_source = self.directory.open_read(path)?;
|
||||
let (footer, reader) = Footer::extract_footer(read_only_source)
|
||||
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
|
||||
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
|
||||
let file_slice = self.directory.open_read(path)?;
|
||||
let (footer, reader) = Footer::extract_footer(file_slice)
|
||||
.map_err(|io_error| OpenReadError::wrap_io_error(io_error, path.to_path_buf()))?;
|
||||
footer.is_compatible()?;
|
||||
Ok(reader)
|
||||
}
|
||||
|
||||
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
|
||||
fn open_write(&self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
|
||||
self.register_file_as_managed(path)
|
||||
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
||||
.map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?;
|
||||
Ok(io::BufWriter::new(Box::new(FooterProxy::new(
|
||||
self.directory
|
||||
.open_write(path)?
|
||||
@@ -285,7 +294,7 @@ impl Directory for ManagedDirectory {
|
||||
))))
|
||||
}
|
||||
|
||||
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
|
||||
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> {
|
||||
self.register_file_as_managed(path)?;
|
||||
self.directory.atomic_write(path, data)
|
||||
}
|
||||
@@ -399,39 +408,37 @@ mod tests_mmap_specific {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_checksum() {
|
||||
fn test_checksum() -> crate::Result<()> {
|
||||
let test_path1: &'static Path = Path::new("some_path_for_test");
|
||||
let test_path2: &'static Path = Path::new("other_test_path");
|
||||
|
||||
let tempdir = TempDir::new().unwrap();
|
||||
let tempdir_path = PathBuf::from(tempdir.path());
|
||||
|
||||
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
|
||||
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
|
||||
let mut write = managed_directory.open_write(test_path1).unwrap();
|
||||
write.write_all(&[0u8, 1u8]).unwrap();
|
||||
write.terminate().unwrap();
|
||||
let mmap_directory = MmapDirectory::open(&tempdir_path)?;
|
||||
let managed_directory = ManagedDirectory::wrap(mmap_directory)?;
|
||||
let mut write = managed_directory.open_write(test_path1)?;
|
||||
write.write_all(&[0u8, 1u8])?;
|
||||
write.terminate()?;
|
||||
|
||||
let mut write = managed_directory.open_write(test_path2).unwrap();
|
||||
write.write_all(&[3u8, 4u8, 5u8]).unwrap();
|
||||
write.terminate().unwrap();
|
||||
let mut write = managed_directory.open_write(test_path2)?;
|
||||
write.write_all(&[3u8, 4u8, 5u8])?;
|
||||
write.terminate()?;
|
||||
|
||||
let read_source = managed_directory.open_read(test_path2).unwrap();
|
||||
assert_eq!(read_source.as_slice(), &[3u8, 4u8, 5u8]);
|
||||
let read_file = managed_directory.open_read(test_path2)?.read_bytes()?;
|
||||
assert_eq!(read_file.as_slice(), &[3u8, 4u8, 5u8]);
|
||||
assert!(managed_directory.list_damaged().unwrap().is_empty());
|
||||
|
||||
let mut corrupted_path = tempdir_path.clone();
|
||||
corrupted_path.push(test_path2);
|
||||
let mut file = OpenOptions::new()
|
||||
.write(true)
|
||||
.open(&corrupted_path)
|
||||
.unwrap();
|
||||
file.write_all(&[255u8]).unwrap();
|
||||
file.flush().unwrap();
|
||||
let mut file = OpenOptions::new().write(true).open(&corrupted_path)?;
|
||||
file.write_all(&[255u8])?;
|
||||
file.flush()?;
|
||||
drop(file);
|
||||
|
||||
let damaged = managed_directory.list_damaged().unwrap();
|
||||
let damaged = managed_directory.list_damaged()?;
|
||||
assert_eq!(damaged.len(), 1);
|
||||
assert!(damaged.contains(test_path2));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,14 +1,12 @@
|
||||
use crate::core::META_FILEPATH;
|
||||
use crate::directory::error::LockError;
|
||||
use crate::directory::error::{
|
||||
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
|
||||
};
|
||||
use crate::directory::read_only_source::BoxedData;
|
||||
use crate::directory::error::{DeleteError, OpenDirectoryError, OpenReadError, OpenWriteError};
|
||||
use crate::directory::AntiCallToken;
|
||||
use crate::directory::BoxedData;
|
||||
use crate::directory::Directory;
|
||||
use crate::directory::DirectoryLock;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::Lock;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::directory::WatchCallback;
|
||||
use crate::directory::WatchCallbackList;
|
||||
use crate::directory::WatchHandle;
|
||||
@@ -19,7 +17,7 @@ use notify::RawEvent;
|
||||
use notify::RecursiveMode;
|
||||
use notify::Watcher;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use stable_deref_trait::StableDeref;
|
||||
use std::convert::From;
|
||||
use std::fmt;
|
||||
use std::fs::OpenOptions;
|
||||
@@ -34,6 +32,7 @@ use std::sync::Mutex;
|
||||
use std::sync::RwLock;
|
||||
use std::sync::Weak;
|
||||
use std::thread;
|
||||
use std::{collections::HashMap, ops::Deref};
|
||||
use tempfile::TempDir;
|
||||
|
||||
/// Create a default io error given a string.
|
||||
@@ -44,17 +43,17 @@ pub(crate) fn make_io_err(msg: String) -> io::Error {
|
||||
/// Returns None iff the file exists, can be read, but is empty (and hence
|
||||
/// cannot be mmapped)
|
||||
fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
|
||||
let file = File::open(full_path).map_err(|e| {
|
||||
if e.kind() == io::ErrorKind::NotFound {
|
||||
OpenReadError::FileDoesNotExist(full_path.to_owned())
|
||||
let file = File::open(full_path).map_err(|io_err| {
|
||||
if io_err.kind() == io::ErrorKind::NotFound {
|
||||
OpenReadError::FileDoesNotExist(full_path.to_path_buf())
|
||||
} else {
|
||||
OpenReadError::IOError(IOError::with_path(full_path.to_owned(), e))
|
||||
OpenReadError::wrap_io_error(io_err, full_path.to_path_buf())
|
||||
}
|
||||
})?;
|
||||
|
||||
let meta_data = file
|
||||
.metadata()
|
||||
.map_err(|e| IOError::with_path(full_path.to_owned(), e))?;
|
||||
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_owned()))?;
|
||||
if meta_data.len() == 0 {
|
||||
// if the file size is 0, it will not be possible
|
||||
// to mmap the file, so we return None
|
||||
@@ -64,7 +63,7 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
|
||||
unsafe {
|
||||
memmap::Mmap::map(&file)
|
||||
.map(Some)
|
||||
.map_err(|e| From::from(IOError::with_path(full_path.to_owned(), e)))
|
||||
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_path_buf()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -183,6 +182,10 @@ impl WatcherWrapper {
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
.map_err(|io_error| OpenDirectoryError::IoError {
|
||||
io_error,
|
||||
directory_path: path.to_path_buf(),
|
||||
})?;
|
||||
Ok(WatcherWrapper {
|
||||
_watcher: Mutex::new(watcher),
|
||||
@@ -272,9 +275,11 @@ impl MmapDirectory {
|
||||
/// This is mostly useful to test the MmapDirectory itself.
|
||||
/// For your unit tests, prefer the RAMDirectory.
|
||||
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
|
||||
let tempdir = TempDir::new().map_err(OpenDirectoryError::IoError)?;
|
||||
let tempdir_path = PathBuf::from(tempdir.path());
|
||||
Ok(MmapDirectory::new(tempdir_path, Some(tempdir)))
|
||||
let tempdir = TempDir::new().map_err(OpenDirectoryError::FailedToCreateTempDir)?;
|
||||
Ok(MmapDirectory::new(
|
||||
tempdir.path().to_path_buf(),
|
||||
Some(tempdir),
|
||||
))
|
||||
}
|
||||
|
||||
/// Opens a MmapDirectory in a directory.
|
||||
@@ -396,8 +401,20 @@ impl TerminatingWrite for SafeFileWriter {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct MmapArc(Arc<Box<dyn Deref<Target = [u8]> + Send + Sync>>);
|
||||
|
||||
impl Deref for MmapArc {
|
||||
type Target = [u8];
|
||||
|
||||
fn deref(&self) -> &[u8] {
|
||||
self.0.deref()
|
||||
}
|
||||
}
|
||||
unsafe impl StableDeref for MmapArc {}
|
||||
|
||||
impl Directory for MmapDirectory {
|
||||
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
|
||||
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
|
||||
debug!("Open Read {:?}", path);
|
||||
let full_path = self.resolve_path(path);
|
||||
|
||||
@@ -407,12 +424,14 @@ impl Directory for MmapDirectory {
|
||||
on mmap cache while reading {:?}",
|
||||
path
|
||||
);
|
||||
IOError::with_path(path.to_owned(), make_io_err(msg))
|
||||
let io_err = make_io_err(msg);
|
||||
OpenReadError::wrap_io_error(io_err, path.to_path_buf())
|
||||
})?;
|
||||
Ok(mmap_cache
|
||||
.get_mmap(&full_path)?
|
||||
.map(ReadOnlySource::from)
|
||||
.unwrap_or_else(ReadOnlySource::empty))
|
||||
if let Some(mmap_arc) = mmap_cache.get_mmap(&full_path)? {
|
||||
Ok(FileSlice::from(MmapArc(mmap_arc)))
|
||||
} else {
|
||||
Ok(FileSlice::empty())
|
||||
}
|
||||
}
|
||||
|
||||
/// Any entry associated to the path in the mmap will be
|
||||
@@ -420,14 +439,18 @@ impl Directory for MmapDirectory {
|
||||
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
|
||||
let full_path = self.resolve_path(path);
|
||||
match fs::remove_file(&full_path) {
|
||||
Ok(_) => self
|
||||
.sync_directory()
|
||||
.map_err(|e| IOError::with_path(path.to_owned(), e).into()),
|
||||
Ok(_) => self.sync_directory().map_err(|e| DeleteError::IOError {
|
||||
io_error: e,
|
||||
filepath: path.to_path_buf(),
|
||||
}),
|
||||
Err(e) => {
|
||||
if e.kind() == io::ErrorKind::NotFound {
|
||||
Err(DeleteError::FileDoesNotExist(path.to_owned()))
|
||||
} else {
|
||||
Err(IOError::with_path(path.to_owned(), e).into())
|
||||
Err(DeleteError::IOError {
|
||||
io_error: e,
|
||||
filepath: path.to_path_buf(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -438,7 +461,7 @@ impl Directory for MmapDirectory {
|
||||
full_path.exists()
|
||||
}
|
||||
|
||||
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
||||
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
||||
debug!("Open Write {:?}", path);
|
||||
let full_path = self.resolve_path(path);
|
||||
|
||||
@@ -447,22 +470,22 @@ impl Directory for MmapDirectory {
|
||||
.create_new(true)
|
||||
.open(full_path);
|
||||
|
||||
let mut file = open_res.map_err(|err| {
|
||||
if err.kind() == io::ErrorKind::AlreadyExists {
|
||||
OpenWriteError::FileAlreadyExists(path.to_owned())
|
||||
let mut file = open_res.map_err(|io_err| {
|
||||
if io_err.kind() == io::ErrorKind::AlreadyExists {
|
||||
OpenWriteError::FileAlreadyExists(path.to_path_buf())
|
||||
} else {
|
||||
IOError::with_path(path.to_owned(), err).into()
|
||||
OpenWriteError::wrap_io_error(io_err, path.to_path_buf())
|
||||
}
|
||||
})?;
|
||||
|
||||
// making sure the file is created.
|
||||
file.flush()
|
||||
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
||||
.map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?;
|
||||
|
||||
// Apparently, on some filesystems, syncing the parent
|
||||
// directory is required.
|
||||
self.sync_directory()
|
||||
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
||||
.map_err(|io_err| OpenWriteError::wrap_io_error(io_err, path.to_path_buf()))?;
|
||||
|
||||
let writer = SafeFileWriter::new(file);
|
||||
Ok(BufWriter::new(Box::new(writer)))
|
||||
@@ -473,21 +496,22 @@ impl Directory for MmapDirectory {
|
||||
let mut buffer = Vec::new();
|
||||
match File::open(&full_path) {
|
||||
Ok(mut file) => {
|
||||
file.read_to_end(&mut buffer)
|
||||
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
|
||||
file.read_to_end(&mut buffer).map_err(|io_error| {
|
||||
OpenReadError::wrap_io_error(io_error, path.to_path_buf())
|
||||
})?;
|
||||
Ok(buffer)
|
||||
}
|
||||
Err(e) => {
|
||||
if e.kind() == io::ErrorKind::NotFound {
|
||||
Err(io_error) => {
|
||||
if io_error.kind() == io::ErrorKind::NotFound {
|
||||
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
|
||||
} else {
|
||||
Err(IOError::with_path(path.to_owned(), e).into())
|
||||
Err(OpenReadError::wrap_io_error(io_error, path.to_path_buf()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn atomic_write(&mut self, path: &Path, content: &[u8]) -> io::Result<()> {
|
||||
fn atomic_write(&self, path: &Path, content: &[u8]) -> io::Result<()> {
|
||||
debug!("Atomic Write {:?}", path);
|
||||
let mut tempfile = tempfile::Builder::new().tempfile_in(&self.inner.root_path)?;
|
||||
tempfile.write_all(content)?;
|
||||
@@ -529,10 +553,10 @@ mod tests {
|
||||
// The following tests are specific to the MmapDirectory
|
||||
|
||||
use super::*;
|
||||
use crate::indexer::LogMergePolicy;
|
||||
use crate::schema::{Schema, SchemaBuilder, TEXT};
|
||||
use crate::Index;
|
||||
use crate::ReloadPolicy;
|
||||
use crate::{common::HasLen, indexer::LogMergePolicy};
|
||||
use std::fs;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
@@ -547,7 +571,7 @@ mod tests {
|
||||
// cannot be mmapped.
|
||||
//
|
||||
// In that case the directory returns a SharedVecSlice.
|
||||
let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||
let path = PathBuf::from("test");
|
||||
{
|
||||
let mut w = mmap_directory.open_write(&path).unwrap();
|
||||
@@ -563,7 +587,7 @@ mod tests {
|
||||
|
||||
// here we test if the cache releases
|
||||
// mmaps correctly.
|
||||
let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
|
||||
let num_paths = 10;
|
||||
let paths: Vec<PathBuf> = (0..num_paths)
|
||||
.map(|i| PathBuf::from(&*format!("file_{}", i)))
|
||||
|
||||
@@ -9,10 +9,11 @@ mod mmap_directory;
|
||||
|
||||
mod directory;
|
||||
mod directory_lock;
|
||||
mod file_slice;
|
||||
mod footer;
|
||||
mod managed_directory;
|
||||
mod owned_bytes;
|
||||
mod ram_directory;
|
||||
mod read_only_source;
|
||||
mod watch_event_router;
|
||||
|
||||
/// Errors specific to the directory module.
|
||||
@@ -21,11 +22,14 @@ pub mod error;
|
||||
pub use self::directory::DirectoryLock;
|
||||
pub use self::directory::{Directory, DirectoryClone};
|
||||
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
|
||||
pub(crate) use self::file_slice::BoxedData;
|
||||
pub use self::file_slice::{FileHandle, FileSlice};
|
||||
pub use self::owned_bytes::OwnedBytes;
|
||||
pub use self::ram_directory::RAMDirectory;
|
||||
pub use self::read_only_source::ReadOnlySource;
|
||||
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
|
||||
use std::io::{self, BufWriter, Write};
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Outcome of the Garbage collection
|
||||
pub struct GarbageCollectionResult {
|
||||
/// List of files that were deleted in this cycle
|
||||
|
||||
255 src/directory/owned_bytes.rs (new file)
@@ -0,0 +1,255 @@
|
||||
use crate::directory::FileHandle;
|
||||
use stable_deref_trait::StableDeref;
|
||||
use std::mem;
|
||||
use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
use std::{fmt, io};
|
||||
|
||||
/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
|
||||
/// this data as a static slice.
|
||||
///
|
||||
/// The backing object is required to be `StableDeref`.
|
||||
#[derive(Clone)]
|
||||
pub struct OwnedBytes {
|
||||
data: &'static [u8],
|
||||
box_stable_deref: Arc<dyn Deref<Target = [u8]> + Sync + Send>,
|
||||
}
|
||||
|
||||
impl FileHandle for OwnedBytes {
|
||||
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
|
||||
Ok(self.slice(from, to))
|
||||
}
|
||||
}
|
||||
|
||||
impl OwnedBytes {
|
||||
/// Creates an empty `OwnedBytes`.
|
||||
pub fn empty() -> OwnedBytes {
|
||||
OwnedBytes::new(&[][..])
|
||||
}
|
||||
|
||||
/// Creates an `OwnedBytes` instance given a `StableDeref` object.
|
||||
pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
|
||||
data_holder: T,
|
||||
) -> OwnedBytes {
|
||||
let box_stable_deref = Arc::new(data_holder);
|
||||
let bytes: &[u8] = box_stable_deref.as_ref();
|
||||
let data = unsafe { mem::transmute::<_, &'static [u8]>(bytes.deref()) };
|
||||
OwnedBytes {
|
||||
box_stable_deref,
|
||||
data,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates an `OwnedBytes` that is just a view over a slice of the data.
|
||||
pub fn slice(&self, from: usize, to: usize) -> Self {
|
||||
OwnedBytes {
|
||||
data: &self.data[from..to],
|
||||
box_stable_deref: self.box_stable_deref.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the underlying slice of data.
|
||||
/// `Deref` and `AsRef` are also available.
|
||||
#[inline(always)]
|
||||
pub fn as_slice(&self) -> &[u8] {
|
||||
self.data
|
||||
}
|
||||
|
||||
/// Returns the length of the underlying slice.
|
||||
#[inline(always)]
|
||||
pub fn len(&self) -> usize {
|
||||
self.data.len()
|
||||
}
|
||||
|
||||
/// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
|
||||
///
|
||||
/// Left will hold `split_len` bytes.
|
||||
///
|
||||
/// This operation is cheap and does not require copying any memory.
|
||||
/// On the other hand, both `left` and `right` retain a handle over
|
||||
/// the entire slice of memory. In other words, the memory will only
|
||||
/// be released when both left and right are dropped.
|
||||
pub fn split(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
|
||||
let right_box_stable_deref = self.box_stable_deref.clone();
|
||||
let left = OwnedBytes {
|
||||
data: &self.data[..split_len],
|
||||
box_stable_deref: self.box_stable_deref,
|
||||
};
|
||||
let right = OwnedBytes {
|
||||
data: &self.data[split_len..],
|
||||
box_stable_deref: right_box_stable_deref,
|
||||
};
|
||||
(left, right)
|
||||
}
|
||||
|
||||
/// Returns true iff this `OwnedBytes` is empty.
|
||||
#[inline(always)]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.as_slice().is_empty()
|
||||
}
|
||||
|
||||
/// Drops the left most `advance_len` bytes.
|
||||
///
|
||||
/// See also [.clip(clip_len: usize)](#method.clip).
|
||||
#[inline(always)]
|
||||
pub fn advance(&mut self, advance_len: usize) {
|
||||
self.data = &self.data[advance_len..]
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for OwnedBytes {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
// We truncate the bytes in order to make sure the debug string
|
||||
// is not too long.
|
||||
let bytes_truncated: &[u8] = if self.len() > 10 {
|
||||
&self.as_slice()[..10]
|
||||
} else {
|
||||
self.as_slice()
|
||||
};
|
||||
write!(f, "OwnedBytes({:?}, len={})", bytes_truncated, self.len())
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for OwnedBytes {
|
||||
type Target = [u8];
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
self.as_slice()
|
||||
}
|
||||
}
|
||||
|
||||
impl io::Read for OwnedBytes {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
let read_len = {
|
||||
let data = self.as_slice();
|
||||
if data.len() >= buf.len() {
|
||||
let buf_len = buf.len();
|
||||
buf.copy_from_slice(&data[..buf_len]);
|
||||
buf.len()
|
||||
} else {
|
||||
let data_len = data.len();
|
||||
buf[..data_len].copy_from_slice(data);
|
||||
data_len
|
||||
}
|
||||
};
|
||||
self.advance(read_len);
|
||||
Ok(read_len)
|
||||
}
|
||||
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
let read_len = {
|
||||
let data = self.as_slice();
|
||||
buf.extend(data);
|
||||
data.len()
|
||||
};
|
||||
self.advance(read_len);
|
||||
Ok(read_len)
|
||||
}
|
||||
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
|
||||
let read_len = self.read(buf)?;
|
||||
if read_len != buf.len() {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
"failed to fill whole buffer",
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<[u8]> for OwnedBytes {
|
||||
fn as_ref(&self) -> &[u8] {
|
||||
self.as_slice()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io::{self, Read};
|
||||
|
||||
use super::OwnedBytes;
|
||||
|
||||
#[test]
|
||||
fn test_owned_bytes_debug() {
|
||||
let short_bytes = OwnedBytes::new(b"abcd".as_ref());
|
||||
assert_eq!(
|
||||
format!("{:?}", short_bytes),
|
||||
"OwnedBytes([97, 98, 99, 100], len=4)"
|
||||
);
|
||||
let long_bytes = OwnedBytes::new(b"abcdefghijklmnopq".as_ref());
|
||||
assert_eq!(
|
||||
format!("{:?}", long_bytes),
|
||||
"OwnedBytes([97, 98, 99, 100, 101, 102, 103, 104, 105, 106], len=17)"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_owned_bytes_read() -> io::Result<()> {
|
||||
let mut bytes = OwnedBytes::new(b"abcdefghiklmnopqrstuvwxyz".as_ref());
|
||||
{
|
||||
let mut buf = [0u8; 5];
|
||||
bytes.read_exact(&mut buf[..]).unwrap();
|
||||
assert_eq!(&buf, b"abcde");
|
||||
assert_eq!(bytes.as_slice(), b"fghiklmnopqrstuvwxyz")
|
||||
}
|
||||
{
|
||||
let mut buf = [0u8; 2];
|
||||
bytes.read_exact(&mut buf[..]).unwrap();
|
||||
assert_eq!(&buf, b"fg");
|
||||
assert_eq!(bytes.as_slice(), b"hiklmnopqrstuvwxyz")
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_owned_bytes_read_right_at_the_end() -> io::Result<()> {
|
||||
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
|
||||
let mut buf = [0u8; 5];
|
||||
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
|
||||
assert_eq!(&buf, b"abcde");
|
||||
assert_eq!(bytes.as_slice(), b"");
|
||||
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
|
||||
assert_eq!(&buf, b"abcde");
|
||||
Ok(())
|
||||
}
|
||||
#[test]
|
||||
fn test_owned_bytes_read_incomplete() -> io::Result<()> {
|
||||
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
|
||||
let mut buf = [0u8; 7];
|
||||
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
|
||||
assert_eq!(&buf[..5], b"abcde");
|
||||
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_owned_bytes_read_to_end() -> io::Result<()> {
|
||||
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
|
||||
let mut buf = Vec::new();
|
||||
bytes.read_to_end(&mut buf)?;
|
||||
assert_eq!(buf.as_slice(), b"abcde".as_ref());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_owned_bytes_split() {
|
||||
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
|
||||
let (left, right) = bytes.split(3);
|
||||
assert_eq!(left.as_slice(), b"abc");
|
||||
assert_eq!(right.as_slice(), b"defghi");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_owned_bytes_split_boundary() {
|
||||
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
|
||||
{
|
||||
let (left, right) = bytes.clone().split(0);
|
||||
assert_eq!(left.as_slice(), b"");
|
||||
assert_eq!(right.as_slice(), b"abcdefghi");
|
||||
}
|
||||
{
|
||||
let (left, right) = bytes.split(9);
|
||||
assert_eq!(left.as_slice(), b"abcdefghi");
|
||||
assert_eq!(right.as_slice(), b"");
|
||||
}
|
||||
}
|
||||
}
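For reference, a minimal, hedged usage sketch of the OwnedBytes type added in this file. It relies only on the API shown above (new, as_slice, len, split and the io::Read impl); the function name is illustrative.

use std::io::Read;
use tantivy::directory::OwnedBytes;

fn owned_bytes_sketch() -> std::io::Result<()> {
    // Any StableDeref owner of a byte slice works; a Vec<u8> is the simplest.
    let bytes = OwnedBytes::new(vec![1u8, 2, 3, 4, 5]);
    assert_eq!(bytes.len(), 5);
    // split is cheap: both halves keep a handle on the same backing Arc.
    let (left, mut right) = bytes.split(2);
    assert_eq!(left.as_slice(), &[1u8, 2]);
    // OwnedBytes implements io::Read and advances over its remaining bytes.
    let mut buf = [0u8; 3];
    right.read_exact(&mut buf)?;
    assert_eq!(&buf, &[3u8, 4, 5]);
    Ok(())
}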
|
||||
@@ -1,9 +1,9 @@
|
||||
use crate::core::META_FILEPATH;
|
||||
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
|
||||
use crate::directory::AntiCallToken;
|
||||
use crate::directory::WatchCallbackList;
|
||||
use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
|
||||
use crate::directory::{Directory, FileSlice, WatchCallback, WatchHandle};
|
||||
use crate::directory::{TerminatingWrite, WritePtr};
|
||||
use crate::{common::HasLen, core::META_FILEPATH};
|
||||
use fail::fail_point;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
@@ -80,17 +80,17 @@ impl TerminatingWrite for VecWriter {
|
||||
|
||||
#[derive(Default)]
|
||||
struct InnerDirectory {
|
||||
fs: HashMap<PathBuf, ReadOnlySource>,
|
||||
fs: HashMap<PathBuf, FileSlice>,
|
||||
watch_router: WatchCallbackList,
|
||||
}
|
||||
|
||||
impl InnerDirectory {
|
||||
fn write(&mut self, path: PathBuf, data: &[u8]) -> bool {
|
||||
let data = ReadOnlySource::new(Vec::from(data));
|
||||
let data = FileSlice::from(data.to_vec());
|
||||
self.fs.insert(path, data).is_some()
|
||||
}
|
||||
|
||||
fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
|
||||
fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError> {
|
||||
self.fs
|
||||
.get(path)
|
||||
.ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
|
||||
@@ -151,11 +151,11 @@ impl RAMDirectory {
|
||||
/// written using the `atomic_write` api.
|
||||
///
|
||||
/// If an error is encountered, files may be persisted partially.
|
||||
pub fn persist(&self, dest: &mut dyn Directory) -> crate::Result<()> {
|
||||
pub fn persist(&self, dest: &dyn Directory) -> crate::Result<()> {
|
||||
let wlock = self.fs.write().unwrap();
|
||||
for (path, source) in wlock.fs.iter() {
|
||||
for (path, file) in wlock.fs.iter() {
|
||||
let mut dest_wrt = dest.open_write(path)?;
|
||||
dest_wrt.write_all(source.as_slice())?;
|
||||
dest_wrt.write_all(file.read_bytes()?.as_slice())?;
|
||||
dest_wrt.terminate()?;
|
||||
}
|
||||
Ok(())
|
||||
@@ -163,15 +163,16 @@ impl RAMDirectory {
|
||||
}
|
||||
|
||||
impl Directory for RAMDirectory {
|
||||
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
|
||||
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
|
||||
self.fs.read().unwrap().open_read(path)
|
||||
}
|
||||
|
||||
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
|
||||
fail_point!("RAMDirectory::delete", |_| {
|
||||
use crate::directory::error::IOError;
|
||||
let io_error = IOError::from(io::Error::from(io::ErrorKind::Other));
|
||||
Err(DeleteError::from(io_error))
|
||||
Err(DeleteError::IOError {
|
||||
io_error: io::Error::from(io::ErrorKind::Other),
|
||||
filepath: path.to_path_buf(),
|
||||
})
|
||||
});
|
||||
self.fs.write().unwrap().delete(path)
|
||||
}
|
||||
@@ -180,7 +181,7 @@ impl Directory for RAMDirectory {
|
||||
self.fs.read().unwrap().exists(path)
|
||||
}
|
||||
|
||||
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
||||
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> {
|
||||
let mut fs = self.fs.write().unwrap();
|
||||
let path_buf = PathBuf::from(path);
|
||||
let vec_writer = VecWriter::new(path_buf.clone(), self.clone());
|
||||
@@ -194,10 +195,17 @@ impl Directory for RAMDirectory {
|
||||
}
|
||||
|
||||
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
|
||||
Ok(self.open_read(path)?.as_slice().to_owned())
|
||||
let bytes =
|
||||
self.open_read(path)?
|
||||
.read_bytes()
|
||||
.map_err(|io_error| OpenReadError::IOError {
|
||||
io_error,
|
||||
filepath: path.to_path_buf(),
|
||||
})?;
|
||||
Ok(bytes.as_slice().to_owned())
|
||||
}
|
||||
|
||||
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
|
||||
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> {
|
||||
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
msg.unwrap_or_else(|| "Undefined".to_string())
|
||||
@@ -234,13 +242,13 @@ mod tests {
|
||||
let msg_seq: &'static [u8] = b"sequential is the way";
|
||||
let path_atomic: &'static Path = Path::new("atomic");
|
||||
let path_seq: &'static Path = Path::new("seq");
|
||||
let mut directory = RAMDirectory::create();
|
||||
let directory = RAMDirectory::create();
|
||||
assert!(directory.atomic_write(path_atomic, msg_atomic).is_ok());
|
||||
let mut wrt = directory.open_write(path_seq).unwrap();
|
||||
assert!(wrt.write_all(msg_seq).is_ok());
|
||||
assert!(wrt.flush().is_ok());
|
||||
let mut directory_copy = RAMDirectory::create();
|
||||
assert!(directory.persist(&mut directory_copy).is_ok());
|
||||
let directory_copy = RAMDirectory::create();
|
||||
assert!(directory.persist(&directory_copy).is_ok());
|
||||
assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic);
|
||||
assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq);
|
||||
}
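A hedged sketch of what the switch from &mut self to &self buys for RAMDirectory: clones of the directory handle share the same in-memory filesystem, so a clone can write while the original handle reads. Only methods appearing in this diff are used; the function name is illustrative.

use std::io::Write;
use std::path::Path;
use tantivy::directory::{Directory, RAMDirectory};

fn write_through_shared_handle() -> tantivy::Result<()> {
    let directory = RAMDirectory::create();
    // open_write and atomic_write now take &self, so a cheap clone of the
    // handle is enough to write from elsewhere.
    let directory_clone = directory.clone();
    let mut wrt = directory_clone.open_write(Path::new("hello"))?;
    wrt.write_all(b"hello")?;
    wrt.flush()?;
    // Both handles point to the same underlying map.
    assert_eq!(directory.atomic_read(Path::new("hello"))?, b"hello".to_vec());
    Ok(())
}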
|
||||
|
||||
@@ -1,137 +0,0 @@
|
||||
use crate::common::HasLen;
|
||||
use stable_deref_trait::{CloneStableDeref, StableDeref};
|
||||
use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
|
||||
|
||||
/// Read object that represents files in tantivy.
|
||||
///
|
||||
/// These read objects are only in charge of delivering
/// the data in the form of a constant read-only `&[u8]`.
/// Whatever happens to the directory file, the data
/// held by this object should never be altered or destroyed.
|
||||
pub struct ReadOnlySource {
|
||||
data: Arc<BoxedData>,
|
||||
start: usize,
|
||||
stop: usize,
|
||||
}
|
||||
|
||||
unsafe impl StableDeref for ReadOnlySource {}
|
||||
unsafe impl CloneStableDeref for ReadOnlySource {}
|
||||
|
||||
impl Deref for ReadOnlySource {
|
||||
type Target = [u8];
|
||||
|
||||
fn deref(&self) -> &[u8] {
|
||||
self.as_slice()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Arc<BoxedData>> for ReadOnlySource {
|
||||
fn from(data: Arc<BoxedData>) -> Self {
|
||||
let len = data.len();
|
||||
ReadOnlySource {
|
||||
data,
|
||||
start: 0,
|
||||
stop: len,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ReadOnlySource {
|
||||
pub(crate) fn new<D>(data: D) -> ReadOnlySource
|
||||
where
|
||||
D: Deref<Target = [u8]> + Send + Sync + 'static,
|
||||
{
|
||||
let len = data.len();
|
||||
ReadOnlySource {
|
||||
data: Arc::new(Box::new(data)),
|
||||
start: 0,
|
||||
stop: len,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates an empty ReadOnlySource
|
||||
pub fn empty() -> ReadOnlySource {
|
||||
ReadOnlySource::new(&[][..])
|
||||
}
|
||||
|
||||
/// Returns the data underlying the ReadOnlySource object.
|
||||
pub fn as_slice(&self) -> &[u8] {
|
||||
&self.data[self.start..self.stop]
|
||||
}
|
||||
|
||||
/// Splits into 2 `ReadOnlySource`, at the offset given
|
||||
/// as an argument.
|
||||
pub fn split(self, addr: usize) -> (ReadOnlySource, ReadOnlySource) {
|
||||
let left = self.slice(0, addr);
|
||||
let right = self.slice_from(addr);
|
||||
(left, right)
|
||||
}
|
||||
|
||||
/// Splits into 2 `ReadOnlySource`, at the offset `end - right_len`.
|
||||
pub fn split_from_end(self, right_len: usize) -> (ReadOnlySource, ReadOnlySource) {
|
||||
let left_len = self.len() - right_len;
|
||||
self.split(left_len)
|
||||
}
|
||||
|
||||
/// Creates a ReadOnlySource that is just a
|
||||
/// view over a slice of the data.
|
||||
///
|
||||
/// Keep in mind that any living slice extends
|
||||
/// the lifetime of the original ReadOnlySource.
///
/// For instance, if `ReadOnlySource` wraps 500MB
/// worth of data in anonymous memory, and only a
/// 1KB slice remains, the whole 500MB
/// is retained in memory.
|
||||
pub fn slice(&self, start: usize, stop: usize) -> ReadOnlySource {
|
||||
assert!(
|
||||
start <= stop,
|
||||
"Requested negative slice [{}..{}]",
|
||||
start,
|
||||
stop
|
||||
);
|
||||
assert!(stop <= self.len());
|
||||
ReadOnlySource {
|
||||
data: self.data.clone(),
|
||||
start: self.start + start,
|
||||
stop: self.start + stop,
|
||||
}
|
||||
}
|
||||
|
||||
/// Like `.slice(...)` but enforcing only the `from`
|
||||
/// boundary.
|
||||
///
|
||||
/// Equivalent to `.slice(from_offset, self.len())`
|
||||
pub fn slice_from(&self, from_offset: usize) -> ReadOnlySource {
|
||||
self.slice(from_offset, self.len())
|
||||
}
|
||||
|
||||
/// Like `.slice(...)` but enforcing only the `to`
|
||||
/// boundary.
|
||||
///
|
||||
/// Equivalent to `.slice(0, to_offset)`
|
||||
pub fn slice_to(&self, to_offset: usize) -> ReadOnlySource {
|
||||
self.slice(0, to_offset)
|
||||
}
|
||||
}
|
||||
|
||||
impl HasLen for ReadOnlySource {
|
||||
fn len(&self) -> usize {
|
||||
self.stop - self.start
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for ReadOnlySource {
|
||||
fn clone(&self) -> Self {
|
||||
self.slice_from(0)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Vec<u8>> for ReadOnlySource {
|
||||
fn from(data: Vec<u8>) -> ReadOnlySource {
|
||||
ReadOnlySource::new(data)
|
||||
}
|
||||
}
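read_only_source.rs is removed; the FileSlice/OwnedBytes pair introduced above takes over its role. A minimal migration sketch, using only the calls that appear elsewhere in this diff (FileSlice::from(Vec<u8>), read_bytes, as_slice); the function name is illustrative.

use tantivy::directory::FileSlice;

fn file_slice_instead_of_read_only_source() -> std::io::Result<()> {
    // Previously: ReadOnlySource::from(vec) and as_slice() directly.
    let file = FileSlice::from(vec![4u8, 3, 7, 3, 5]);
    // Now the bytes are materialized explicitly through read_bytes().
    let bytes = file.read_bytes()?;
    assert_eq!(bytes.as_slice(), &[4u8, 3, 7, 3, 5]);
    Ok(())
}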
|
||||
@@ -20,45 +20,47 @@ mod mmap_directory_tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple() {
|
||||
let mut directory = make_directory();
|
||||
super::test_simple(&mut directory);
|
||||
fn test_simple() -> crate::Result<()> {
|
||||
let directory = make_directory();
|
||||
super::test_simple(&directory)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_create_the_file() {
|
||||
let mut directory = make_directory();
|
||||
super::test_write_create_the_file(&mut directory);
|
||||
let directory = make_directory();
|
||||
super::test_write_create_the_file(&directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rewrite_forbidden() {
|
||||
let mut directory = make_directory();
|
||||
super::test_rewrite_forbidden(&mut directory);
|
||||
fn test_rewrite_forbidden() -> crate::Result<()> {
|
||||
let directory = make_directory();
|
||||
super::test_rewrite_forbidden(&directory)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_directory_delete() {
|
||||
let mut directory = make_directory();
|
||||
super::test_directory_delete(&mut directory);
|
||||
fn test_directory_delete() -> crate::Result<()> {
|
||||
let directory = make_directory();
|
||||
super::test_directory_delete(&directory)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lock_non_blocking() {
|
||||
let mut directory = make_directory();
|
||||
super::test_lock_non_blocking(&mut directory);
|
||||
let directory = make_directory();
|
||||
super::test_lock_non_blocking(&directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lock_blocking() {
|
||||
let mut directory = make_directory();
|
||||
super::test_lock_blocking(&mut directory);
|
||||
let directory = make_directory();
|
||||
super::test_lock_blocking(&directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_watch() {
|
||||
let mut directory = make_directory();
|
||||
super::test_watch(&mut directory);
|
||||
let directory = make_directory();
|
||||
super::test_watch(&directory);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,45 +74,47 @@ mod ram_directory_tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple() {
|
||||
let mut directory = make_directory();
|
||||
super::test_simple(&mut directory);
|
||||
fn test_simple() -> crate::Result<()> {
|
||||
let directory = make_directory();
|
||||
super::test_simple(&directory)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_create_the_file() {
|
||||
let mut directory = make_directory();
|
||||
super::test_write_create_the_file(&mut directory);
|
||||
let directory = make_directory();
|
||||
super::test_write_create_the_file(&directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rewrite_forbidden() {
|
||||
let mut directory = make_directory();
|
||||
super::test_rewrite_forbidden(&mut directory);
|
||||
fn test_rewrite_forbidden() -> crate::Result<()> {
|
||||
let directory = make_directory();
|
||||
super::test_rewrite_forbidden(&directory)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_directory_delete() {
|
||||
let mut directory = make_directory();
|
||||
super::test_directory_delete(&mut directory);
|
||||
fn test_directory_delete() -> crate::Result<()> {
|
||||
let directory = make_directory();
|
||||
super::test_directory_delete(&directory)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lock_non_blocking() {
|
||||
let mut directory = make_directory();
|
||||
super::test_lock_non_blocking(&mut directory);
|
||||
let directory = make_directory();
|
||||
super::test_lock_non_blocking(&directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lock_blocking() {
|
||||
let mut directory = make_directory();
|
||||
super::test_lock_blocking(&mut directory);
|
||||
let directory = make_directory();
|
||||
super::test_lock_blocking(&directory);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_watch() {
|
||||
let mut directory = make_directory();
|
||||
super::test_watch(&mut directory);
|
||||
let directory = make_directory();
|
||||
super::test_watch(&directory);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -118,43 +122,37 @@ mod ram_directory_tests {
|
||||
#[should_panic]
|
||||
fn ram_directory_panics_if_flush_forgotten() {
|
||||
let test_path: &'static Path = Path::new("some_path_for_test");
|
||||
let mut ram_directory = RAMDirectory::create();
|
||||
let ram_directory = RAMDirectory::create();
|
||||
let mut write_file = ram_directory.open_write(test_path).unwrap();
|
||||
assert!(write_file.write_all(&[4]).is_ok());
|
||||
}
|
||||
|
||||
fn test_simple(directory: &mut dyn Directory) {
|
||||
fn test_simple(directory: &dyn Directory) -> crate::Result<()> {
|
||||
let test_path: &'static Path = Path::new("some_path_for_test");
|
||||
{
|
||||
let mut write_file = directory.open_write(test_path).unwrap();
|
||||
assert!(directory.exists(test_path));
|
||||
write_file.write_all(&[4]).unwrap();
|
||||
write_file.write_all(&[3]).unwrap();
|
||||
write_file.write_all(&[7, 3, 5]).unwrap();
|
||||
write_file.flush().unwrap();
|
||||
}
|
||||
{
|
||||
let read_file = directory.open_read(test_path).unwrap();
|
||||
let data: &[u8] = &*read_file;
|
||||
assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]);
|
||||
}
|
||||
let mut write_file = directory.open_write(test_path)?;
|
||||
assert!(directory.exists(test_path));
|
||||
write_file.write_all(&[4])?;
|
||||
write_file.write_all(&[3])?;
|
||||
write_file.write_all(&[7, 3, 5])?;
|
||||
write_file.flush()?;
|
||||
let read_file = directory.open_read(test_path)?.read_bytes()?;
|
||||
assert_eq!(read_file.as_slice(), &[4u8, 3u8, 7u8, 3u8, 5u8]);
|
||||
mem::drop(read_file);
|
||||
assert!(directory.delete(test_path).is_ok());
|
||||
assert!(!directory.exists(test_path));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_rewrite_forbidden(directory: &mut dyn Directory) {
|
||||
fn test_rewrite_forbidden(directory: &dyn Directory) -> crate::Result<()> {
|
||||
let test_path: &'static Path = Path::new("some_path_for_test");
|
||||
{
|
||||
directory.open_write(test_path).unwrap();
|
||||
assert!(directory.exists(test_path));
|
||||
}
|
||||
{
|
||||
assert!(directory.open_write(test_path).is_err());
|
||||
}
|
||||
directory.open_write(test_path)?;
|
||||
assert!(directory.exists(test_path));
|
||||
assert!(directory.open_write(test_path).is_err());
|
||||
assert!(directory.delete(test_path).is_ok());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_write_create_the_file(directory: &mut dyn Directory) {
|
||||
fn test_write_create_the_file(directory: &dyn Directory) {
|
||||
let test_path: &'static Path = Path::new("some_path_for_test");
|
||||
{
|
||||
assert!(directory.open_read(test_path).is_err());
|
||||
@@ -165,21 +163,20 @@ fn test_write_create_the_file(directory: &mut dyn Directory) {
|
||||
}
|
||||
}
|
||||
|
||||
fn test_directory_delete(directory: &mut dyn Directory) {
|
||||
fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> {
|
||||
let test_path: &'static Path = Path::new("some_path_for_test");
|
||||
assert!(directory.open_read(test_path).is_err());
|
||||
let mut write_file = directory.open_write(&test_path).unwrap();
|
||||
write_file.write_all(&[1, 2, 3, 4]).unwrap();
|
||||
write_file.flush().unwrap();
|
||||
let mut write_file = directory.open_write(&test_path)?;
|
||||
write_file.write_all(&[1, 2, 3, 4])?;
|
||||
write_file.flush()?;
|
||||
{
|
||||
let read_handle = directory.open_read(&test_path).unwrap();
|
||||
assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
|
||||
let read_handle = directory.open_read(&test_path)?.read_bytes()?;
|
||||
assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]);
|
||||
// Mapped files can't be deleted on Windows
|
||||
if !cfg!(windows) {
|
||||
assert!(directory.delete(&test_path).is_ok());
|
||||
assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
|
||||
assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]);
|
||||
}
|
||||
|
||||
assert!(directory.delete(Path::new("SomeOtherPath")).is_err());
|
||||
}
|
||||
|
||||
@@ -189,9 +186,10 @@ fn test_directory_delete(directory: &mut dyn Directory) {
|
||||
|
||||
assert!(directory.open_read(&test_path).is_err());
|
||||
assert!(directory.delete(&test_path).is_err());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_watch(directory: &mut dyn Directory) {
|
||||
fn test_watch(directory: &dyn Directory) {
|
||||
let num_progress: Arc<AtomicUsize> = Default::default();
|
||||
let counter: Arc<AtomicUsize> = Default::default();
|
||||
let counter_clone = counter.clone();
|
||||
@@ -226,7 +224,7 @@ fn test_watch(directory: &mut dyn Directory) {
|
||||
assert!(10 <= counter.load(SeqCst));
|
||||
}
|
||||
|
||||
fn test_lock_non_blocking(directory: &mut dyn Directory) {
|
||||
fn test_lock_non_blocking(directory: &dyn Directory) {
|
||||
{
|
||||
let lock_a_res = directory.acquire_lock(&Lock {
|
||||
filepath: PathBuf::from("a.lock"),
|
||||
@@ -251,7 +249,7 @@ fn test_lock_non_blocking(directory: &mut dyn Directory) {
|
||||
assert!(lock_a_res.is_ok());
|
||||
}
|
||||
|
||||
fn test_lock_blocking(directory: &mut dyn Directory) {
|
||||
fn test_lock_blocking(directory: &dyn Directory) {
|
||||
let lock_a_res = directory.acquire_lock(&Lock {
|
||||
filepath: PathBuf::from("a.lock"),
|
||||
is_blocking: true,
|
||||
|
||||
@@ -29,6 +29,13 @@ impl WatchHandle {
|
||||
pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle {
|
||||
WatchHandle(watch_callback)
|
||||
}
|
||||
|
||||
/// Returns an empty watch handle.
|
||||
///
|
||||
/// This function is only useful when implementing a readonly directory.
|
||||
pub fn empty() -> WatchHandle {
|
||||
WatchHandle::new(Arc::new(Box::new(|| {})))
|
||||
}
|
||||
}
|
||||
|
||||
impl WatchCallbackList {
|
||||
|
||||
110 src/error.rs
@@ -2,21 +2,27 @@
|
||||
|
||||
use std::io;
|
||||
|
||||
use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
|
||||
use crate::directory::error::{Incompatibility, LockError};
|
||||
use crate::fastfield::FastFieldNotAvailableError;
|
||||
use crate::query;
|
||||
use crate::schema;
|
||||
use crate::{
|
||||
directory::error::{OpenDirectoryError, OpenReadError, OpenWriteError},
|
||||
schema,
|
||||
};
|
||||
use std::fmt;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::PoisonError;
|
||||
|
||||
/// Represents a `DataCorruption` error.
|
||||
///
|
||||
/// When facing data corruption, tantivy either panics or returns this error.
|
||||
pub struct DataCorruption {
|
||||
filepath: Option<PathBuf>,
|
||||
comment: String,
|
||||
}
|
||||
|
||||
impl DataCorruption {
|
||||
/// Creates a `DataCorruption` Error.
|
||||
pub fn new(filepath: PathBuf, comment: String) -> DataCorruption {
|
||||
DataCorruption {
|
||||
filepath: Some(filepath),
|
||||
@@ -24,10 +30,11 @@ impl DataCorruption {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn comment_only(comment: String) -> DataCorruption {
|
||||
/// Creates a `DataCorruption` Error, when the filepath is irrelevant.
|
||||
pub fn comment_only<TStr: ToString>(comment: TStr) -> DataCorruption {
|
||||
DataCorruption {
|
||||
filepath: None,
|
||||
comment,
|
||||
comment: comment.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -43,44 +50,47 @@ impl fmt::Debug for DataCorruption {
|
||||
}
|
||||
}
|
||||
|
||||
/// The library's failure based error enum
|
||||
#[derive(Debug, Fail)]
|
||||
/// The library's error enum
|
||||
#[derive(Debug, Error)]
|
||||
pub enum TantivyError {
|
||||
/// Path does not exist.
|
||||
#[fail(display = "Path does not exist: '{:?}'", _0)]
|
||||
PathDoesNotExist(PathBuf),
|
||||
/// File already exists, this is a problem when we try to write into a new file.
|
||||
#[fail(display = "File already exists: '{:?}'", _0)]
|
||||
FileAlreadyExists(PathBuf),
|
||||
/// Failed to open the directory.
|
||||
#[error("Failed to open the directory: '{0:?}'")]
|
||||
OpenDirectoryError(#[from] OpenDirectoryError),
|
||||
/// Failed to open a file for read.
|
||||
#[error("Failed to open file for read: '{0:?}'")]
|
||||
OpenReadError(#[from] OpenReadError),
|
||||
/// Failed to open a file for write.
|
||||
#[error("Failed to open file for write: '{0:?}'")]
|
||||
OpenWriteError(#[from] OpenWriteError),
|
||||
/// Index already exists in this directory
|
||||
#[fail(display = "Index already exists")]
|
||||
#[error("Index already exists")]
|
||||
IndexAlreadyExists,
|
||||
/// Failed to acquire file lock
|
||||
#[fail(display = "Failed to acquire Lockfile: {:?}. {:?}", _0, _1)]
|
||||
#[error("Failed to acquire Lockfile: {0:?}. {1:?}")]
|
||||
LockFailure(LockError, Option<String>),
|
||||
/// IO Error.
|
||||
#[fail(display = "An IO error occurred: '{}'", _0)]
|
||||
IOError(#[cause] IOError),
|
||||
#[error("An IO error occurred: '{0}'")]
|
||||
IOError(#[from] io::Error),
|
||||
/// Data corruption.
|
||||
#[fail(display = "{:?}", _0)]
|
||||
#[error("Data corrupted: '{0:?}'")]
|
||||
DataCorruption(DataCorruption),
|
||||
/// A thread holding the lock panicked and poisoned the lock.
#[fail(display = "A thread holding the locked panicked and poisoned the lock")]
#[error("A thread holding the lock panicked and poisoned the lock")]
|
||||
Poisoned,
|
||||
/// Invalid argument was passed by the user.
|
||||
#[fail(display = "An invalid argument was passed: '{}'", _0)]
|
||||
#[error("An invalid argument was passed: '{0}'")]
|
||||
InvalidArgument(String),
|
||||
/// An error happened in one of the threads.
|
||||
#[fail(display = "An error occurred in a thread: '{}'", _0)]
|
||||
#[error("An error occurred in a thread: '{0}'")]
|
||||
ErrorInThread(String),
|
||||
/// An error occurred related to the schema.
|
||||
#[fail(display = "Schema error: '{}'", _0)]
|
||||
#[error("Schema error: '{0}'")]
|
||||
SchemaError(String),
|
||||
/// System error. (e.g.: We failed spawning a new thread)
|
||||
#[fail(display = "System error.'{}'", _0)]
|
||||
#[error("System error.'{0}'")]
|
||||
SystemError(String),
|
||||
/// Index incompatible with current version of tantivy
|
||||
#[fail(display = "{:?}", _0)]
|
||||
#[error("{0:?}")]
|
||||
IncompatibleIndex(Incompatibility),
|
||||
}
|
||||
|
||||
@@ -89,31 +99,17 @@ impl From<DataCorruption> for TantivyError {
|
||||
TantivyError::DataCorruption(data_corruption)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<FastFieldNotAvailableError> for TantivyError {
|
||||
fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError {
|
||||
TantivyError::SchemaError(format!("{}", fastfield_error))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<LockError> for TantivyError {
|
||||
fn from(lock_error: LockError) -> TantivyError {
|
||||
TantivyError::LockFailure(lock_error, None)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<IOError> for TantivyError {
|
||||
fn from(io_error: IOError) -> TantivyError {
|
||||
TantivyError::IOError(io_error)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for TantivyError {
|
||||
fn from(io_error: io::Error) -> TantivyError {
|
||||
TantivyError::IOError(io_error.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<query::QueryParserError> for TantivyError {
|
||||
fn from(parsing_error: query::QueryParserError) -> TantivyError {
|
||||
TantivyError::InvalidArgument(format!("Query is invalid. {:?}", parsing_error))
|
||||
@@ -126,15 +122,9 @@ impl<Guard> From<PoisonError<Guard>> for TantivyError {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<OpenReadError> for TantivyError {
|
||||
fn from(error: OpenReadError) -> TantivyError {
|
||||
match error {
|
||||
OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
|
||||
OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
|
||||
OpenReadError::IncompatibleIndex(incompatibility) => {
|
||||
TantivyError::IncompatibleIndex(incompatibility)
|
||||
}
|
||||
}
|
||||
impl From<chrono::ParseError> for TantivyError {
|
||||
fn from(err: chrono::ParseError) -> TantivyError {
|
||||
TantivyError::InvalidArgument(err.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -144,35 +134,9 @@ impl From<schema::DocParsingError> for TantivyError {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<OpenWriteError> for TantivyError {
|
||||
fn from(error: OpenWriteError) -> TantivyError {
|
||||
match error {
|
||||
OpenWriteError::FileAlreadyExists(filepath) => {
|
||||
TantivyError::FileAlreadyExists(filepath)
|
||||
}
|
||||
OpenWriteError::IOError(io_error) => TantivyError::IOError(io_error),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<OpenDirectoryError> for TantivyError {
|
||||
fn from(error: OpenDirectoryError) -> TantivyError {
|
||||
match error {
|
||||
OpenDirectoryError::DoesNotExist(directory_path) => {
|
||||
TantivyError::PathDoesNotExist(directory_path)
|
||||
}
|
||||
OpenDirectoryError::NotADirectory(directory_path) => {
|
||||
TantivyError::InvalidArgument(format!("{:?} is not a directory", directory_path))
|
||||
}
|
||||
OpenDirectoryError::IoError(err) => TantivyError::IOError(IOError::from(err)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<serde_json::Error> for TantivyError {
|
||||
fn from(error: serde_json::Error) -> TantivyError {
|
||||
let io_err = io::Error::from(error);
|
||||
TantivyError::IOError(io_err.into())
|
||||
TantivyError::IOError(error.into())
|
||||
}
|
||||
}
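With the move to thiserror, the remaining From impls plus the #[from] attributes mean a plain ? is enough to bubble io::Error or serde_json::Error up as TantivyError. A hedged sketch (it assumes serde_json is available as a dependency; the function and file name are illustrative):

use tantivy::TantivyError;

fn read_meta(dir: &std::path::Path) -> Result<serde_json::Value, TantivyError> {
    // io::Error converts via #[from] io::Error on TantivyError::IOError.
    let json = std::fs::read_to_string(dir.join("meta.json"))?;
    // serde_json::Error converts via the From impl right above.
    let value: serde_json::Value = serde_json::from_str(&json)?;
    Ok(value)
}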
|
||||
|
||||
|
||||
@@ -6,31 +6,114 @@ pub use self::writer::BytesFastFieldWriter;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::schema::Schema;
|
||||
use crate::Index;
|
||||
use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value};
|
||||
use crate::{query::TermQuery, schema::FAST, schema::INDEXED, schema::STORED};
|
||||
use crate::{DocAddress, DocSet, Index, Searcher, Term};
|
||||
use std::ops::Deref;
|
||||
|
||||
#[test]
|
||||
fn test_bytes() {
|
||||
fn test_bytes() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_bytes_field("bytesfield");
|
||||
let bytes_field = schema_builder.add_bytes_field("bytesfield", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer.add_document(doc!(field=>vec![0u8, 1, 2, 3]));
|
||||
index_writer.add_document(doc!(field=>vec![]));
|
||||
index_writer.add_document(doc!(field=>vec![255u8]));
|
||||
index_writer.add_document(doc!(field=>vec![1u8, 3, 5, 7, 9]));
|
||||
index_writer.add_document(doc!(field=>vec![0u8; 1000]));
|
||||
assert!(index_writer.commit().is_ok());
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(bytes_field=>vec![0u8, 1, 2, 3]));
|
||||
index_writer.add_document(doc!(bytes_field=>vec![]));
|
||||
index_writer.add_document(doc!(bytes_field=>vec![255u8]));
|
||||
index_writer.add_document(doc!(bytes_field=>vec![1u8, 3, 5, 7, 9]));
|
||||
index_writer.add_document(doc!(bytes_field=>vec![0u8; 1000]));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let bytes_reader = segment_reader.fast_fields().bytes(field).unwrap();
|
||||
|
||||
let bytes_reader = segment_reader.fast_fields().bytes(bytes_field).unwrap();
|
||||
assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
|
||||
assert!(bytes_reader.get_bytes(1).is_empty());
|
||||
assert_eq!(bytes_reader.get_bytes(2), &[255u8]);
|
||||
assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]);
|
||||
let long = vec![0u8; 1000];
|
||||
assert_eq!(bytes_reader.get_bytes(4), long.as_slice());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_index_for_test<T: Into<BytesOptions>>(
|
||||
byte_options: T,
|
||||
) -> crate::Result<impl Deref<Target = Searcher>> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_bytes_field("string_bytes", byte_options.into());
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(
|
||||
field => b"tantivy".as_ref(),
|
||||
field => b"lucene".as_ref()
|
||||
));
|
||||
index_writer.commit()?;
|
||||
Ok(index.reader()?.searcher())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stored_bytes() -> crate::Result<()> {
|
||||
let searcher = create_index_for_test(STORED)?;
|
||||
assert_eq!(searcher.num_docs(), 1);
|
||||
let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
|
||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||
let values: Vec<&Value> = retrieved_doc.get_all(field).collect();
|
||||
assert_eq!(values.len(), 2);
|
||||
let values_bytes: Vec<&[u8]> = values
|
||||
.into_iter()
|
||||
.flat_map(|value| value.bytes_value())
|
||||
.collect();
|
||||
assert_eq!(values_bytes, &[&b"tantivy"[..], &b"lucene"[..]]);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_non_stored_bytes() -> crate::Result<()> {
|
||||
let searcher = create_index_for_test(INDEXED)?;
|
||||
assert_eq!(searcher.num_docs(), 1);
|
||||
let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
|
||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||
assert!(retrieved_doc.get_first(field).is_none());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_index_bytes() -> crate::Result<()> {
|
||||
let searcher = create_index_for_test(INDEXED)?;
|
||||
assert_eq!(searcher.num_docs(), 1);
|
||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||
let term = Term::from_field_bytes(field, b"lucene".as_ref());
|
||||
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
|
||||
let term_weight = term_query.specialized_weight(&searcher, true)?;
|
||||
let term_scorer = term_weight.specialized_scorer(searcher.segment_reader(0), 1.0f32)?;
|
||||
assert_eq!(term_scorer.doc(), 0u32);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_non_index_bytes() -> crate::Result<()> {
|
||||
let searcher = create_index_for_test(STORED)?;
|
||||
assert_eq!(searcher.num_docs(), 1);
|
||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||
let term = Term::from_field_bytes(field, b"lucene".as_ref());
|
||||
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
|
||||
let term_weight_res = term_query.specialized_weight(&searcher, false);
|
||||
assert!(matches!(
|
||||
term_weight_res,
|
||||
Err(crate::TantivyError::SchemaError(_))
|
||||
));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fast_bytes_multivalue_value() -> crate::Result<()> {
|
||||
let searcher = create_index_for_test(FAST)?;
|
||||
assert_eq!(searcher.num_docs(), 1);
|
||||
let fast_fields = searcher.segment_reader(0u32).fast_fields();
|
||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||
let fast_field_reader = fast_fields.bytes(field).unwrap();
|
||||
assert_eq!(fast_field_reader.get_bytes(0u32), b"tantivy");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use owning_ref::OwningRef;
|
||||
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::OwnedBytes;
|
||||
use crate::fastfield::FastFieldReader;
|
||||
use crate::DocId;
|
||||
|
||||
@@ -17,16 +16,16 @@ use crate::DocId;
|
||||
#[derive(Clone)]
|
||||
pub struct BytesFastFieldReader {
|
||||
idx_reader: FastFieldReader<u64>,
|
||||
values: OwningRef<ReadOnlySource, [u8]>,
|
||||
values: OwnedBytes,
|
||||
}
|
||||
|
||||
impl BytesFastFieldReader {
|
||||
pub(crate) fn open(
|
||||
idx_reader: FastFieldReader<u64>,
|
||||
values_source: ReadOnlySource,
|
||||
) -> BytesFastFieldReader {
|
||||
let values = OwningRef::new(values_source).map(|source| &source[..]);
|
||||
BytesFastFieldReader { idx_reader, values }
|
||||
values_file: FileSlice,
|
||||
) -> crate::Result<BytesFastFieldReader> {
|
||||
let values = values_file.read_bytes()?;
|
||||
Ok(BytesFastFieldReader { idx_reader, values })
|
||||
}
|
||||
|
||||
fn range(&self, doc: DocId) -> (usize, usize) {
|
||||
@@ -38,7 +37,7 @@ impl BytesFastFieldReader {
|
||||
/// Returns the bytes associated to the given `doc`
|
||||
pub fn get_bytes(&self, doc: DocId) -> &[u8] {
|
||||
let (start, stop) = self.range(doc);
|
||||
&self.values[start..stop]
|
||||
&self.values.as_slice()[start..stop]
|
||||
}
|
||||
|
||||
/// Returns the overall number of bytes in this bytes fast field.
|
||||
|
||||
@@ -49,16 +49,10 @@ impl BytesFastFieldWriter {
|
||||
/// matching field values present in the document.
|
||||
pub fn add_document(&mut self, doc: &Document) {
|
||||
self.next_doc();
|
||||
for field_value in doc.field_values() {
|
||||
if field_value.field() == self.field {
|
||||
if let Value::Bytes(ref bytes) = *field_value.value() {
|
||||
self.vals.extend_from_slice(bytes);
|
||||
} else {
|
||||
panic!(
|
||||
"Bytes field contained non-Bytes Value!. Field {:?} = {:?}",
|
||||
self.field, field_value
|
||||
);
|
||||
}
|
||||
for field_value in doc.get_all(self.field) {
|
||||
if let Value::Bytes(ref bytes) = field_value {
|
||||
self.vals.extend_from_slice(bytes);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -76,21 +70,18 @@ impl BytesFastFieldWriter {
|
||||
|
||||
/// Serializes the fast field values by pushing them to the `FastFieldSerializer`.
|
||||
pub fn serialize(&self, serializer: &mut FastFieldSerializer) -> io::Result<()> {
|
||||
{
|
||||
// writing the offset index
|
||||
let mut doc_index_serializer =
|
||||
serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
|
||||
for &offset in &self.doc_index {
|
||||
doc_index_serializer.add_val(offset)?;
|
||||
}
|
||||
doc_index_serializer.add_val(self.vals.len() as u64)?;
|
||||
doc_index_serializer.close_field()?;
|
||||
}
|
||||
{
|
||||
// writing the values themselves
|
||||
let mut value_serializer = serializer.new_bytes_fast_field_with_idx(self.field, 1)?;
|
||||
value_serializer.write_all(&self.vals)?;
|
||||
// writing the offset index
|
||||
let mut doc_index_serializer =
|
||||
serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
|
||||
for &offset in &self.doc_index {
|
||||
doc_index_serializer.add_val(offset)?;
|
||||
}
|
||||
doc_index_serializer.add_val(self.vals.len() as u64)?;
|
||||
doc_index_serializer.close_field()?;
|
||||
// writing the values themselves
|
||||
serializer
|
||||
.new_bytes_fast_field_with_idx(self.field, 1)?
|
||||
.write_all(&self.vals)?;
|
||||
Ok(())
|
||||
}
|
||||
}
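To make the two columns written by serialize concrete, here is an illustrative computation of the idx-0 offset column for three documents with byte values [0, 1, 2, 3], [] and [255]: one start offset per document plus the total length, while idx 1 holds the concatenated bytes. This mirrors the accumulation above but is not part of the diff.

fn bytes_fast_field_offsets_example() {
    let docs: Vec<Vec<u8>> = vec![vec![0, 1, 2, 3], vec![], vec![255]];
    let mut vals: Vec<u8> = Vec::new();
    let mut doc_index: Vec<u64> = Vec::new();
    for doc in &docs {
        // next_doc(): record where this document's bytes start.
        doc_index.push(vals.len() as u64);
        vals.extend_from_slice(doc);
    }
    // The final offset is the total byte count, so that
    // range(doc) = (doc_index[doc], doc_index[doc + 1]).
    doc_index.push(vals.len() as u64);
    assert_eq!(doc_index, vec![0, 4, 4, 5]);
    assert_eq!(vals, vec![0, 1, 2, 3, 255]);
}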
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
use crate::common::{BitSet, HasLen};
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::OwnedBytes;
|
||||
use crate::directory::WritePtr;
|
||||
use crate::space_usage::ByteCount;
|
||||
use crate::DocId;
|
||||
@@ -39,7 +40,7 @@ pub fn write_delete_bitset(
|
||||
/// Set of deleted `DocId`s.
|
||||
#[derive(Clone)]
|
||||
pub struct DeleteBitSet {
|
||||
data: ReadOnlySource,
|
||||
data: OwnedBytes,
|
||||
len: usize,
|
||||
}
|
||||
|
||||
@@ -53,26 +54,27 @@ impl DeleteBitSet {
|
||||
for &doc in docs {
|
||||
bitset.insert(doc);
|
||||
}
|
||||
let mut directory = RAMDirectory::create();
|
||||
let directory = RAMDirectory::create();
|
||||
let path = Path::new("dummydeletebitset");
|
||||
let mut wrt = directory.open_write(path).unwrap();
|
||||
write_delete_bitset(&bitset, max_doc, &mut wrt).unwrap();
|
||||
wrt.terminate().unwrap();
|
||||
let source = directory.open_read(path).unwrap();
|
||||
Self::open(source)
|
||||
let file = directory.open_read(path).unwrap();
|
||||
Self::open(file).unwrap()
|
||||
}
|
||||
|
||||
/// Opens a delete bitset given its data source.
|
||||
pub fn open(data: ReadOnlySource) -> DeleteBitSet {
|
||||
let num_deleted: usize = data
|
||||
/// Opens a delete bitset given its file.
|
||||
pub fn open(file: FileSlice) -> crate::Result<DeleteBitSet> {
|
||||
let bytes = file.read_bytes()?;
|
||||
let num_deleted: usize = bytes
|
||||
.as_slice()
|
||||
.iter()
|
||||
.map(|b| b.count_ones() as usize)
|
||||
.sum();
|
||||
DeleteBitSet {
|
||||
data,
|
||||
Ok(DeleteBitSet {
|
||||
data: bytes,
|
||||
len: num_deleted,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns true iff the document is still "alive". In other words, if it has not been deleted.
|
||||
@@ -84,7 +86,7 @@ impl DeleteBitSet {
|
||||
#[inline(always)]
|
||||
pub fn is_deleted(&self, doc: DocId) -> bool {
|
||||
let byte_offset = doc / 8u32;
|
||||
let b: u8 = (*self.data)[byte_offset as usize];
|
||||
let b: u8 = self.data.as_slice()[byte_offset as usize];
|
||||
let shift = (doc & 7u32) as u8;
|
||||
b & (1u8 << shift) != 0
|
||||
}
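To make the bit addressing in is_deleted concrete, a standalone worked example (not part of the diff): for doc = 11, byte_offset = 11 / 8 = 1 and shift = 11 & 7 = 3, so the bit tested is data[1] & (1 << 3).

fn is_deleted_example(data: &[u8], doc: u32) -> bool {
    // Same arithmetic as DeleteBitSet::is_deleted, over a plain byte slice.
    let byte_offset = (doc / 8u32) as usize;
    let shift = (doc & 7u32) as u8;
    data[byte_offset] & (1u8 << shift) != 0
}

#[test]
fn doc_11_is_deleted() {
    let data = [0b0000_0000u8, 0b0000_1000u8];
    assert!(is_deleted_example(&data, 11));
    assert!(!is_deleted_example(&data, 10));
}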
|
||||
|
||||
@@ -4,8 +4,8 @@ use std::result;
|
||||
/// `FastFieldNotAvailableError` is returned when the
|
||||
/// user requested a fast field reader, and the field was not
|
||||
/// defined in the schema as a fast field.
|
||||
#[derive(Debug, Fail)]
|
||||
#[fail(display = "Fast field not available: '{:?}'", field_name)]
|
||||
#[derive(Debug, Error)]
|
||||
#[error("Fast field not available: '{field_name:?}'")]
|
||||
pub struct FastFieldNotAvailableError {
|
||||
field_name: String,
|
||||
}
|
||||
|
||||
@@ -73,7 +73,61 @@ impl FacetReader {
|
||||
}
|
||||
|
||||
/// Return the list of facet ordinals associated to a document.
|
||||
pub fn facet_ords(&mut self, doc: DocId, output: &mut Vec<u64>) {
|
||||
pub fn facet_ords(&self, doc: DocId, output: &mut Vec<u64>) {
|
||||
self.term_ords.get_vals(doc, output);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::Index;
|
||||
use crate::{
|
||||
schema::{Facet, SchemaBuilder},
|
||||
Document,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_facet_not_populated_for_all_docs() -> crate::Result<()> {
|
||||
let mut schema_builder = SchemaBuilder::default();
|
||||
let facet_field = schema_builder.add_facet_field("facet");
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(facet_field=>Facet::from_text("/a/b")));
|
||||
index_writer.add_document(Document::default());
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let facet_reader = searcher
|
||||
.segment_reader(0u32)
|
||||
.facet_reader(facet_field)
|
||||
.unwrap();
|
||||
let mut facet_ords = Vec::new();
|
||||
facet_reader.facet_ords(0u32, &mut facet_ords);
|
||||
assert_eq!(&facet_ords, &[2u64]);
|
||||
facet_reader.facet_ords(1u32, &mut facet_ords);
|
||||
assert!(facet_ords.is_empty());
|
||||
Ok(())
|
||||
}
|
||||
#[test]
|
||||
fn test_facet_not_populated_for_any_docs() -> crate::Result<()> {
|
||||
let mut schema_builder = SchemaBuilder::default();
|
||||
let facet_field = schema_builder.add_facet_field("facet");
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(Document::default());
|
||||
index_writer.add_document(Document::default());
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let facet_reader = searcher
|
||||
.segment_reader(0u32)
|
||||
.facet_reader(facet_field)
|
||||
.unwrap();
|
||||
let mut facet_ords = Vec::new();
|
||||
facet_reader.facet_ords(0u32, &mut facet_ords);
|
||||
assert!(facet_ords.is_empty());
|
||||
facet_reader.facet_ords(1u32, &mut facet_ords);
|
||||
assert!(facet_ords.is_empty());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,11 +33,14 @@ pub use self::reader::FastFieldReader;
|
||||
pub use self::readers::FastFieldReaders;
|
||||
pub use self::serializer::FastFieldSerializer;
|
||||
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
|
||||
use crate::chrono::{NaiveDateTime, Utc};
|
||||
use crate::common;
|
||||
use crate::schema::Cardinality;
|
||||
use crate::schema::FieldType;
|
||||
use crate::schema::Value;
|
||||
use crate::{
|
||||
chrono::{NaiveDateTime, Utc},
|
||||
schema::Type,
|
||||
};
|
||||
|
||||
mod bytes;
|
||||
mod delete;
|
||||
@@ -76,6 +79,9 @@ pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd {
|
||||
fn make_zero() -> Self {
|
||||
Self::from_u64(0i64.to_u64())
|
||||
}
|
||||
|
||||
/// Returns the `schema::Type` for this FastValue.
|
||||
fn to_type() -> Type;
|
||||
}
|
||||
|
||||
impl FastValue for u64 {
|
||||
@@ -98,6 +104,10 @@ impl FastValue for u64 {
|
||||
fn as_u64(&self) -> u64 {
|
||||
*self
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::U64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValue for i64 {
|
||||
@@ -119,6 +129,10 @@ impl FastValue for i64 {
|
||||
fn as_u64(&self) -> u64 {
|
||||
*self as u64
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::I64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValue for f64 {
|
||||
@@ -140,6 +154,10 @@ impl FastValue for f64 {
|
||||
fn as_u64(&self) -> u64 {
|
||||
self.to_bits()
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::F64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValue for crate::DateTime {
|
||||
@@ -162,6 +180,10 @@ impl FastValue for crate::DateTime {
|
||||
fn as_u64(&self) -> u64 {
|
||||
self.timestamp().as_u64()
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::Date
|
||||
}
|
||||
}
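A tiny, hedged example of the new to_type() associated function (it assumes FastValue and schema::Type are publicly reachable at these paths, as the pub use statements in this module suggest):

use tantivy::fastfield::FastValue;
use tantivy::schema::Type;

fn fast_value_schema_types() {
    assert_eq!(u64::to_type(), Type::U64);
    assert_eq!(i64::to_type(), Type::I64);
    assert_eq!(f64::to_type(), Type::F64);
}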
|
||||
|
||||
fn value_to_u64(value: &Value) -> u64 {
|
||||
@@ -187,6 +209,7 @@ mod tests {
|
||||
use crate::schema::FAST;
|
||||
use crate::schema::{Document, IntOptions};
|
||||
use crate::{Index, SegmentId, SegmentReader};
|
||||
use common::HasLen;
|
||||
use once_cell::sync::Lazy;
|
||||
use rand::prelude::SliceRandom;
|
||||
use rand::rngs::StdRng;
|
||||
@@ -217,9 +240,9 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intfastfield_small() {
|
||||
fn test_intfastfield_small() -> crate::Result<()> {
|
||||
let path = Path::new("test");
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||
@@ -232,27 +255,24 @@ mod tests {
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
{
|
||||
assert_eq!(source.len(), 36 as usize);
|
||||
}
|
||||
{
|
||||
let composite_file = CompositeFile::open(&source).unwrap();
|
||||
let field_source = composite_file.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(field_source);
|
||||
assert_eq!(fast_field_reader.get(0), 13u64);
|
||||
assert_eq!(fast_field_reader.get(1), 14u64);
|
||||
assert_eq!(fast_field_reader.get(2), 2u64);
|
||||
}
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
assert_eq!(file.len(), 36 as usize);
|
||||
let composite_file = CompositeFile::open(&file)?;
|
||||
let file = composite_file.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(file)?;
|
||||
assert_eq!(fast_field_reader.get(0), 13u64);
|
||||
assert_eq!(fast_field_reader.get(1), 14u64);
|
||||
assert_eq!(fast_field_reader.get(2), 2u64);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intfastfield_large() {
|
||||
fn test_intfastfield_large() -> crate::Result<()> {
|
||||
let path = Path::new("test");
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||
let write: WritePtr = directory.open_write(Path::new("test"))?;
|
||||
let mut serializer = FastFieldSerializer::from_write(write)?;
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>4u64));
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>14_082_001u64));
|
||||
@@ -263,19 +283,15 @@ mod tests {
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>1_002u64));
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>1_501u64));
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>215u64));
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
fast_field_writers.serialize(&mut serializer, &HashMap::new())?;
|
||||
serializer.close()?;
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
let file = directory.open_read(&path)?;
|
||||
assert_eq!(file.len(), 61 as usize);
|
||||
{
|
||||
assert_eq!(source.len(), 61 as usize);
|
||||
}
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
||||
assert_eq!(fast_field_reader.get(0), 4u64);
|
||||
assert_eq!(fast_field_reader.get(1), 14_082_001u64);
|
||||
assert_eq!(fast_field_reader.get(2), 3_052u64);
|
||||
@@ -286,12 +302,13 @@ mod tests {
|
||||
assert_eq!(fast_field_reader.get(7), 1_501u64);
|
||||
assert_eq!(fast_field_reader.get(8), 215u64);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intfastfield_null_amplitude() {
|
||||
fn test_intfastfield_null_amplitude() -> crate::Result<()> {
|
||||
let path = Path::new("test");
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
@@ -305,24 +322,23 @@ mod tests {
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
assert_eq!(file.len(), 34 as usize);
|
||||
{
|
||||
assert_eq!(source.len(), 34 as usize);
|
||||
}
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
||||
for doc in 0..10_000 {
|
||||
assert_eq!(fast_field_reader.get(doc), 100_000u64);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intfastfield_large_numbers() {
|
||||
fn test_intfastfield_large_numbers() -> crate::Result<()> {
|
||||
let path = Path::new("test");
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
@@ -338,14 +354,12 @@ mod tests {
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
assert_eq!(file.len(), 80042 as usize);
|
||||
{
|
||||
assert_eq!(source.len(), 80042 as usize);
|
||||
}
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
||||
assert_eq!(fast_field_reader.get(0), 0u64);
|
||||
for doc in 1..10_001 {
|
||||
assert_eq!(
|
||||
@@ -354,12 +368,13 @@ mod tests {
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_signed_intfastfield() {
|
||||
fn test_signed_intfastfield() -> crate::Result<()> {
|
||||
let path = Path::new("test");
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
let mut schema_builder = Schema::builder();
|
||||
|
||||
let i64_field = schema_builder.add_i64_field("field", FAST);
|
||||
@@ -378,14 +393,12 @@ mod tests {
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
assert_eq!(file.len(), 17709 as usize);
|
||||
{
|
||||
assert_eq!(source.len(), 17709 as usize);
|
||||
}
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<i64>::open(data);
|
||||
let fast_field_reader = FastFieldReader::<i64>::open(data)?;
|
||||
|
||||
assert_eq!(fast_field_reader.min_value(), -100i64);
|
||||
assert_eq!(fast_field_reader.max_value(), 9_999i64);
|
||||
@@ -398,12 +411,13 @@ mod tests {
|
||||
assert_eq!(buffer[i], -100i64 + 53i64 + i as i64);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_signed_intfastfield_default_val() {
|
||||
fn test_signed_intfastfield_default_val() -> crate::Result<()> {
|
||||
let path = Path::new("test");
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
let mut schema_builder = Schema::builder();
|
||||
let i64_field = schema_builder.add_i64_field("field", FAST);
|
||||
let schema = schema_builder.build();
|
||||
@@ -420,13 +434,14 @@ mod tests {
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<i64>::open(data);
|
||||
let fast_field_reader = FastFieldReader::<i64>::open(data)?;
|
||||
assert_eq!(fast_field_reader.get(0u32), 0i64);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
@@ -437,28 +452,26 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intfastfield_permutation() {
|
||||
fn test_intfastfield_permutation() -> crate::Result<()> {
|
||||
let path = Path::new("test");
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let mut directory = RAMDirectory::create();
|
||||
let directory = RAMDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||
let write: WritePtr = directory.open_write(Path::new("test"))?;
|
||||
let mut serializer = FastFieldSerializer::from_write(write)?;
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
||||
for &x in &permutation {
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>x));
|
||||
}
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new())
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
fast_field_writers.serialize(&mut serializer, &HashMap::new())?;
|
||||
serializer.close()?;
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
let file = directory.open_read(&path)?;
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data)?;
|
||||
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
@@ -466,6 +479,7 @@ mod tests {
|
||||
a = fast_field_reader.get(a as u32);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -598,7 +612,7 @@ mod bench {
|
||||
fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
|
||||
let path = Path::new("test");
|
||||
let permutation = generate_permutation();
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||
@@ -611,9 +625,9 @@ mod bench {
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||
|
||||
@@ -632,7 +646,7 @@ mod bench {
|
||||
fn bench_intfastfield_fflookup(b: &mut Bencher) {
|
||||
let path = Path::new("test");
|
||||
let permutation = generate_permutation();
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
|
||||
@@ -645,9 +659,9 @@ mod bench {
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&source).unwrap();
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = FastFieldReader::<u64>::open(data);
|
||||
|
||||
|
||||
@@ -100,6 +100,7 @@ mod tests {
|
||||
.get_first(date_field)
|
||||
.expect("cannot find value")
|
||||
.date_value()
|
||||
.unwrap()
|
||||
.timestamp(),
|
||||
first_time_stamp.timestamp()
|
||||
);
|
||||
@@ -108,7 +109,7 @@ mod tests {
|
||||
.get_first(time_i)
|
||||
.expect("cannot find value")
|
||||
.i64_value(),
|
||||
1i64
|
||||
Some(1i64)
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -131,6 +132,7 @@ mod tests {
|
||||
.get_first(date_field)
|
||||
.expect("cannot find value")
|
||||
.date_value()
|
||||
.unwrap()
|
||||
.timestamp(),
|
||||
two_secs_ahead.timestamp()
|
||||
);
|
||||
@@ -139,7 +141,7 @@ mod tests {
|
||||
.get_first(time_i)
|
||||
.expect("cannot find value")
|
||||
.i64_value(),
|
||||
3i64
|
||||
Some(3i64)
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -197,22 +199,14 @@ mod tests {
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let mut vals = Vec::new();
|
||||
let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap();
|
||||
{
|
||||
multi_value_reader.get_vals(2, &mut vals);
|
||||
assert_eq!(&vals, &[-4i64]);
|
||||
}
|
||||
{
|
||||
multi_value_reader.get_vals(0, &mut vals);
|
||||
assert_eq!(&vals, &[1i64, 3i64]);
|
||||
}
|
||||
{
|
||||
multi_value_reader.get_vals(1, &mut vals);
|
||||
assert!(vals.is_empty());
|
||||
}
|
||||
{
|
||||
multi_value_reader.get_vals(3, &mut vals);
|
||||
assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
|
||||
}
|
||||
multi_value_reader.get_vals(2, &mut vals);
|
||||
assert_eq!(&vals, &[-4i64]);
|
||||
multi_value_reader.get_vals(0, &mut vals);
|
||||
assert_eq!(&vals, &[1i64, 3i64]);
|
||||
multi_value_reader.get_vals(1, &mut vals);
|
||||
assert!(vals.is_empty());
|
||||
multi_value_reader.get_vals(3, &mut vals);
|
||||
assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
|
||||
}
|
||||
#[test]
|
||||
#[ignore]
|
||||
|
||||
@@ -143,7 +143,7 @@ impl MultiValueIntFastFieldWriter {
.iter()
.map(|val| *mapping.get(val).expect("Missing term ordinal"));
doc_vals.extend(remapped_vals);
doc_vals.sort();
doc_vals.sort_unstable();
for &val in &doc_vals {
value_serializer.add_val(val)?;
}

@@ -3,13 +3,12 @@ use crate::common::bitpacker::BitUnpacker;
use crate::common::compute_num_bits;
use crate::common::BinarySerializable;
use crate::common::CompositeFile;
use crate::directory::ReadOnlySource;
use crate::directory::FileSlice;
use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema;
use crate::schema::FAST;
use crate::DocId;
use owning_ref::OwningRef;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::path::Path;
@@ -20,34 +19,27 @@ use std::path::Path;
/// fast field is required.
#[derive(Clone)]
pub struct FastFieldReader<Item: FastValue> {
bit_unpacker: BitUnpacker<OwningRef<ReadOnlySource, [u8]>>,
bit_unpacker: BitUnpacker,
min_value_u64: u64,
max_value_u64: u64,
_phantom: PhantomData<Item>,
}

impl<Item: FastValue> FastFieldReader<Item> {
/// Opens a fast field given a source.
pub fn open(data: ReadOnlySource) -> Self {
let min_value: u64;
let amplitude: u64;
{
let mut cursor = data.as_slice();
min_value =
u64::deserialize(&mut cursor).expect("Failed to read the min_value of fast field.");
amplitude =
u64::deserialize(&mut cursor).expect("Failed to read the amplitude of fast field.");
}
/// Opens a fast field given a file.
pub fn open(file: FileSlice) -> crate::Result<Self> {
let mut bytes = file.read_bytes()?;
let min_value = u64::deserialize(&mut bytes)?;
let amplitude = u64::deserialize(&mut bytes)?;
let max_value = min_value + amplitude;
let num_bits = compute_num_bits(amplitude);
let owning_ref = OwningRef::new(data).map(|data| &data[16..]);
let bit_unpacker = BitUnpacker::new(owning_ref, num_bits);
FastFieldReader {
let bit_unpacker = BitUnpacker::new(bytes, num_bits);
Ok(FastFieldReader {
min_value_u64: min_value,
max_value_u64: max_value,
bit_unpacker,
_phantom: PhantomData,
}
})
}
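
For call sites, the practical consequence of this hunk is that opening a fast field is now fallible and starts from a `FileSlice` rather than a `ReadOnlySource`. A minimal sketch of the new calling pattern, mirroring the updated tests elsewhere in this diff; the function, directory, path and field names are illustrative placeholders, not code from the commit:

    fn read_first_value(directory: &RAMDirectory, path: &Path, field: Field) -> crate::Result<u64> {
        // The Directory now hands back a FileSlice instead of a ReadOnlySource.
        let file = directory.open_read(path)?;
        let composite = CompositeFile::open(&file)?;
        let data = composite
            .open_read(field)
            .expect("field not present in the composite file");
        // `open` now reads and validates the header (min value, amplitude) and can fail.
        let reader = FastFieldReader::<u64>::open(data)?;
        Ok(reader.get(0))
    }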
|
||||
|
||||
pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> {
|
||||
@@ -135,7 +127,7 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
|
||||
let field = schema_builder.add_u64_field("field", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let path = Path::new("__dummy__");
|
||||
let mut directory: RAMDirectory = RAMDirectory::create();
|
||||
let directory: RAMDirectory = RAMDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory
|
||||
.open_write(path)
|
||||
@@ -157,12 +149,11 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
|
||||
let source = directory.open_read(path).expect("Failed to open the file");
|
||||
let composite_file =
|
||||
CompositeFile::open(&source).expect("Failed to read the composite file");
|
||||
let field_source = composite_file
|
||||
let file = directory.open_read(path).expect("Failed to open the file");
|
||||
let composite_file = CompositeFile::open(&file).expect("Failed to read the composite file");
|
||||
let field_file = composite_file
|
||||
.open_read(field)
|
||||
.expect("File component not found");
|
||||
FastFieldReader::open(field_source)
|
||||
FastFieldReader::open(field_file).unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,45 +68,52 @@ impl FastFieldReaders {
|
||||
};
|
||||
for (field, field_entry) in schema.fields() {
|
||||
let field_type = field_entry.field_type();
|
||||
if field_type == &FieldType::Bytes {
|
||||
let idx_reader = fast_fields_composite
|
||||
if let FieldType::Bytes(bytes_option) = field_type {
|
||||
if !bytes_option.is_fast() {
|
||||
continue;
|
||||
}
|
||||
let fast_field_idx_file = fast_fields_composite
|
||||
.open_read_with_idx(field, 0)
|
||||
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
|
||||
.map(FastFieldReader::open)?;
|
||||
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
|
||||
let idx_reader = FastFieldReader::open(fast_field_idx_file)?;
|
||||
let data = fast_fields_composite
|
||||
.open_read_with_idx(field, 1)
|
||||
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
|
||||
let bytes_fast_field_reader = BytesFastFieldReader::open(idx_reader, data)?;
|
||||
fast_field_readers
|
||||
.fast_bytes
|
||||
.insert(field, BytesFastFieldReader::open(idx_reader, data));
|
||||
.insert(field, bytes_fast_field_reader);
|
||||
} else if let Some((fast_type, cardinality)) = type_and_cardinality(field_type) {
|
||||
match cardinality {
|
||||
Cardinality::SingleValue => {
|
||||
if let Some(fast_field_data) = fast_fields_composite.open_read(field) {
|
||||
match fast_type {
|
||||
FastType::U64 => {
|
||||
let fast_field_reader = FastFieldReader::open(fast_field_data);
|
||||
let fast_field_reader = FastFieldReader::open(fast_field_data)?;
|
||||
fast_field_readers
|
||||
.fast_field_u64
|
||||
.insert(field, fast_field_reader);
|
||||
}
|
||||
FastType::I64 => {
|
||||
fast_field_readers.fast_field_i64.insert(
|
||||
field,
|
||||
FastFieldReader::open(fast_field_data.clone()),
|
||||
);
|
||||
let fast_field_reader =
|
||||
FastFieldReader::open(fast_field_data.clone())?;
|
||||
fast_field_readers
|
||||
.fast_field_i64
|
||||
.insert(field, fast_field_reader);
|
||||
}
|
||||
FastType::F64 => {
|
||||
fast_field_readers.fast_field_f64.insert(
|
||||
field,
|
||||
FastFieldReader::open(fast_field_data.clone()),
|
||||
);
|
||||
let fast_field_reader =
|
||||
FastFieldReader::open(fast_field_data.clone())?;
|
||||
fast_field_readers
|
||||
.fast_field_f64
|
||||
.insert(field, fast_field_reader);
|
||||
}
|
||||
FastType::Date => {
|
||||
fast_field_readers.fast_field_date.insert(
|
||||
field,
|
||||
FastFieldReader::open(fast_field_data.clone()),
|
||||
);
|
||||
let fast_field_reader =
|
||||
FastFieldReader::open(fast_field_data.clone())?;
|
||||
fast_field_readers
|
||||
.fast_field_date
|
||||
.insert(field, fast_field_reader);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -117,10 +124,10 @@ impl FastFieldReaders {
|
||||
let idx_opt = fast_fields_composite.open_read_with_idx(field, 0);
|
||||
let data_opt = fast_fields_composite.open_read_with_idx(field, 1);
|
||||
if let (Some(fast_field_idx), Some(fast_field_data)) = (idx_opt, data_opt) {
|
||||
let idx_reader = FastFieldReader::open(fast_field_idx);
|
||||
let idx_reader = FastFieldReader::open(fast_field_idx)?;
|
||||
match fast_type {
|
||||
FastType::I64 => {
|
||||
let vals_reader = FastFieldReader::open(fast_field_data);
|
||||
let vals_reader = FastFieldReader::open(fast_field_data)?;
|
||||
let multivalued_int_fast_field =
|
||||
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
||||
fast_field_readers
|
||||
@@ -128,7 +135,7 @@ impl FastFieldReaders {
|
||||
.insert(field, multivalued_int_fast_field);
|
||||
}
|
||||
FastType::U64 => {
|
||||
let vals_reader = FastFieldReader::open(fast_field_data);
|
||||
let vals_reader = FastFieldReader::open(fast_field_data)?;
|
||||
let multivalued_int_fast_field =
|
||||
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
||||
fast_field_readers
|
||||
@@ -136,7 +143,7 @@ impl FastFieldReaders {
|
||||
.insert(field, multivalued_int_fast_field);
|
||||
}
|
||||
FastType::F64 => {
|
||||
let vals_reader = FastFieldReader::open(fast_field_data);
|
||||
let vals_reader = FastFieldReader::open(fast_field_data)?;
|
||||
let multivalued_int_fast_field =
|
||||
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
||||
fast_field_readers
|
||||
@@ -144,7 +151,7 @@ impl FastFieldReaders {
|
||||
.insert(field, multivalued_int_fast_field);
|
||||
}
|
||||
FastType::Date => {
|
||||
let vals_reader = FastFieldReader::open(fast_field_data);
|
||||
let vals_reader = FastFieldReader::open(fast_field_data)?;
|
||||
let multivalued_int_fast_field =
|
||||
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
|
||||
fast_field_readers
|
||||
|
||||
@@ -33,7 +33,7 @@ impl FastFieldsWriter {
|
||||
let mut bytes_value_writers = Vec::new();
|
||||
|
||||
for (field, field_entry) in schema.fields() {
|
||||
match *field_entry.field_type() {
|
||||
match field_entry.field_type() {
|
||||
FieldType::I64(ref int_options)
|
||||
| FieldType::U64(ref int_options)
|
||||
| FieldType::F64(ref int_options)
|
||||
@@ -56,9 +56,11 @@ impl FastFieldsWriter {
|
||||
let fast_field_writer = MultiValueIntFastFieldWriter::new(field, true);
|
||||
multi_values_writers.push(fast_field_writer);
|
||||
}
|
||||
FieldType::Bytes => {
|
||||
let fast_field_writer = BytesFastFieldWriter::new(field);
|
||||
bytes_value_writers.push(fast_field_writer);
|
||||
FieldType::Bytes(bytes_option) => {
|
||||
if bytes_option.is_fast() {
|
||||
let fast_field_writer = BytesFastFieldWriter::new(field);
|
||||
bytes_value_writers.push(fast_field_writer);
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
@@ -126,6 +128,7 @@ impl FastFieldsWriter {
|
||||
for field_writer in &self.single_value_writers {
|
||||
field_writer.serialize(serializer)?;
|
||||
}
|
||||
|
||||
for field_writer in &self.multi_values_writers {
|
||||
let field = field_writer.field();
|
||||
field_writer.serialize(serializer, mapping.get(&field))?;
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use super::{fieldnorm_to_id, id_to_fieldnorm};
|
||||
use crate::common::CompositeFile;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::OwnedBytes;
|
||||
use crate::schema::Field;
|
||||
use crate::space_usage::PerFieldSpaceUsage;
|
||||
use crate::DocId;
|
||||
@@ -19,16 +20,21 @@ pub struct FieldNormReaders {
|
||||
|
||||
impl FieldNormReaders {
|
||||
/// Creates a field norm reader.
|
||||
pub fn open(source: ReadOnlySource) -> crate::Result<FieldNormReaders> {
|
||||
let data = CompositeFile::open(&source)?;
|
||||
pub fn open(file: FileSlice) -> crate::Result<FieldNormReaders> {
|
||||
let data = CompositeFile::open(&file)?;
|
||||
Ok(FieldNormReaders {
|
||||
data: Arc::new(data),
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the FieldNormReader for a specific field.
pub fn get_field(&self, field: Field) -> Option<FieldNormReader> {
self.data.open_read(field).map(FieldNormReader::open)
pub fn get_field(&self, field: Field) -> crate::Result<Option<FieldNormReader>> {
if let Some(file) = self.data.open_read(field) {
let fieldnorm_reader = FieldNormReader::open(file)?;
Ok(Some(fieldnorm_reader))
} else {
Ok(None)
}
}

/// Return a break down of the space usage per field.
@@ -43,7 +49,7 @@ impl FieldNormReaders {
///
/// This metric is important to compute the score of a
/// document : a document having a query word in one its short fields
/// (e.g. title) is likely to be more relevant than in one of its longer field
/// (e.g. title)is likely to be more relevant than in one of its longer field
/// (e.g. body).
///
/// tantivy encodes `fieldnorm` on one byte with some precision loss,
@@ -55,19 +61,31 @@ impl FieldNormReaders {
/// precompute computationally expensive functions of the fieldnorm
/// in a very short array.
#[derive(Clone)]
pub struct FieldNormReader {
data: ReadOnlySource,
pub enum FieldNormReader {
ConstFieldNorm { fieldnorm_id: u8, num_docs: u32 },
OneByte(OwnedBytes),
}
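
The doc comment carried over in this hunk describes the whole encoding: a fieldnorm (the number of tokens in a field) is squashed onto a single byte by `fieldnorm_to_id` and decoded back with `id_to_fieldnorm`, both imported at the top of this file. A rough sketch of the behaviour this implies, assuming small values round-trip exactly while large ones are only approximated (the precise decoded values depend on tantivy's lookup table):

    // Small fieldnorms are assumed to survive the round trip unchanged.
    let small: u32 = 3;
    assert_eq!(id_to_fieldnorm(fieldnorm_to_id(small)), small);

    // A very long field is stored with precision loss: the decoded value is an
    // approximation of the original count that still fits in a single byte id.
    let long_field: u32 = 1_000_000;
    let approx = id_to_fieldnorm(fieldnorm_to_id(long_field));
    assert!(approx <= long_field);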
impl FieldNormReader {
/// Opens a field norm reader given its data source.
pub fn open(data: ReadOnlySource) -> Self {
FieldNormReader { data }
pub fn const_fieldnorm_id(fieldnorm_id: u8, num_docs: u32) -> FieldNormReader {
FieldNormReader::ConstFieldNorm {
fieldnorm_id,
num_docs,
}
}

/// Opens a field norm reader given its file.
pub fn open(fieldnorm_file: FileSlice) -> crate::Result<Self> {
let data = fieldnorm_file.read_bytes()?;
Ok(FieldNormReader::OneByte(data))
}

/// Returns the number of documents in this segment.
pub fn num_docs(&self) -> u32 {
self.data.len() as u32
match self {
Self::ConstFieldNorm { num_docs, .. } => *num_docs,
FieldNormReader::OneByte(vals) => vals.len() as u32,
}
}
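
Because fieldnorms can now be skipped per field (#922), `get_field` earlier in this file returns `crate::Result<Option<FieldNormReader>>`, and the merger further down in this diff consumes it as `fieldnorm_readers.get_field(field)?`. A hedged sketch of one way a caller could combine that with the new `const_fieldnorm_id` constructor when a field was indexed without fieldnorms; the helper name and the fallback fieldnorm of 1 are assumptions for illustration, not taken from the commit:

    fn fieldnorm_reader_or_const(
        readers: &FieldNormReaders,
        field: Field,
        num_docs: u32,
    ) -> crate::Result<FieldNormReader> {
        Ok(match readers.get_field(field)? {
            Some(reader) => reader,
            // No fieldnorm data for this field: treat every document as having one token.
            None => FieldNormReader::const_fieldnorm_id(FieldNormReader::fieldnorm_to_id(1), num_docs),
        })
    }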
|
||||
|
||||
/// Returns the `fieldnorm` associated to a doc id.
|
||||
@@ -79,6 +97,7 @@ impl FieldNormReader {
|
||||
///
|
||||
/// The fieldnorm is effectively decoded from the
|
||||
/// `fieldnorm_id` by doing a simple table lookup.
|
||||
#[inline(always)]
|
||||
pub fn fieldnorm(&self, doc_id: DocId) -> u32 {
|
||||
let fieldnorm_id = self.fieldnorm_id(doc_id);
|
||||
id_to_fieldnorm(fieldnorm_id)
|
||||
@@ -87,8 +106,11 @@ impl FieldNormReader {
|
||||
/// Returns the `fieldnorm_id` associated to a document.
|
||||
#[inline(always)]
|
||||
pub fn fieldnorm_id(&self, doc_id: DocId) -> u8 {
|
||||
let fielnorms_data = self.data.as_slice();
|
||||
fielnorms_data[doc_id as usize]
|
||||
match self {
|
||||
FieldNormReader::ConstFieldNorm { fieldnorm_id, .. } => *fieldnorm_id,
|
||||
|
||||
FieldNormReader::OneByte(data) => data.as_slice()[doc_id as usize],
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts a `fieldnorm_id` into a fieldnorm.
|
||||
@@ -111,10 +133,8 @@ impl FieldNormReader {
|
||||
.cloned()
|
||||
.map(FieldNormReader::fieldnorm_to_id)
|
||||
.collect::<Vec<u8>>();
|
||||
let field_norms_data = ReadOnlySource::from(field_norms_id);
|
||||
FieldNormReader {
|
||||
data: field_norms_data,
|
||||
}
|
||||
let field_norms_data = OwnedBytes::new(field_norms_id);
|
||||
FieldNormReader::OneByte(field_norms_data)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ use std::io;
|
||||
/// byte per document per field.
|
||||
pub struct FieldNormsWriter {
|
||||
fields: Vec<Field>,
|
||||
fieldnorms_buffer: Vec<Vec<u8>>,
|
||||
fieldnorms_buffer: Vec<Option<Vec<u8>>>,
|
||||
}
|
||||
|
||||
impl FieldNormsWriter {
|
||||
@@ -23,7 +23,7 @@ impl FieldNormsWriter {
|
||||
schema
|
||||
.fields()
|
||||
.filter_map(|(field, field_entry)| {
|
||||
if field_entry.is_indexed() {
|
||||
if field_entry.has_fieldnorms() {
|
||||
Some(field)
|
||||
} else {
|
||||
None
|
||||
@@ -36,15 +36,14 @@ impl FieldNormsWriter {
|
||||
/// specified in the schema.
|
||||
pub fn for_schema(schema: &Schema) -> FieldNormsWriter {
|
||||
let fields = FieldNormsWriter::fields_with_fieldnorm(schema);
|
||||
let max_field = fields
|
||||
.iter()
|
||||
.map(Field::field_id)
|
||||
.max()
|
||||
.map(|max_field_id| max_field_id as usize + 1)
|
||||
.unwrap_or(0);
|
||||
let num_fields = schema.num_fields();
|
||||
let mut fieldnorms_buffer: Vec<Option<Vec<u8>>> = vec![None; num_fields];
|
||||
for field in &fields {
|
||||
fieldnorms_buffer[field.field_id() as usize] = Some(Vec::new());
|
||||
}
|
||||
FieldNormsWriter {
|
||||
fields,
|
||||
fieldnorms_buffer: (0..max_field).map(|_| Vec::new()).collect::<Vec<_>>(),
|
||||
fieldnorms_buffer,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -53,8 +52,10 @@ impl FieldNormsWriter {
|
||||
///
|
||||
/// Will extend with 0-bytes for documents that have not been seen.
|
||||
pub fn fill_up_to_max_doc(&mut self, max_doc: DocId) {
|
||||
for field in self.fields.iter() {
|
||||
self.fieldnorms_buffer[field.field_id() as usize].resize(max_doc as usize, 0u8);
|
||||
for buffer_opt in self.fieldnorms_buffer.iter_mut() {
|
||||
if let Some(buffer) = buffer_opt {
|
||||
buffer.resize(max_doc as usize, 0u8);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -67,21 +68,22 @@ impl FieldNormsWriter {
|
||||
/// * field - the field being set
|
||||
/// * fieldnorm - the number of terms present in document `doc` in field `field`
|
||||
pub fn record(&mut self, doc: DocId, field: Field, fieldnorm: u32) {
|
||||
let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.field_id() as usize];
|
||||
assert!(
|
||||
fieldnorm_buffer.len() <= doc as usize,
|
||||
"Cannot register a given fieldnorm twice"
|
||||
);
|
||||
// we fill intermediary `DocId` as having a fieldnorm of 0.
|
||||
fieldnorm_buffer.resize(doc as usize + 1, 0u8);
|
||||
fieldnorm_buffer[doc as usize] = fieldnorm_to_id(fieldnorm);
|
||||
if let Some(fieldnorm_buffer) = self.fieldnorms_buffer[field.field_id() as usize].as_mut() {
|
||||
assert!(
|
||||
fieldnorm_buffer.len() <= doc as usize,
|
||||
"Cannot register a given fieldnorm twice" // we fill intermediary `DocId` as having a fieldnorm of 0.
|
||||
);
|
||||
fieldnorm_buffer.resize(doc as usize + 1, 0u8);
|
||||
fieldnorm_buffer[doc as usize] = fieldnorm_to_id(fieldnorm);
|
||||
}
|
||||
}
|
||||
|
||||
/// Serialize the seen fieldnorm values to the serializer for all fields.
|
||||
pub fn serialize(&self, mut fieldnorms_serializer: FieldNormsSerializer) -> io::Result<()> {
|
||||
for &field in self.fields.iter() {
|
||||
let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..];
|
||||
fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
|
||||
if let Some(buffer) = self.fieldnorms_buffer[field.field_id() as usize].as_ref() {
|
||||
fieldnorms_serializer.serialize_field(field, &buffer[..])?;
|
||||
}
|
||||
}
|
||||
fieldnorms_serializer.close()?;
|
||||
Ok(())
|
||||
|
||||
@@ -108,9 +108,9 @@ fn compute_deleted_bitset(
|
||||
// Limit doc helps identify the first document
|
||||
// that may be affected by the delete operation.
|
||||
let limit_doc = doc_opstamps.compute_doc_limit(delete_op.opstamp);
|
||||
let inverted_index = segment_reader.inverted_index(delete_op.term.field());
|
||||
let inverted_index = segment_reader.inverted_index(delete_op.term.field())?;
|
||||
if let Some(mut docset) =
|
||||
inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)
|
||||
inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)?
|
||||
{
|
||||
let mut deleted_doc = docset.doc();
|
||||
while deleted_doc != TERMINATED {
|
||||
@@ -979,7 +979,7 @@ mod tests {
|
||||
let num_docs_containing = |s: &str| {
|
||||
let searcher = reader.searcher();
|
||||
let term = Term::from_field_text(text_field, s);
|
||||
searcher.doc_freq(&term)
|
||||
searcher.doc_freq(&term).unwrap()
|
||||
};
|
||||
|
||||
{
|
||||
@@ -1015,7 +1015,7 @@ mod tests {
|
||||
.unwrap();
|
||||
let num_docs_containing = |s: &str| {
|
||||
let term_a = Term::from_field_text(text_field, s);
|
||||
reader.searcher().doc_freq(&term_a)
|
||||
reader.searcher().doc_freq(&term_a).unwrap()
|
||||
};
|
||||
{
|
||||
// writing the segment
|
||||
@@ -1110,6 +1110,7 @@ mod tests {
|
||||
.unwrap()
|
||||
.searcher()
|
||||
.doc_freq(&term_a)
|
||||
.unwrap()
|
||||
};
|
||||
assert_eq!(num_docs_containing("a"), 0);
|
||||
assert_eq!(num_docs_containing("b"), 100);
|
||||
@@ -1129,7 +1130,7 @@ mod tests {
|
||||
reader.reload().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let term = Term::from_field_text(text_field, s);
|
||||
searcher.doc_freq(&term)
|
||||
searcher.doc_freq(&term).unwrap()
|
||||
};
|
||||
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
|
||||
|
||||
@@ -1180,7 +1181,15 @@ mod tests {
|
||||
|
||||
// working with an empty index == no documents
|
||||
let term_b = Term::from_field_text(text_field, "b");
|
||||
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_b), 0);
|
||||
assert_eq!(
|
||||
index
|
||||
.reader()
|
||||
.unwrap()
|
||||
.searcher()
|
||||
.doc_freq(&term_b)
|
||||
.unwrap(),
|
||||
0
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1200,7 +1209,15 @@ mod tests {
|
||||
|
||||
let term_a = Term::from_field_text(text_field, "a");
|
||||
// expect the document with that term to be in the index
|
||||
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
|
||||
assert_eq!(
|
||||
index
|
||||
.reader()
|
||||
.unwrap()
|
||||
.searcher()
|
||||
.doc_freq(&term_a)
|
||||
.unwrap(),
|
||||
1
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1226,7 +1243,15 @@ mod tests {
|
||||
// Find original docs in the index
|
||||
let term_a = Term::from_field_text(text_field, "a");
|
||||
// expect the document with that term to be in the index
|
||||
assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
|
||||
assert_eq!(
|
||||
index
|
||||
.reader()
|
||||
.unwrap()
|
||||
.searcher()
|
||||
.doc_freq(&term_a)
|
||||
.unwrap(),
|
||||
1
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -38,7 +38,7 @@ fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::R
|
||||
count[fieldnorm_id as usize] += 1;
|
||||
}
|
||||
} else {
|
||||
total_tokens += reader.inverted_index(field).total_num_tokens();
|
||||
total_tokens += reader.inverted_index(field)?.total_num_tokens();
|
||||
}
|
||||
}
|
||||
Ok(total_tokens
|
||||
@@ -194,7 +194,7 @@ impl IndexMerger {
|
||||
) -> crate::Result<()> {
|
||||
for (field, field_entry) in self.schema.fields() {
|
||||
let field_type = field_entry.field_type();
|
||||
match *field_type {
|
||||
match field_type {
|
||||
FieldType::HierarchicalFacet => {
|
||||
let term_ordinal_mapping = term_ord_mappings
|
||||
.remove(&field)
|
||||
@@ -223,8 +223,10 @@ impl IndexMerger {
|
||||
// They can be implemented using what is done
|
||||
// for facets in the future.
|
||||
}
|
||||
FieldType::Bytes => {
|
||||
self.write_bytes_fast_field(field, fast_field_serializer)?;
|
||||
FieldType::Bytes(byte_options) => {
|
||||
if byte_options.is_fast() {
|
||||
self.write_bytes_fast_field(field, fast_field_serializer)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -443,9 +445,11 @@ impl IndexMerger {
|
||||
let mut bytes_readers: Vec<BytesFastFieldReader> = Vec::new();
|
||||
|
||||
for reader in &self.readers {
|
||||
let bytes_reader = reader.fast_fields().bytes(field).expect(
|
||||
"Failed to find bytes fast field reader. This is a bug in tantivy, please report.",
|
||||
);
|
||||
let bytes_reader = reader.fast_fields().bytes(field).ok_or_else(|| {
|
||||
crate::TantivyError::InvalidArgument(
|
||||
"Bytes fast field {:?} not found in segment.".to_string(),
|
||||
)
|
||||
})?;
|
||||
if let Some(delete_bitset) = reader.delete_bitset() {
|
||||
for doc in 0u32..reader.max_doc() {
|
||||
if delete_bitset.is_alive(doc) {
|
||||
@@ -498,14 +502,15 @@ impl IndexMerger {
|
||||
) -> crate::Result<Option<TermOrdinalMapping>> {
|
||||
let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
|
||||
let mut delta_computer = DeltaComputer::new();
|
||||
|
||||
let mut field_term_streams = Vec::new();
|
||||
let mut max_term_ords: Vec<TermOrdinal> = Vec::new();
|
||||
|
||||
let field_readers: Vec<Arc<InvertedIndexReader>> = self
|
||||
.readers
|
||||
.iter()
|
||||
.map(|reader| reader.inverted_index(indexed_field))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut field_term_streams = Vec::new();
|
||||
let mut max_term_ords: Vec<TermOrdinal> = Vec::new();
|
||||
.collect::<crate::Result<Vec<_>>>()?;
|
||||
|
||||
for field_reader in &field_readers {
|
||||
let terms = field_reader.terms();
|
||||
@@ -578,8 +583,8 @@ impl IndexMerger {
|
||||
let term_info = heap_item.streamer.value();
|
||||
let segment_reader = &self.readers[heap_item.segment_ord];
|
||||
let inverted_index: &InvertedIndexReader = &*field_readers[segment_ord];
|
||||
let segment_postings =
|
||||
inverted_index.read_postings_from_terminfo(term_info, segment_postings_option);
|
||||
let segment_postings = inverted_index
|
||||
.read_postings_from_terminfo(term_info, segment_postings_option)?;
|
||||
let delete_bitset_opt = segment_reader.delete_bitset();
|
||||
let doc_freq = if let Some(delete_bitset) = delete_bitset_opt {
|
||||
segment_postings.doc_freq_given_deletes(delete_bitset)
|
||||
@@ -648,7 +653,7 @@ impl IndexMerger {
|
||||
) -> crate::Result<HashMap<Field, TermOrdinalMapping>> {
|
||||
let mut term_ordinal_mappings = HashMap::new();
|
||||
for (field, field_entry) in self.schema.fields() {
|
||||
let fieldnorm_reader = fieldnorm_readers.get_field(field);
|
||||
let fieldnorm_reader = fieldnorm_readers.get_field(field)?;
|
||||
if field_entry.is_indexed() {
|
||||
if let Some(term_ordinal_mapping) = self.write_postings_for_field(
|
||||
field,
|
||||
@@ -665,7 +670,7 @@ impl IndexMerger {
|
||||
|
||||
fn write_storable_fields(&self, store_writer: &mut StoreWriter) -> crate::Result<()> {
|
||||
for reader in &self.readers {
|
||||
let store_reader = reader.get_store_reader();
|
||||
let store_reader = reader.get_store_reader()?;
|
||||
if reader.num_deleted_docs() > 0 {
|
||||
for doc_id in reader.doc_ids_alive() {
|
||||
let doc = store_reader.get(doc_id)?;
|
||||
@@ -720,12 +725,12 @@ mod tests {
|
||||
use crate::IndexWriter;
|
||||
use crate::Searcher;
|
||||
use crate::{schema, DocSet, SegmentId};
|
||||
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
|
||||
use byteorder::{BigEndian, ReadBytesExt};
|
||||
use futures::executor::block_on;
|
||||
use std::io::Cursor;
|
||||
use schema::FAST;
|
||||
|
||||
#[test]
|
||||
fn test_index_merger_no_deletes() {
|
||||
fn test_index_merger_no_deletes() -> crate::Result<()> {
|
||||
let mut schema_builder = schema::Schema::builder();
|
||||
let text_fieldtype = schema::TextOptions::default()
|
||||
.set_indexing_options(
|
||||
@@ -738,98 +743,77 @@ mod tests {
|
||||
let date_field = schema_builder.add_date_field("date", INDEXED);
|
||||
let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue);
|
||||
let score_field = schema_builder.add_u64_field("score", score_fieldtype);
|
||||
let bytes_score_field = schema_builder.add_bytes_field("score_bytes");
|
||||
let bytes_score_field = schema_builder.add_bytes_field("score_bytes", FAST);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let reader = index.reader().unwrap();
|
||||
let reader = index.reader()?;
|
||||
let curr_time = chrono::Utc::now();
|
||||
let add_score_bytes = |doc: &mut Document, score: u32| {
|
||||
let mut bytes = Vec::new();
|
||||
bytes
|
||||
.write_u32::<BigEndian>(score)
|
||||
.expect("failed to write u32 bytes to Vec...");
|
||||
doc.add_bytes(bytes_score_field, bytes);
|
||||
};
|
||||
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
{
|
||||
// writing the segment
|
||||
{
|
||||
let mut doc = Document::default();
|
||||
doc.add_text(text_field, "af b");
|
||||
doc.add_u64(score_field, 3);
|
||||
doc.add_date(date_field, &curr_time);
|
||||
add_score_bytes(&mut doc, 3);
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
{
|
||||
let mut doc = Document::default();
|
||||
doc.add_text(text_field, "a b c");
|
||||
doc.add_u64(score_field, 5);
|
||||
add_score_bytes(&mut doc, 5);
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
{
|
||||
let mut doc = Document::default();
|
||||
doc.add_text(text_field, "a b c d");
|
||||
doc.add_u64(score_field, 7);
|
||||
add_score_bytes(&mut doc, 7);
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
index_writer.commit().expect("committed");
|
||||
}
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
// writing the segment
|
||||
index_writer.add_document(doc!(
|
||||
text_field => "af b",
|
||||
score_field => 3u64,
|
||||
date_field => curr_time,
|
||||
bytes_score_field => 3u32.to_be_bytes().as_ref()
|
||||
));
|
||||
|
||||
{
|
||||
// writing the segment
|
||||
{
|
||||
let mut doc = Document::default();
|
||||
doc.add_text(text_field, "af b");
|
||||
doc.add_date(date_field, &curr_time);
|
||||
doc.add_u64(score_field, 11);
|
||||
add_score_bytes(&mut doc, 11);
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
{
|
||||
let mut doc = Document::default();
|
||||
doc.add_text(text_field, "a b c g");
|
||||
doc.add_u64(score_field, 13);
|
||||
add_score_bytes(&mut doc, 13);
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
index_writer.commit().expect("Commit failed");
|
||||
}
|
||||
index_writer.add_document(doc!(
|
||||
text_field => "a b c",
|
||||
score_field => 5u64,
|
||||
bytes_score_field => 5u32.to_be_bytes().as_ref()
|
||||
));
|
||||
index_writer.add_document(doc!(
|
||||
text_field => "a b c d",
|
||||
score_field => 7u64,
|
||||
bytes_score_field => 7u32.to_be_bytes().as_ref()
|
||||
));
|
||||
index_writer.commit()?;
|
||||
// writing the segment
|
||||
index_writer.add_document(doc!(
|
||||
text_field => "af b",
|
||||
date_field => curr_time,
|
||||
score_field => 11u64,
|
||||
bytes_score_field => 11u32.to_be_bytes().as_ref()
|
||||
));
|
||||
index_writer.add_document(doc!(
|
||||
text_field => "a b c g",
|
||||
score_field => 13u64,
|
||||
bytes_score_field => 13u32.to_be_bytes().as_ref()
|
||||
));
|
||||
index_writer.commit()?;
|
||||
}
|
||||
{
|
||||
let segment_ids = index
|
||||
.searchable_segment_ids()
|
||||
.expect("Searchable segments failed.");
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
|
||||
index_writer.wait_merging_threads().unwrap();
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
block_on(index_writer.merge(&segment_ids))?;
|
||||
index_writer.wait_merging_threads()?;
|
||||
}
|
||||
{
|
||||
reader.reload().unwrap();
|
||||
reader.reload()?;
|
||||
let searcher = reader.searcher();
|
||||
let get_doc_ids = |terms: Vec<Term>| {
|
||||
let query = BooleanQuery::new_multiterms_query(terms);
|
||||
let top_docs = searcher.search(&query, &TEST_COLLECTOR_WITH_SCORE).unwrap();
|
||||
top_docs.docs().to_vec()
|
||||
searcher
|
||||
.search(&query, &TEST_COLLECTOR_WITH_SCORE)
|
||||
.map(|top_docs| top_docs.docs().to_vec())
|
||||
};
|
||||
{
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "a")])?,
|
||||
vec![DocAddress(0, 1), DocAddress(0, 2), DocAddress(0, 4)]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "af")])?,
|
||||
vec![DocAddress(0, 0), DocAddress(0, 3)]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "g")]),
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "g")])?,
|
||||
vec![DocAddress(0, 4)]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
|
||||
get_doc_ids(vec![Term::from_field_text(text_field, "b")])?,
|
||||
vec![
|
||||
DocAddress(0, 0),
|
||||
DocAddress(0, 1),
|
||||
@@ -839,60 +823,57 @@ mod tests {
|
||||
]
|
||||
);
|
||||
assert_eq!(
|
||||
get_doc_ids(vec![Term::from_field_date(date_field, &curr_time)]),
|
||||
get_doc_ids(vec![Term::from_field_date(date_field, &curr_time)])?,
|
||||
vec![DocAddress(0, 0), DocAddress(0, 3)]
|
||||
);
|
||||
}
|
||||
{
|
||||
let doc = searcher.doc(DocAddress(0, 0)).unwrap();
|
||||
let doc = searcher.doc(DocAddress(0, 0))?;
|
||||
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("af b"));
|
||||
}
|
||||
{
|
||||
let doc = searcher.doc(DocAddress(0, 1)).unwrap();
|
||||
let doc = searcher.doc(DocAddress(0, 1))?;
|
||||
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c"));
|
||||
}
|
||||
{
|
||||
let doc = searcher.doc(DocAddress(0, 2)).unwrap();
|
||||
let doc = searcher.doc(DocAddress(0, 2))?;
|
||||
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c d"));
|
||||
}
|
||||
{
|
||||
let doc = searcher.doc(DocAddress(0, 3)).unwrap();
|
||||
let doc = searcher.doc(DocAddress(0, 3))?;
|
||||
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("af b"));
|
||||
}
|
||||
{
|
||||
let doc = searcher.doc(DocAddress(0, 4)).unwrap();
|
||||
let doc = searcher.doc(DocAddress(0, 4))?;
|
||||
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c g"));
|
||||
}
|
||||
{
|
||||
let get_fast_vals = |terms: Vec<Term>| {
|
||||
let query = BooleanQuery::new_multiterms_query(terms);
|
||||
searcher
|
||||
.search(&query, &FastFieldTestCollector::for_field(score_field))
|
||||
.unwrap()
|
||||
searcher.search(&query, &FastFieldTestCollector::for_field(score_field))
|
||||
};
|
||||
let get_fast_vals_bytes = |terms: Vec<Term>| {
|
||||
let query = BooleanQuery::new_multiterms_query(terms);
|
||||
searcher
|
||||
.search(
|
||||
&query,
|
||||
&BytesFastFieldTestCollector::for_field(bytes_score_field),
|
||||
)
|
||||
.expect("failed to search")
|
||||
searcher.search(
|
||||
&query,
|
||||
&BytesFastFieldTestCollector::for_field(bytes_score_field),
|
||||
)
|
||||
};
|
||||
assert_eq!(
|
||||
get_fast_vals(vec![Term::from_field_text(text_field, "a")]),
|
||||
get_fast_vals(vec![Term::from_field_text(text_field, "a")])?,
|
||||
vec![5, 7, 13]
|
||||
);
|
||||
assert_eq!(
|
||||
get_fast_vals_bytes(vec![Term::from_field_text(text_field, "a")]),
|
||||
get_fast_vals_bytes(vec![Term::from_field_text(text_field, "a")])?,
|
||||
vec![0, 0, 0, 5, 0, 0, 0, 7, 0, 0, 0, 13]
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_index_merger_with_deletes() {
|
||||
fn test_index_merger_with_deletes() -> crate::Result<()> {
|
||||
let mut schema_builder = schema::Schema::builder();
|
||||
let text_fieldtype = schema::TextOptions::default()
|
||||
.set_indexing_options(
|
||||
@@ -902,27 +883,26 @@ mod tests {
|
||||
let text_field = schema_builder.add_text_field("text", text_fieldtype);
|
||||
let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue);
|
||||
let score_field = schema_builder.add_u64_field("score", score_fieldtype);
|
||||
let bytes_score_field = schema_builder.add_bytes_field("score_bytes");
|
||||
let bytes_score_field = schema_builder.add_bytes_field("score_bytes", FAST);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let reader = index.reader().unwrap();
|
||||
let search_term = |searcher: &Searcher, term: Term| {
|
||||
let collector = FastFieldTestCollector::for_field(score_field);
|
||||
let bytes_collector = BytesFastFieldTestCollector::for_field(bytes_score_field);
|
||||
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
|
||||
let (scores, bytes) = searcher
|
||||
searcher
|
||||
.search(&term_query, &(collector, bytes_collector))
|
||||
.unwrap();
|
||||
let mut score_bytes = Cursor::new(bytes);
|
||||
for &score in &scores {
|
||||
assert_eq!(score as u32, score_bytes.read_u32::<BigEndian>().unwrap());
|
||||
}
|
||||
|
||||
scores
|
||||
.map(|(scores, bytes)| {
|
||||
let mut score_bytes = &bytes[..];
|
||||
for &score in &scores {
|
||||
assert_eq!(score as u32, score_bytes.read_u32::<BigEndian>().unwrap());
|
||||
}
|
||||
scores
|
||||
})
|
||||
};
|
||||
|
||||
let empty_vec = Vec::<u64>::new();
|
||||
|
||||
{
|
||||
// a first commit
|
||||
index_writer.add_document(doc!(
|
||||
@@ -941,26 +921,26 @@ mod tests {
|
||||
score_field => 3u64,
|
||||
bytes_score_field => vec![0u8, 0, 0, 3],
|
||||
));
|
||||
index_writer.commit().expect("committed");
|
||||
reader.reload().unwrap();
|
||||
index_writer.commit()?;
|
||||
reader.reload()?;
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.num_docs(), 2);
|
||||
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
|
||||
assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "a")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "a"))?,
|
||||
vec![1]
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "b")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "b"))?,
|
||||
vec![1]
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "c")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "c"))?,
|
||||
vec![3]
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "d")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "d"))?,
|
||||
vec![1, 3]
|
||||
);
|
||||
}
|
||||
@@ -988,8 +968,8 @@ mod tests {
|
||||
score_field => 7_000u64,
|
||||
bytes_score_field => vec![0u8, 0, 27, 88],
|
||||
));
|
||||
index_writer.commit().expect("committed");
|
||||
reader.reload().unwrap();
|
||||
index_writer.commit()?;
|
||||
reader.reload()?;
|
||||
let searcher = reader.searcher();
|
||||
|
||||
assert_eq!(searcher.segment_readers().len(), 2);
|
||||
@@ -999,31 +979,31 @@ mod tests {
|
||||
assert_eq!(searcher.segment_readers()[1].num_docs(), 1);
|
||||
assert_eq!(searcher.segment_readers()[1].max_doc(), 3);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "a")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "a"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "b")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "b"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "c")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "c"))?,
|
||||
vec![3]
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "d")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "d"))?,
|
||||
vec![3]
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "e")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "e"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "f")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "f"))?,
|
||||
vec![6_000]
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "g")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "g"))?,
|
||||
vec![6_000, 7_000]
|
||||
);
|
||||
|
||||
@@ -1045,42 +1025,40 @@ mod tests {
|
||||
}
|
||||
{
|
||||
// merging the segments
|
||||
let segment_ids = index
|
||||
.searchable_segment_ids()
|
||||
.expect("Searchable segments failed.");
|
||||
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
|
||||
reader.reload().unwrap();
|
||||
let segment_ids = index.searchable_segment_ids()?;
|
||||
block_on(index_writer.merge(&segment_ids))?;
|
||||
reader.reload()?;
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.segment_readers().len(), 1);
|
||||
assert_eq!(searcher.num_docs(), 3);
|
||||
assert_eq!(searcher.segment_readers()[0].num_docs(), 3);
|
||||
assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "a")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "a"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "b")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "b"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "c")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "c"))?,
|
||||
vec![3]
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "d")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "d"))?,
|
||||
vec![3]
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "e")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "e"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "f")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "f"))?,
|
||||
vec![6_000]
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "g")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "g"))?,
|
||||
vec![6_000, 7_000]
|
||||
);
|
||||
let score_field_reader = searcher
|
||||
@@ -1094,40 +1072,40 @@ mod tests {
|
||||
{
|
||||
// test a commit with only deletes
|
||||
index_writer.delete_term(Term::from_field_text(text_field, "c"));
|
||||
index_writer.commit().unwrap();
|
||||
index_writer.commit()?;
|
||||
|
||||
reader.reload().unwrap();
|
||||
reader.reload()?;
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.segment_readers().len(), 1);
|
||||
assert_eq!(searcher.num_docs(), 2);
|
||||
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
|
||||
assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "a")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "a"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "b")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "b"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "c")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "c"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "d")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "d"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "e")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "e"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "f")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "f"))?,
|
||||
vec![6_000]
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "g")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "g"))?,
|
||||
vec![6_000, 7_000]
|
||||
);
|
||||
let score_field_reader = searcher
|
||||
@@ -1140,11 +1118,9 @@ mod tests {
|
||||
}
|
||||
{
|
||||
// Test merging a single segment in order to remove deletes.
|
||||
let segment_ids = index
|
||||
.searchable_segment_ids()
|
||||
.expect("Searchable segments failed.");
|
||||
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
|
||||
reader.reload().unwrap();
|
||||
let segment_ids = index.searchable_segment_ids()?;
|
||||
block_on(index_writer.merge(&segment_ids))?;
|
||||
reader.reload()?;
|
||||
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.segment_readers().len(), 1);
|
||||
@@ -1152,31 +1128,31 @@ mod tests {
|
||||
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
|
||||
assert_eq!(searcher.segment_readers()[0].max_doc(), 2);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "a")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "a"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "b")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "b"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "c")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "c"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "d")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "d"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "e")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "e"))?,
|
||||
empty_vec
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "f")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "f"))?,
|
||||
vec![6_000]
|
||||
);
|
||||
assert_eq!(
|
||||
search_term(&searcher, Term::from_field_text(text_field, "g")),
|
||||
search_term(&searcher, Term::from_field_text(text_field, "g"))?,
|
||||
vec![6_000, 7_000]
|
||||
);
|
||||
let score_field_reader = searcher
|
||||
@@ -1191,17 +1167,16 @@ mod tests {
|
||||
{
|
||||
// Test removing all docs
|
||||
index_writer.delete_term(Term::from_field_text(text_field, "g"));
|
||||
index_writer.commit().unwrap();
|
||||
let segment_ids = index
|
||||
.searchable_segment_ids()
|
||||
.expect("Searchable segments failed.");
|
||||
reader.reload().unwrap();
|
||||
index_writer.commit()?;
|
||||
let segment_ids = index.searchable_segment_ids()?;
|
||||
reader.reload()?;
|
||||
|
||||
let searcher = reader.searcher();
|
||||
assert!(segment_ids.is_empty());
|
||||
assert!(searcher.segment_readers().is_empty());
|
||||
assert_eq!(searcher.num_docs(), 0);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1558,7 +1533,7 @@ mod tests {
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let mut term_scorer = term_query
|
||||
.specialized_weight(&searcher, true)
|
||||
.specialized_weight(&searcher, true)?
|
||||
.specialized_scorer(searcher.segment_reader(0u32), 1.0)?;
|
||||
assert_eq!(term_scorer.doc(), 0);
|
||||
assert_nearly_equals!(term_scorer.block_max_score(), 0.0079681855);
|
||||
@@ -1573,7 +1548,7 @@ mod tests {
|
||||
assert_eq!(searcher.segment_readers().len(), 2);
|
||||
for segment_reader in searcher.segment_readers() {
|
||||
let mut term_scorer = term_query
|
||||
.specialized_weight(&searcher, true)
|
||||
.specialized_weight(&searcher, true)?
|
||||
.specialized_scorer(segment_reader, 1.0)?;
|
||||
// the difference compared to before is instrinsic to the bm25 formula. no worries there.
|
||||
for doc in segment_reader.doc_ids_alive() {
|
||||
@@ -1597,7 +1572,7 @@ mod tests {
|
||||
|
||||
let segment_reader = searcher.segment_reader(0u32);
|
||||
let mut term_scorer = term_query
|
||||
.specialized_weight(&searcher, true)
|
||||
.specialized_weight(&searcher, true)?
|
||||
.specialized_scorer(segment_reader, 1.0)?;
|
||||
// the difference compared to before is instrinsic to the bm25 formula. no worries there.
|
||||
for doc in segment_reader.doc_ids_alive() {
|
||||
|
||||
@@ -43,7 +43,7 @@ const NUM_MERGE_THREADS: usize = 4;
|
||||
/// and flushed.
|
||||
///
|
||||
/// This method is not part of tantivy's public API
|
||||
pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::Result<()> {
|
||||
pub fn save_new_metas(schema: Schema, directory: &dyn Directory) -> crate::Result<()> {
|
||||
save_metas(
|
||||
&IndexMeta {
|
||||
segments: Vec::new(),
|
||||
@@ -64,7 +64,7 @@ pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::R
|
||||
/// and flushed.
|
||||
///
|
||||
/// This method is not part of tantivy's public API
|
||||
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result<()> {
|
||||
fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate::Result<()> {
|
||||
info!("save metas");
|
||||
let mut buffer = serde_json::to_vec_pretty(metas)?;
|
||||
// Just adding a new line at the end of the buffer.
|
||||
@@ -450,9 +450,8 @@ impl SegmentUpdater {
|
||||
.into_iter()
|
||||
.map(|merge_candidate: MergeCandidate| {
|
||||
MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
merge_candidates.extend(committed_merge_candidates.into_iter());
|
||||
});
|
||||
merge_candidates.extend(committed_merge_candidates);
|
||||
|
||||
for merge_operation in merge_candidates {
|
||||
if let Err(err) = self.start_merge(merge_operation) {
|
||||
|
||||
@@ -16,7 +16,6 @@ use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
|
||||
use crate::tokenizer::{TokenStreamChain, Tokenizer};
|
||||
use crate::Opstamp;
|
||||
use crate::{DocId, SegmentComponent};
|
||||
use std::io;
|
||||
|
||||
/// Computes the initial size of the hash table.
|
||||
///
|
||||
@@ -117,7 +116,11 @@ impl SegmentWriter {
|
||||
/// Indexes a new document
|
||||
///
|
||||
/// As a user, you should rather use `IndexWriter`'s add_document.
|
||||
pub fn add_document(&mut self, add_operation: AddOperation, schema: &Schema) -> io::Result<()> {
|
||||
pub fn add_document(
|
||||
&mut self,
|
||||
add_operation: AddOperation,
|
||||
schema: &Schema,
|
||||
) -> crate::Result<()> {
|
||||
let doc_id = self.max_doc;
|
||||
let mut doc = add_operation.document;
|
||||
self.doc_opstamps.push(add_operation.opstamp);
|
||||
@@ -125,13 +128,20 @@ impl SegmentWriter {
|
||||
self.fast_field_writers.add_document(&doc);
|
||||
|
||||
for (field, field_values) in doc.get_sorted_field_values() {
|
||||
let field_options = schema.get_field_entry(field);
|
||||
if !field_options.is_indexed() {
|
||||
let field_entry = schema.get_field_entry(field);
|
||||
let make_schema_error = || {
|
||||
crate::TantivyError::SchemaError(format!(
|
||||
"Expected a {:?} for field {:?}",
|
||||
field_entry.field_type().value_type(),
|
||||
field_entry.name()
|
||||
))
|
||||
};
|
||||
if !field_entry.is_indexed() {
|
||||
continue;
|
||||
}
|
||||
let (term_buffer, multifield_postings) =
|
||||
(&mut self.term_buffer, &mut self.multifield_postings);
|
||||
match *field_options.field_type() {
|
||||
match *field_entry.field_type() {
|
||||
FieldType::HierarchicalFacet => {
|
||||
term_buffer.set_field(field);
|
||||
let facets =
|
||||
@@ -143,18 +153,20 @@ impl SegmentWriter {
|
||||
panic!("Expected hierarchical facet");
|
||||
}
|
||||
});
|
||||
for fake_str in facets {
|
||||
for facet_str in facets {
|
||||
let mut unordered_term_id_opt = None;
|
||||
FacetTokenizer.token_stream(fake_str).process(&mut |token| {
|
||||
term_buffer.set_text(&token.text);
|
||||
let unordered_term_id =
|
||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||
unordered_term_id_opt = Some(unordered_term_id);
|
||||
});
|
||||
FacetTokenizer
|
||||
.token_stream(facet_str)
|
||||
.process(&mut |token| {
|
||||
term_buffer.set_text(&token.text);
|
||||
let unordered_term_id =
|
||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||
unordered_term_id_opt = Some(unordered_term_id);
|
||||
});
|
||||
if let Some(unordered_term_id) = unordered_term_id_opt {
|
||||
self.fast_field_writers
|
||||
.get_multivalue_writer(field)
|
||||
.expect("multified writer for facet missing")
|
||||
.expect("writer for facet missing")
|
||||
.add_val(unordered_term_id);
|
||||
}
|
||||
}
|
||||
@@ -205,7 +217,11 @@ impl SegmentWriter {
|
||||
if int_option.is_indexed() {
|
||||
for field_value in field_values {
|
||||
term_buffer.set_field(field_value.field());
|
||||
term_buffer.set_u64(field_value.value().u64_value());
|
||||
let u64_val = field_value
|
||||
.value()
|
||||
.u64_value()
|
||||
.ok_or_else(make_schema_error)?;
|
||||
term_buffer.set_u64(u64_val);
|
||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||
}
|
||||
}
|
||||
@@ -214,7 +230,11 @@ impl SegmentWriter {
|
||||
if int_option.is_indexed() {
|
||||
for field_value in field_values {
|
||||
term_buffer.set_field(field_value.field());
|
||||
term_buffer.set_i64(field_value.value().date_value().timestamp());
|
||||
let date_val = field_value
|
||||
.value()
|
||||
.date_value()
|
||||
.ok_or_else(make_schema_error)?;
|
||||
term_buffer.set_i64(date_val.timestamp());
|
||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||
}
|
||||
}
|
||||
@@ -223,7 +243,11 @@ impl SegmentWriter {
|
||||
if int_option.is_indexed() {
|
||||
for field_value in field_values {
|
||||
term_buffer.set_field(field_value.field());
|
||||
term_buffer.set_i64(field_value.value().i64_value());
|
||||
let i64_val = field_value
|
||||
.value()
|
||||
.i64_value()
|
||||
.ok_or_else(make_schema_error)?;
|
||||
term_buffer.set_i64(i64_val);
|
||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||
}
|
||||
}
|
||||
@@ -232,13 +256,27 @@ impl SegmentWriter {
|
||||
if int_option.is_indexed() {
|
||||
for field_value in field_values {
|
||||
term_buffer.set_field(field_value.field());
|
||||
term_buffer.set_f64(field_value.value().f64_value());
|
||||
let f64_val = field_value
|
||||
.value()
|
||||
.f64_value()
|
||||
.ok_or_else(make_schema_error)?;
|
||||
term_buffer.set_f64(f64_val);
|
||||
multifield_postings.subscribe(doc_id, &term_buffer);
|
||||
}
|
||||
}
|
||||
}
|
||||
FieldType::Bytes => {
|
||||
// Do nothing. Bytes only supports fast fields.
|
||||
FieldType::Bytes(ref option) => {
|
||||
if option.is_indexed() {
|
||||
for field_value in field_values {
|
||||
term_buffer.set_field(field_value.field());
|
||||
let bytes = field_value
|
||||
.value()
|
||||
.bytes_value()
|
||||
.ok_or_else(make_schema_error)?;
|
||||
term_buffer.set_bytes(bytes);
|
||||
self.multifield_postings.subscribe(doc_id, &term_buffer);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
99
src/lib.rs
@@ -105,7 +105,7 @@ extern crate serde_json;
extern crate log;

#[macro_use]
extern crate failure;
extern crate thiserror;

#[cfg(all(test, feature = "unstable"))]
extern crate test;
@@ -134,7 +134,7 @@ mod core;
mod indexer;

#[allow(unused_doc_comments)]
mod error;
pub mod error;
pub mod tokenizer;

pub mod collector;
@@ -157,6 +157,7 @@ pub use self::snippet::{Snippet, SnippetGenerator};

mod docset;
pub use self::docset::{DocSet, TERMINATED};
pub use crate::common::HasLen;
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
pub use crate::core::{Executor, SegmentComponent};
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
@@ -276,7 +277,7 @@ impl DocAddress {
///
/// The id used for the segment is actually an ordinal
/// in the list of `Segment`s held by a `Searcher`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct DocAddress(pub SegmentLocalId, pub DocId);

#[cfg(test)]
@@ -383,31 +384,23 @@ mod tests {
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
{
|
||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||
index_writer.commit()?;
|
||||
}
|
||||
{
|
||||
index_writer.add_document(doc!(text_field=>"a"));
|
||||
index_writer.add_document(doc!(text_field=>"a a"));
|
||||
index_writer.commit()?;
|
||||
}
|
||||
{
|
||||
index_writer.add_document(doc!(text_field=>"c"));
|
||||
index_writer.commit()?;
|
||||
}
|
||||
{
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let term_a = Term::from_field_text(text_field, "a");
|
||||
assert_eq!(searcher.doc_freq(&term_a), 3);
|
||||
let term_b = Term::from_field_text(text_field, "b");
|
||||
assert_eq!(searcher.doc_freq(&term_b), 1);
|
||||
let term_c = Term::from_field_text(text_field, "c");
|
||||
assert_eq!(searcher.doc_freq(&term_c), 2);
|
||||
let term_d = Term::from_field_text(text_field, "d");
|
||||
assert_eq!(searcher.doc_freq(&term_d), 0);
|
||||
}
|
||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||
index_writer.commit()?;
|
||||
index_writer.add_document(doc!(text_field=>"a"));
|
||||
index_writer.add_document(doc!(text_field=>"a a"));
|
||||
index_writer.commit()?;
|
||||
index_writer.add_document(doc!(text_field=>"c"));
|
||||
index_writer.commit()?;
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let term_a = Term::from_field_text(text_field, "a");
|
||||
assert_eq!(searcher.doc_freq(&term_a)?, 3);
|
||||
let term_b = Term::from_field_text(text_field, "b");
|
||||
assert_eq!(searcher.doc_freq(&term_b)?, 1);
|
||||
let term_c = Term::from_field_text(text_field, "c");
|
||||
assert_eq!(searcher.doc_freq(&term_c)?, 2);
|
||||
let term_d = Term::from_field_text(text_field, "d");
|
||||
assert_eq!(searcher.doc_freq(&term_d)?, 0);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -504,13 +497,13 @@ mod tests {
|
||||
reader.reload()?;
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let inverted_index = segment_reader.inverted_index(text_field);
|
||||
let inverted_index = segment_reader.inverted_index(text_field)?;
|
||||
assert!(inverted_index
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.is_none());
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||
assert_eq!(postings.doc(), 5);
|
||||
@@ -518,7 +511,7 @@ mod tests {
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||
assert_eq!(postings.doc(), 3);
|
||||
@@ -540,14 +533,14 @@ mod tests {
|
||||
reader.reload()?;
|
||||
let searcher = reader.searcher();
|
||||
let seg_reader = searcher.segment_reader(0);
|
||||
let inverted_index = seg_reader.inverted_index(term_abcd.field());
|
||||
let inverted_index = seg_reader.inverted_index(term_abcd.field())?;
|
||||
|
||||
assert!(inverted_index
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.is_none());
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert!(advance_undeleted(&mut postings, seg_reader));
|
||||
assert_eq!(postings.doc(), 5);
|
||||
@@ -555,7 +548,7 @@ mod tests {
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert!(advance_undeleted(&mut postings, seg_reader));
|
||||
assert_eq!(postings.doc(), 3);
|
||||
@@ -577,19 +570,19 @@ mod tests {
|
||||
reader.reload()?;
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let inverted_index = segment_reader.inverted_index(term_abcd.field());
|
||||
let inverted_index = segment_reader.inverted_index(term_abcd.field())?;
|
||||
assert!(inverted_index
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.is_none());
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert!(!advance_undeleted(&mut postings, segment_reader));
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||
assert_eq!(postings.doc(), 3);
|
||||
@@ -599,7 +592,7 @@ mod tests {
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert!(advance_undeleted(&mut postings, segment_reader));
|
||||
assert_eq!(postings.doc(), 4);
|
||||
@@ -624,8 +617,8 @@ mod tests {
|
||||
let term = Term::from_field_u64(field, 1u64);
|
||||
let mut postings = searcher
|
||||
.segment_reader(0)
|
||||
.inverted_index(term.field())
|
||||
.read_postings(&term, IndexRecordOption::Basic)
|
||||
.inverted_index(term.field())?
|
||||
.read_postings(&term, IndexRecordOption::Basic)?
|
||||
.unwrap();
|
||||
assert_eq!(postings.doc(), 0);
|
||||
assert_eq!(postings.advance(), TERMINATED);
|
||||
@@ -648,8 +641,8 @@ mod tests {
|
||||
let term = Term::from_field_i64(value_field, negative_val);
|
||||
let mut postings = searcher
|
||||
.segment_reader(0)
|
||||
.inverted_index(term.field())
|
||||
.read_postings(&term, IndexRecordOption::Basic)
|
||||
.inverted_index(term.field())?
|
||||
.read_postings(&term, IndexRecordOption::Basic)?
|
||||
.unwrap();
|
||||
assert_eq!(postings.doc(), 0);
|
||||
assert_eq!(postings.advance(), TERMINATED);
|
||||
@@ -672,8 +665,8 @@ mod tests {
|
||||
let term = Term::from_field_f64(value_field, val);
|
||||
let mut postings = searcher
|
||||
.segment_reader(0)
|
||||
.inverted_index(term.field())
|
||||
.read_postings(&term, IndexRecordOption::Basic)
|
||||
.inverted_index(term.field())?
|
||||
.read_postings(&term, IndexRecordOption::Basic)?
|
||||
.unwrap();
|
||||
assert_eq!(postings.doc(), 0);
|
||||
assert_eq!(postings.advance(), TERMINATED);
|
||||
@@ -693,7 +686,7 @@ mod tests {
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let inverted_index = segment_reader.inverted_index(absent_field); //< should not panic
|
||||
let inverted_index = segment_reader.inverted_index(absent_field)?;
|
||||
assert_eq!(inverted_index.terms().num_terms(), 0);
|
||||
Ok(())
|
||||
}
|
||||
@@ -743,14 +736,14 @@ mod tests {
|
||||
let index_reader = index.reader()?;
|
||||
let searcher = index_reader.searcher();
|
||||
let reader = searcher.segment_reader(0);
|
||||
let inverted_index = reader.inverted_index(text_field);
|
||||
let inverted_index = reader.inverted_index(text_field)?;
|
||||
let term_abcd = Term::from_field_text(text_field, "abcd");
|
||||
assert!(inverted_index
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.is_none());
|
||||
let term_af = Term::from_field_text(text_field, "af");
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert_eq!(postings.doc(), 0);
|
||||
assert_eq!(postings.term_freq(), 3);
|
||||
@@ -842,11 +835,11 @@ mod tests {
|
||||
text_field => "some other value",
|
||||
other_text_field => "short");
|
||||
assert_eq!(document.len(), 3);
|
||||
let values = document.get_all(text_field);
|
||||
let values: Vec<&Value> = document.get_all(text_field).collect();
|
||||
assert_eq!(values.len(), 2);
|
||||
assert_eq!(values[0].text(), Some("tantivy"));
|
||||
assert_eq!(values[1].text(), Some("some other value"));
|
||||
let values = document.get_all(other_text_field);
|
||||
let values: Vec<&Value> = document.get_all(other_text_field).collect();
|
||||
assert_eq!(values.len(), 1);
|
||||
assert_eq!(values[0].text(), Some("short"));
|
||||
}
|
||||
|
||||
@@ -38,11 +38,11 @@ const LONG_SKIP_INTERVAL: u64 = (LONG_SKIP_IN_BLOCKS * COMPRESSION_BLOCK_SIZE) a
pub mod tests {

use super::PositionSerializer;
use crate::directory::ReadOnlySource;
use crate::positions::reader::PositionReader;
use crate::{common::HasLen, directory::FileSlice};
use std::iter;

fn create_stream_buffer(vals: &[u32]) -> (ReadOnlySource, ReadOnlySource) {
fn create_stream_buffer(vals: &[u32]) -> (FileSlice, FileSlice) {
let mut skip_buffer = vec![];
let mut stream_buffer = vec![];
{
@@ -53,10 +53,7 @@ pub mod tests {
}
serializer.close().unwrap();
}
(
ReadOnlySource::from(stream_buffer),
ReadOnlySource::from(skip_buffer),
)
(FileSlice::from(stream_buffer), FileSlice::from(skip_buffer))
}

#[test]
@@ -65,7 +62,7 @@ pub mod tests {
|
||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||
assert_eq!(skip.len(), 12);
|
||||
assert_eq!(stream.len(), 1168);
|
||||
let mut position_reader = PositionReader::new(stream, skip, 0u64);
|
||||
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap();
|
||||
for &n in &[1, 10, 127, 128, 130, 312] {
|
||||
let mut v = vec![0u32; n];
|
||||
position_reader.read(0, &mut v[..]);
|
||||
@@ -81,7 +78,7 @@ pub mod tests {
|
||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||
assert_eq!(skip.len(), 12);
|
||||
assert_eq!(stream.len(), 1168);
|
||||
let mut position_reader = PositionReader::new(stream, skip, 0u64);
|
||||
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap();
|
||||
for &offset in &[1u64, 10u64, 127u64, 128u64, 130u64, 312u64] {
|
||||
for &len in &[1, 10, 130, 500] {
|
||||
let mut v = vec![0u32; len];
|
||||
@@ -100,7 +97,7 @@ pub mod tests {
|
||||
assert_eq!(skip.len(), 12);
|
||||
assert_eq!(stream.len(), 1168);
|
||||
|
||||
let mut position_reader = PositionReader::new(stream, skip, 0u64);
|
||||
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap();
|
||||
let mut buf = [0u32; 7];
|
||||
let mut c = 0;
|
||||
|
||||
@@ -122,7 +119,7 @@ pub mod tests {
|
||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||
assert_eq!(skip.len(), 15_749);
|
||||
assert_eq!(stream.len(), 4_987_872);
|
||||
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0);
|
||||
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0).unwrap();
|
||||
let mut buf = [0u32; 256];
|
||||
position_reader.read(128, &mut buf);
|
||||
for i in 0..256 {
|
||||
@@ -142,7 +139,8 @@ pub mod tests {
|
||||
assert_eq!(skip.len(), 15_749);
|
||||
assert_eq!(stream.len(), 4_987_872);
|
||||
let mut buf = [0u32; 1];
|
||||
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 200_000);
|
||||
let mut position_reader =
|
||||
PositionReader::new(stream.clone(), skip.clone(), 200_000).unwrap();
|
||||
position_reader.read(230, &mut buf);
|
||||
position_reader.read(9, &mut buf);
|
||||
}
|
||||
@@ -157,7 +155,7 @@ pub mod tests {
|
||||
}
|
||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||
let mut buf = Vec::new();
|
||||
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0);
|
||||
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0).unwrap();
|
||||
let mut offset = 0;
|
||||
for i in 1..24 {
|
||||
buf.resize(i, 0);
|
||||
@@ -175,7 +173,7 @@ pub mod tests {
|
||||
let (stream, skip) = create_stream_buffer(&v[..]);
|
||||
assert_eq!(skip.len(), 15_749);
|
||||
assert_eq!(stream.len(), 1_000_000);
|
||||
let mut position_reader = PositionReader::new(stream, skip, 128 * 1024);
|
||||
let mut position_reader = PositionReader::new(stream, skip, 128 * 1024).unwrap();
|
||||
let mut buf = [0u32; 1];
|
||||
position_reader.read(0, &mut buf);
|
||||
assert_eq!(buf[0], CONST_VAL);
|
||||
@@ -194,7 +192,8 @@ pub mod tests {
|
||||
128 * 1024 + 7,
|
||||
128 * 10 * 1024 + 10,
|
||||
] {
|
||||
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), offset);
|
||||
let mut position_reader =
|
||||
PositionReader::new(stream.clone(), skip.clone(), offset).unwrap();
|
||||
let mut buf = [0u32; 1];
|
||||
position_reader.read(0, &mut buf);
|
||||
assert_eq!(buf[0], offset as u32);
|
||||
|
||||
@@ -1,8 +1,13 @@
use std::io;

use crate::common::{BinarySerializable, FixedSize};
use crate::directory::ReadOnlySource;
use crate::directory::FileSlice;
use crate::directory::OwnedBytes;
use crate::positions::COMPRESSION_BLOCK_SIZE;
use crate::positions::LONG_SKIP_INTERVAL;
use crate::positions::LONG_SKIP_IN_BLOCKS;
use bitpacking::{BitPacker, BitPacker4x};

/// Positions works as a long sequence of compressed block.
/// All terms are chained one after the other.
///
@@ -23,28 +28,28 @@ use crate::positions::LONG_SKIP_IN_BLOCKS;
/// A given block obviously takes `(128 x num_bit_for_the_block / num_bits_in_a_byte)`,
/// so skipping a block without decompressing it is just a matter of advancing that many
/// bytes.
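A minimal sketch of the arithmetic described in the comment above, assuming `COMPRESSION_BLOCK_SIZE` is 128; the helper name is illustrative and not part of this diff:

    // Illustrative helper (hypothetical): byte length of one bit-packed block of
    // 128 position deltas, given the number of bits used per value in that block.
    fn packed_block_num_bytes(num_bits: u8) -> usize {
        (128 * num_bits as usize) / 8
    }
    // Skipping a block without decompressing it then just means advancing
    // `packed_block_num_bytes(num_bits)` bytes in the positions stream.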
use bitpacking::{BitPacker, BitPacker4x};
|
||||
use owned_read::OwnedRead;
|
||||
|
||||
struct Positions {
|
||||
bit_packer: BitPacker4x,
|
||||
skip_source: ReadOnlySource,
|
||||
position_source: ReadOnlySource,
|
||||
long_skip_source: ReadOnlySource,
|
||||
skip_file: FileSlice,
|
||||
position_file: FileSlice,
|
||||
long_skip_data: OwnedBytes,
|
||||
}
|
||||
|
||||
impl Positions {
|
||||
pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions {
|
||||
let (body, footer) = skip_source.split_from_end(u32::SIZE_IN_BYTES);
|
||||
let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted");
|
||||
let (skip_source, long_skip_source) =
|
||||
pub fn new(position_file: FileSlice, skip_file: FileSlice) -> io::Result<Positions> {
|
||||
let (body, footer) = skip_file.split_from_end(u32::SIZE_IN_BYTES);
|
||||
let footer_data = footer.read_bytes()?;
|
||||
let num_long_skips = u32::deserialize(&mut footer_data.as_slice())?;
|
||||
let (skip_file, long_skip_file) =
|
||||
body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize));
|
||||
Positions {
|
||||
let long_skip_data = long_skip_file.read_bytes()?;
|
||||
Ok(Positions {
|
||||
bit_packer: BitPacker4x::new(),
|
||||
skip_source,
|
||||
long_skip_source,
|
||||
position_source,
|
||||
}
|
||||
skip_file,
|
||||
long_skip_data,
|
||||
position_file,
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the offset of the block associated to the given `long_skip_id`.
|
||||
@@ -54,19 +59,23 @@ impl Positions {
|
||||
if long_skip_id == 0 {
|
||||
return 0;
|
||||
}
|
||||
let long_skip_slice = self.long_skip_source.as_slice();
|
||||
let long_skip_slice = self.long_skip_data.as_slice();
|
||||
let mut long_skip_blocks: &[u8] = &long_skip_slice[(long_skip_id - 1) * 8..][..8];
|
||||
u64::deserialize(&mut long_skip_blocks).expect("Index corrupted")
|
||||
}
|
||||
|
||||
fn reader(&self, offset: u64) -> PositionReader {
|
||||
fn reader(&self, offset: u64) -> io::Result<PositionReader> {
|
||||
let long_skip_id = (offset / LONG_SKIP_INTERVAL) as usize;
|
||||
let offset_num_bytes: u64 = self.long_skip(long_skip_id);
|
||||
let mut position_read = OwnedRead::new(self.position_source.clone());
|
||||
position_read.advance(offset_num_bytes as usize);
|
||||
let mut skip_read = OwnedRead::new(self.skip_source.clone());
|
||||
skip_read.advance(long_skip_id * LONG_SKIP_IN_BLOCKS);
|
||||
PositionReader {
|
||||
let position_read = self
|
||||
.position_file
|
||||
.slice_from(offset_num_bytes as usize)
|
||||
.read_bytes()?;
|
||||
let skip_read = self
|
||||
.skip_file
|
||||
.slice_from(long_skip_id * LONG_SKIP_IN_BLOCKS)
|
||||
.read_bytes()?;
|
||||
Ok(PositionReader {
|
||||
bit_packer: self.bit_packer,
|
||||
skip_read,
|
||||
position_read,
|
||||
@@ -74,14 +83,14 @@ impl Positions {
|
||||
block_offset: std::i64::MAX as u64,
|
||||
anchor_offset: (long_skip_id as u64) * LONG_SKIP_INTERVAL,
|
||||
abs_offset: offset,
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct PositionReader {
|
||||
skip_read: OwnedRead,
|
||||
position_read: OwnedRead,
|
||||
skip_read: OwnedBytes,
|
||||
position_read: OwnedBytes,
|
||||
bit_packer: BitPacker4x,
|
||||
buffer: Box<[u32; COMPRESSION_BLOCK_SIZE]>,
|
||||
|
||||
@@ -93,11 +102,12 @@ pub struct PositionReader {
|
||||
|
||||
impl PositionReader {
|
||||
pub fn new(
|
||||
position_source: ReadOnlySource,
|
||||
skip_source: ReadOnlySource,
|
||||
position_file: FileSlice,
|
||||
skip_file: FileSlice,
|
||||
offset: u64,
|
||||
) -> PositionReader {
|
||||
Positions::new(position_source, skip_source).reader(offset)
|
||||
) -> io::Result<PositionReader> {
|
||||
let positions = Positions::new(position_file, skip_file)?;
|
||||
positions.reader(offset)
|
||||
}
|
||||
|
||||
fn advance_num_blocks(&mut self, num_blocks: usize) {
|
||||
@@ -131,7 +141,7 @@ impl PositionReader {
|
||||
self.advance_num_blocks(num_blocks_to_skip);
|
||||
self.anchor_offset = offset - (offset % COMPRESSION_BLOCK_SIZE as u64);
|
||||
self.block_offset = self.anchor_offset;
|
||||
let num_bits = self.skip_read.get(0);
|
||||
let num_bits = self.skip_read.as_slice()[0];
|
||||
self.bit_packer
|
||||
.decompress(self.position_read.as_ref(), self.buffer.as_mut(), num_bits);
|
||||
} else {
|
||||
@@ -141,7 +151,7 @@ impl PositionReader {
|
||||
self.anchor_offset = self.block_offset;
|
||||
}
|
||||
|
||||
let mut num_bits = self.skip_read.get(0);
|
||||
let mut num_bits = self.skip_read.as_slice()[0];
|
||||
let mut position_data = self.position_read.as_ref();
|
||||
|
||||
for i in 1.. {
|
||||
@@ -155,7 +165,7 @@ impl PositionReader {
|
||||
output = &mut output[remaining_in_block..];
|
||||
offset += remaining_in_block as u64;
|
||||
position_data = &position_data[(num_bits as usize * COMPRESSION_BLOCK_SIZE / 8)..];
|
||||
num_bits = self.skip_read.get(i);
|
||||
num_bits = self.skip_read.as_slice()[i];
|
||||
self.bit_packer
|
||||
.decompress(position_data, self.buffer.as_mut(), num_bits);
|
||||
self.block_offset += COMPRESSION_BLOCK_SIZE as u64;
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
use std::io;
|
||||
|
||||
use crate::common::{BinarySerializable, VInt};
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::directory::FileSlice;
|
||||
use crate::directory::OwnedBytes;
|
||||
use crate::fieldnorm::FieldNormReader;
|
||||
use crate::postings::compression::{
|
||||
AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE,
|
||||
@@ -34,7 +37,7 @@ pub struct BlockSegmentPostings {
|
||||
|
||||
doc_freq: u32,
|
||||
|
||||
data: ReadOnlySource,
|
||||
data: OwnedBytes,
|
||||
pub(crate) skip_reader: SkipReader,
|
||||
}
|
||||
|
||||
@@ -72,37 +75,34 @@ fn decode_vint_block(

fn split_into_skips_and_postings(
doc_freq: u32,
data: ReadOnlySource,
) -> (Option<ReadOnlySource>, ReadOnlySource) {
mut bytes: OwnedBytes,
) -> (Option<OwnedBytes>, OwnedBytes) {
if doc_freq < COMPRESSION_BLOCK_SIZE as u32 {
return (None, data);
return (None, bytes);
}
let mut data_byte_arr = data.as_slice();
let skip_len = VInt::deserialize(&mut data_byte_arr)
.expect("Data corrupted")
.0 as usize;
let vint_len = data.len() - data_byte_arr.len();
let (skip_data, postings_data) = data.slice_from(vint_len).split(skip_len);
let skip_len = VInt::deserialize(&mut bytes).expect("Data corrupted").0 as usize;
let (skip_data, postings_data) = bytes.split(skip_len);
(Some(skip_data), postings_data)
}

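A minimal writer-side sketch of the layout consumed by `split_into_skips_and_postings` (the helper below is hypothetical, not part of this diff): once a term has at least one full compression block, its postings bytes start with a `VInt` skip length, followed by the skip data, then the postings proper.

    // Hypothetical sketch of the layout read back above.
    fn concat_skips_and_postings(skip: &[u8], postings: &[u8]) -> Vec<u8> {
        let mut out = Vec::new();
        VInt(skip.len() as u64)
            .serialize(&mut out)
            .expect("writing to a Vec cannot fail");
        out.extend_from_slice(skip);
        out.extend_from_slice(postings);
        out
    }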
impl BlockSegmentPostings {
|
||||
pub(crate) fn from_data(
|
||||
pub(crate) fn open(
|
||||
doc_freq: u32,
|
||||
data: ReadOnlySource,
|
||||
data: FileSlice,
|
||||
record_option: IndexRecordOption,
|
||||
requested_option: IndexRecordOption,
|
||||
) -> BlockSegmentPostings {
|
||||
) -> io::Result<BlockSegmentPostings> {
|
||||
let freq_reading_option = match (record_option, requested_option) {
|
||||
(IndexRecordOption::Basic, _) => FreqReadingOption::NoFreq,
|
||||
(_, IndexRecordOption::Basic) => FreqReadingOption::SkipFreq,
|
||||
(_, _) => FreqReadingOption::ReadFreq,
|
||||
};
|
||||
|
||||
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, data);
|
||||
let (skip_data_opt, postings_data) =
|
||||
split_into_skips_and_postings(doc_freq, data.read_bytes()?);
|
||||
let skip_reader = match skip_data_opt {
|
||||
Some(skip_data) => SkipReader::new(skip_data, doc_freq, record_option),
|
||||
None => SkipReader::new(ReadOnlySource::empty(), doc_freq, record_option),
|
||||
None => SkipReader::new(OwnedBytes::empty(), doc_freq, record_option),
|
||||
};
|
||||
|
||||
let mut block_segment_postings = BlockSegmentPostings {
|
||||
@@ -116,7 +116,7 @@ impl BlockSegmentPostings {
|
||||
skip_reader,
|
||||
};
|
||||
block_segment_postings.load_block();
|
||||
block_segment_postings
|
||||
Ok(block_segment_postings)
|
||||
}
|
||||
|
||||
/// Returns the block_max_score for the current block.
|
||||
@@ -172,15 +172,15 @@ impl BlockSegmentPostings {
|
||||
// # Warning
|
||||
//
|
||||
// This does not reset the positions list.
|
||||
pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: ReadOnlySource) {
|
||||
pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: OwnedBytes) {
|
||||
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
|
||||
self.data = ReadOnlySource::new(postings_data);
|
||||
self.data = postings_data;
|
||||
self.block_max_score_cache = None;
|
||||
self.loaded_offset = std::usize::MAX;
|
||||
if let Some(skip_data) = skip_data_opt {
|
||||
self.skip_reader.reset(skip_data, doc_freq);
|
||||
} else {
|
||||
self.skip_reader.reset(ReadOnlySource::empty(), doc_freq);
|
||||
self.skip_reader.reset(OwnedBytes::empty(), doc_freq);
|
||||
}
|
||||
self.doc_freq = doc_freq;
|
||||
self.load_block();
|
||||
@@ -344,8 +344,8 @@ impl BlockSegmentPostings {
|
||||
freq_reading_option: FreqReadingOption::NoFreq,
|
||||
block_max_score_cache: None,
|
||||
doc_freq: 0,
|
||||
data: ReadOnlySource::new(vec![]),
|
||||
skip_reader: SkipReader::new(ReadOnlySource::new(vec![]), 0, IndexRecordOption::Basic),
|
||||
data: OwnedBytes::empty(),
|
||||
skip_reader: SkipReader::new(OwnedBytes::empty(), 0, IndexRecordOption::Basic),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -467,10 +467,12 @@ mod tests {
|
||||
index_writer.commit().unwrap();
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let inverted_index = segment_reader.inverted_index(int_field);
|
||||
let inverted_index = segment_reader.inverted_index(int_field).unwrap();
|
||||
let term = Term::from_field_u64(int_field, 0u64);
|
||||
let term_info = inverted_index.get_term_info(&term).unwrap();
|
||||
inverted_index.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)
|
||||
inverted_index
|
||||
.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -491,37 +493,38 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reset_block_segment_postings() {
|
||||
fn test_reset_block_segment_postings() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let int_field = schema_builder.add_u64_field("id", INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
// create two postings lists, one containing even numbers,
// the other containing odd numbers.
for i in 0..6 {
|
||||
let doc = doc!(int_field=> (i % 2) as u64);
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
|
||||
let mut block_segments;
|
||||
{
|
||||
let term = Term::from_field_u64(int_field, 0u64);
|
||||
let inverted_index = segment_reader.inverted_index(int_field);
|
||||
let inverted_index = segment_reader.inverted_index(int_field)?;
|
||||
let term_info = inverted_index.get_term_info(&term).unwrap();
|
||||
block_segments = inverted_index
|
||||
.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic);
|
||||
.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)?;
|
||||
}
|
||||
assert_eq!(block_segments.docs(), &[0, 2, 4]);
|
||||
{
|
||||
let term = Term::from_field_u64(int_field, 1u64);
|
||||
let inverted_index = segment_reader.inverted_index(int_field);
|
||||
let inverted_index = segment_reader.inverted_index(int_field)?;
|
||||
let term_info = inverted_index.get_term_info(&term).unwrap();
|
||||
inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments);
|
||||
inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments)?;
|
||||
}
|
||||
assert_eq!(block_segments.docs(), &[1, 3, 5]);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,8 +28,6 @@ pub use self::segment_postings::SegmentPostings;
|
||||
|
||||
pub(crate) use self::stacker::compute_table_size;
|
||||
|
||||
pub use crate::common::HasLen;
|
||||
|
||||
pub(crate) type UnorderedTermId = u64;
|
||||
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(clippy::enum_variant_names))]
|
||||
@@ -42,8 +40,8 @@ pub(crate) enum FreqReadingOption {
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
use super::*;
|
||||
use super::InvertedIndexSerializer;
|
||||
use super::Postings;
|
||||
use crate::core::Index;
|
||||
use crate::core::SegmentComponent;
|
||||
use crate::core::SegmentReader;
|
||||
@@ -58,6 +56,7 @@ pub mod tests {
|
||||
use crate::schema::{IndexRecordOption, TextFieldIndexing};
|
||||
use crate::tokenizer::{SimpleTokenizer, MAX_TOKEN_LEN};
|
||||
use crate::DocId;
|
||||
use crate::HasLen;
|
||||
use crate::Score;
|
||||
use once_cell::sync::Lazy;
|
||||
use rand::rngs::StdRng;
|
||||
@@ -101,12 +100,12 @@ pub mod tests {
|
||||
index_writer.commit()?;
|
||||
|
||||
let searcher = index.reader()?.searcher();
|
||||
let inverted_index = searcher.segment_reader(0u32).inverted_index(title);
|
||||
let inverted_index = searcher.segment_reader(0u32).inverted_index(title)?;
|
||||
let term = Term::from_field_text(title, "abc");
|
||||
let mut positions = Vec::new();
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert_eq!(postings.doc(), 0);
|
||||
postings.positions(&mut positions);
|
||||
@@ -120,7 +119,7 @@ pub mod tests {
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert_eq!(postings.doc(), 0);
|
||||
assert_eq!(postings.advance(), 1);
|
||||
@@ -129,7 +128,7 @@ pub mod tests {
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert_eq!(postings.seek(1), 1);
|
||||
assert_eq!(postings.doc(), 1);
|
||||
@@ -138,7 +137,7 @@ pub mod tests {
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert_eq!(postings.seek(1002), 1002);
|
||||
assert_eq!(postings.doc(), 1002);
|
||||
@@ -147,7 +146,7 @@ pub mod tests {
|
||||
}
|
||||
{
|
||||
let mut postings = inverted_index
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert_eq!(postings.seek(100), 100);
|
||||
assert_eq!(postings.seek(1002), 1002);
|
||||
@@ -159,7 +158,7 @@ pub mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_drop_token_that_are_too_long() {
|
||||
pub fn test_drop_token_that_are_too_long() -> crate::Result<()> {
|
||||
let ok_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN).collect();
|
||||
let mut exceeding_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN + 1).collect();
|
||||
exceeding_token_text.push_str(" hello");
|
||||
@@ -184,7 +183,7 @@ pub mod tests {
|
||||
reader.reload().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0u32);
|
||||
let inverted_index = segment_reader.inverted_index(text_field);
|
||||
let inverted_index = segment_reader.inverted_index(text_field)?;
|
||||
assert_eq!(inverted_index.terms().num_terms(), 1);
|
||||
let mut bytes = vec![];
|
||||
assert!(inverted_index.terms().ord_to_term(0, &mut bytes));
|
||||
@@ -196,12 +195,13 @@ pub mod tests {
|
||||
reader.reload().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(1u32);
|
||||
let inverted_index = segment_reader.inverted_index(text_field);
|
||||
let inverted_index = segment_reader.inverted_index(text_field)?;
|
||||
assert_eq!(inverted_index.terms().num_terms(), 1);
|
||||
let mut bytes = vec![];
|
||||
assert!(inverted_index.terms().ord_to_term(0, &mut bytes));
|
||||
assert_eq!(&bytes[..], ok_token_text.as_bytes());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -261,15 +261,15 @@ pub mod tests {
|
||||
{
|
||||
let term_a = Term::from_field_text(text_field, "abcdef");
|
||||
assert!(segment_reader
|
||||
.inverted_index(term_a.field())
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||
.inverted_index(term_a.field())?
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.is_none());
|
||||
}
|
||||
{
|
||||
let term_a = Term::from_field_text(text_field, "a");
|
||||
let mut postings_a = segment_reader
|
||||
.inverted_index(term_a.field())
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||
.inverted_index(term_a.field())?
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert_eq!(postings_a.len(), 1000);
|
||||
assert_eq!(postings_a.doc(), 0);
|
||||
@@ -291,8 +291,8 @@ pub mod tests {
|
||||
{
|
||||
let term_e = Term::from_field_text(text_field, "e");
|
||||
let mut postings_e = segment_reader
|
||||
.inverted_index(term_e.field())
|
||||
.read_postings(&term_e, IndexRecordOption::WithFreqsAndPositions)
|
||||
.inverted_index(term_e.field())?
|
||||
.read_postings(&term_e, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert_eq!(postings_e.len(), 1000 - 2);
|
||||
for i in 2u32..1000u32 {
|
||||
@@ -312,7 +312,7 @@ pub mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_position_and_fieldnorm2() {
|
||||
pub fn test_position_and_fieldnorm2() -> crate::Result<()> {
|
||||
let mut positions: Vec<u32> = Vec::new();
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
@@ -328,16 +328,17 @@ pub mod tests {
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let mut postings = segment_reader
|
||||
.inverted_index(text_field)
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
|
||||
.inverted_index(text_field)?
|
||||
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
|
||||
.unwrap();
|
||||
assert_eq!(postings.doc(), 1u32);
|
||||
postings.positions(&mut positions);
|
||||
assert_eq!(&positions[..], &[1u32, 4]);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_skip_next() {
|
||||
fn test_skip_next() -> crate::Result<()> {
|
||||
let term_0 = Term::from_field_u64(Field::from_field_id(0), 0);
|
||||
let term_1 = Term::from_field_u64(Field::from_field_id(0), 1);
|
||||
let term_2 = Term::from_field_u64(Field::from_field_id(0), 2);
|
||||
@@ -348,10 +349,9 @@ pub mod tests {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let value_field = schema_builder.add_u64_field("value", INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
for i in 0u64..num_docs as u64 {
|
||||
let doc = doc!(value_field => 2u64, value_field => i % 2u64);
|
||||
index_writer.add_document(doc);
|
||||
@@ -360,15 +360,15 @@ pub mod tests {
|
||||
}
|
||||
index
|
||||
};
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let searcher = index.reader()?.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
|
||||
// check that the basic usage works
|
||||
for i in 0..num_docs - 1 {
|
||||
for j in i + 1..num_docs {
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_2.field())
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)
|
||||
.inverted_index(term_2.field())?
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)?
|
||||
.unwrap();
|
||||
assert_eq!(segment_postings.seek(i), i);
|
||||
assert_eq!(segment_postings.doc(), i);
|
||||
@@ -380,8 +380,8 @@ pub mod tests {
|
||||
|
||||
{
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_2.field())
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)
|
||||
.inverted_index(term_2.field())?
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)?
|
||||
.unwrap();
|
||||
|
||||
// check that `skip_next` advances the iterator
|
||||
@@ -400,8 +400,8 @@ pub mod tests {
|
||||
// check that filtering works
|
||||
{
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_0.field())
|
||||
.read_postings(&term_0, IndexRecordOption::Basic)
|
||||
.inverted_index(term_0.field())?
|
||||
.read_postings(&term_0, IndexRecordOption::Basic)?
|
||||
.unwrap();
|
||||
|
||||
for i in 0..num_docs / 2 {
|
||||
@@ -410,8 +410,8 @@ pub mod tests {
|
||||
}
|
||||
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_0.field())
|
||||
.read_postings(&term_0, IndexRecordOption::Basic)
|
||||
.inverted_index(term_0.field())?
|
||||
.read_postings(&term_0, IndexRecordOption::Basic)?
|
||||
.unwrap();
|
||||
|
||||
for i in 0..num_docs / 2 - 1 {
|
||||
@@ -422,19 +422,19 @@ pub mod tests {
|
||||
|
||||
// delete some of the documents
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.delete_term(term_0);
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let searcher = index.reader()?.searcher();
|
||||
assert_eq!(searcher.segment_readers().len(), 1);
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
|
||||
// make sure seeking still works
|
||||
for i in 0..num_docs {
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_2.field())
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)
|
||||
.inverted_index(term_2.field())?
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)?
|
||||
.unwrap();
|
||||
|
||||
if i % 2 == 0 {
|
||||
@@ -450,8 +450,8 @@ pub mod tests {
|
||||
// now try with a longer sequence
|
||||
{
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_2.field())
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)
|
||||
.inverted_index(term_2.field())?
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)?
|
||||
.unwrap();
|
||||
|
||||
let mut last = 2; // start from 5 to avoid seeking to 3 twice
|
||||
@@ -476,20 +476,19 @@ pub mod tests {
|
||||
|
||||
// delete everything else
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.delete_term(term_1);
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let searcher = index.reader()?.searcher();
|
||||
|
||||
// finally, check that it's empty
|
||||
{
|
||||
let searchable_segment_ids = index
|
||||
.searchable_segment_ids()
|
||||
.expect("could not get index segment ids");
|
||||
let searchable_segment_ids = index.searchable_segment_ids()?;
|
||||
assert!(searchable_segment_ids.is_empty());
|
||||
assert_eq!(searcher.num_docs(), 0);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub static TERM_A: Lazy<Term> = Lazy::new(|| {
|
||||
@@ -621,7 +620,7 @@ mod bench {
|
||||
b.iter(|| {
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(TERM_A.field())
|
||||
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
||||
.read_postings(&*TERM_A, IndexRecordOption::Basic)?
|
||||
.unwrap();
|
||||
while segment_postings.advance() != TERMINATED {}
|
||||
});
|
||||
@@ -636,18 +635,22 @@ mod bench {
|
||||
let segment_postings_a = segment_reader
|
||||
.inverted_index(TERM_A.field())
|
||||
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let segment_postings_b = segment_reader
|
||||
.inverted_index(TERM_B.field())
|
||||
.read_postings(&*TERM_B, IndexRecordOption::Basic)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let segment_postings_c = segment_reader
|
||||
.inverted_index(TERM_C.field())
|
||||
.read_postings(&*TERM_C, IndexRecordOption::Basic)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let segment_postings_d = segment_reader
|
||||
.inverted_index(TERM_D.field())
|
||||
.read_postings(&*TERM_D, IndexRecordOption::Basic)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let mut intersection = Intersection::new(vec![
|
||||
segment_postings_a,
|
||||
@@ -668,6 +671,7 @@ mod bench {
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(TERM_A.field())
|
||||
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
let mut existing_docs = Vec::new();
|
||||
|
||||
@@ -38,12 +38,8 @@ fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<dyn PostingsWriter>
|
||||
| FieldType::I64(_)
|
||||
| FieldType::F64(_)
|
||||
| FieldType::Date(_)
|
||||
| FieldType::Bytes(_)
|
||||
| FieldType::HierarchicalFacet => SpecializedPostingsWriter::<NothingRecorder>::new_boxed(),
|
||||
FieldType::Bytes => {
|
||||
// FieldType::Bytes cannot actually be indexed.
|
||||
// TODO fix during the indexer refactoring described in #276
|
||||
SpecializedPostingsWriter::<NothingRecorder>::new_boxed()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -161,11 +157,12 @@ impl MultiFieldPostingsWriter {
|
||||
unordered_term_mappings.insert(field, mapping);
|
||||
}
|
||||
FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => {}
|
||||
FieldType::Bytes => {}
|
||||
FieldType::Bytes(_) => {}
|
||||
}
|
||||
|
||||
let postings_writer = &self.per_field_postings_writers[field.field_id() as usize];
|
||||
let fieldnorm_reader = fieldnorm_readers.get_field(field);
|
||||
let postings_writer =
|
||||
self.per_field_postings_writers[field.field_id() as usize].as_ref();
|
||||
let fieldnorm_reader = fieldnorm_readers.get_field(field)?;
|
||||
let mut field_serializer = serializer.new_field(
|
||||
field,
|
||||
postings_writer.total_num_tokens(),
|
||||
|
||||
@@ -1,21 +1,16 @@
|
||||
use crate::common::HasLen;
|
||||
|
||||
use crate::directory::FileSlice;
|
||||
use crate::docset::DocSet;
|
||||
use crate::fastfield::DeleteBitSet;
|
||||
use crate::positions::PositionReader;
|
||||
|
||||
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
|
||||
use crate::postings::serializer::PostingsSerializer;
|
||||
use crate::postings::BlockSearcher;
|
||||
|
||||
use crate::postings::BlockSegmentPostings;
|
||||
use crate::postings::Postings;
|
||||
|
||||
use crate::schema::IndexRecordOption;
|
||||
use crate::{DocId, TERMINATED};
|
||||
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::fastfield::DeleteBitSet;
|
||||
use crate::postings::BlockSegmentPostings;
|
||||
|
||||
/// `SegmentPostings` represents the inverted list or postings associated to
|
||||
/// a term in a `Segment`.
|
||||
///
|
||||
@@ -77,7 +72,7 @@ impl SegmentPostings {
|
||||
let mut buffer = Vec::new();
|
||||
{
|
||||
let mut postings_serializer =
|
||||
PostingsSerializer::new(&mut buffer, 0.0, false, false, None);
|
||||
PostingsSerializer::new(&mut buffer, 0.0, IndexRecordOption::Basic, None);
|
||||
postings_serializer.new_term(docs.len() as u32);
|
||||
for &doc in docs {
|
||||
postings_serializer.write_doc(doc, 1u32);
|
||||
@@ -86,12 +81,13 @@ impl SegmentPostings {
|
||||
.close_term(docs.len() as u32)
|
||||
.expect("In memory Serialization should never fail.");
|
||||
}
|
||||
let block_segment_postings = BlockSegmentPostings::from_data(
|
||||
let block_segment_postings = BlockSegmentPostings::open(
|
||||
docs.len() as u32,
|
||||
ReadOnlySource::from(buffer),
|
||||
FileSlice::from(buffer),
|
||||
IndexRecordOption::Basic,
|
||||
IndexRecordOption::Basic,
|
||||
);
|
||||
)
|
||||
.unwrap();
|
||||
SegmentPostings::from_block_postings(block_segment_postings, None)
|
||||
}
|
||||
|
||||
@@ -120,8 +116,7 @@ impl SegmentPostings {
|
||||
let mut postings_serializer = PostingsSerializer::new(
|
||||
&mut buffer,
|
||||
average_field_norm,
|
||||
true,
|
||||
false,
|
||||
IndexRecordOption::WithFreqs,
|
||||
fieldnorm_reader,
|
||||
);
|
||||
postings_serializer.new_term(doc_and_tfs.len() as u32);
|
||||
@@ -131,12 +126,13 @@ impl SegmentPostings {
|
||||
postings_serializer
|
||||
.close_term(doc_and_tfs.len() as u32)
|
||||
.unwrap();
|
||||
let block_segment_postings = BlockSegmentPostings::from_data(
|
||||
let block_segment_postings = BlockSegmentPostings::open(
|
||||
doc_and_tfs.len() as u32,
|
||||
ReadOnlySource::from(buffer),
|
||||
FileSlice::from(buffer),
|
||||
IndexRecordOption::WithFreqs,
|
||||
IndexRecordOption::WithFreqs,
|
||||
);
|
||||
)
|
||||
.unwrap();
|
||||
SegmentPostings::from_block_postings(block_segment_postings, None)
|
||||
}
|
||||
|
||||
@@ -204,7 +200,7 @@ impl DocSet for SegmentPostings {
|
||||
}
|
||||
|
||||
/// Return the current document's `DocId`.
|
||||
#[inline]
|
||||
#[inline(always)]
|
||||
fn doc(&self) -> DocId {
|
||||
self.block_cursor.doc(self.cur)
|
||||
}
|
||||
|
||||
@@ -8,8 +8,8 @@ use crate::positions::PositionSerializer;
|
||||
use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
|
||||
use crate::postings::skip::SkipSerializer;
|
||||
use crate::query::BM25Weight;
|
||||
use crate::schema::Schema;
|
||||
use crate::schema::{Field, FieldEntry, FieldType};
|
||||
use crate::schema::{IndexRecordOption, Schema};
|
||||
use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
|
||||
use crate::{DocId, Score};
|
||||
use std::cmp::Ordering;
|
||||
@@ -143,30 +143,24 @@ impl<'a> FieldSerializer<'a> {
|
||||
fieldnorm_reader: Option<FieldNormReader>,
|
||||
) -> io::Result<FieldSerializer<'a>> {
|
||||
total_num_tokens.serialize(postings_write)?;
|
||||
let (term_freq_enabled, position_enabled): (bool, bool) = match field_type {
|
||||
let mode = match field_type {
|
||||
FieldType::Str(ref text_options) => {
|
||||
if let Some(text_indexing_options) = text_options.get_indexing_options() {
|
||||
let index_option = text_indexing_options.index_option();
|
||||
(index_option.has_freq(), index_option.has_positions())
|
||||
text_indexing_options.index_option()
|
||||
} else {
|
||||
(false, false)
|
||||
IndexRecordOption::Basic
|
||||
}
|
||||
}
|
||||
_ => (false, false),
|
||||
_ => IndexRecordOption::Basic,
|
||||
};
|
||||
let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
|
||||
let average_fieldnorm = fieldnorm_reader
|
||||
.as_ref()
|
||||
.map(|ff_reader| (total_num_tokens as Score / ff_reader.num_docs() as Score))
|
||||
.unwrap_or(0.0);
|
||||
let postings_serializer = PostingsSerializer::new(
|
||||
postings_write,
|
||||
average_fieldnorm,
|
||||
term_freq_enabled,
|
||||
position_enabled,
|
||||
fieldnorm_reader,
|
||||
);
|
||||
let positions_serializer_opt = if position_enabled {
|
||||
let postings_serializer =
|
||||
PostingsSerializer::new(postings_write, average_fieldnorm, mode, fieldnorm_reader);
|
||||
let positions_serializer_opt = if mode.has_positions() {
|
||||
Some(PositionSerializer::new(positions_write, positionsidx_write))
|
||||
} else {
|
||||
None
|
||||
@@ -323,29 +317,22 @@ pub struct PostingsSerializer<W: Write> {
|
||||
postings_write: Vec<u8>,
|
||||
skip_write: SkipSerializer,
|
||||
|
||||
termfreq_enabled: bool,
|
||||
termfreq_sum_enabled: bool,
|
||||
mode: IndexRecordOption,
|
||||
fieldnorm_reader: Option<FieldNormReader>,
|
||||
|
||||
bm25_weight: Option<BM25Weight>,
|
||||
|
||||
num_docs: u32, // Number of docs in the segment
|
||||
avg_fieldnorm: Score, // Average number of term in the field for that segment.
|
||||
// this value is used to compute the block wand information.
|
||||
// this value is used to compute the block wand information.
|
||||
}
|
||||
|
||||
impl<W: Write> PostingsSerializer<W> {
|
||||
pub fn new(
|
||||
write: W,
|
||||
avg_fieldnorm: Score,
|
||||
termfreq_enabled: bool,
|
||||
termfreq_sum_enabled: bool,
|
||||
mode: IndexRecordOption,
|
||||
fieldnorm_reader: Option<FieldNormReader>,
|
||||
) -> PostingsSerializer<W> {
|
||||
let num_docs = fieldnorm_reader
|
||||
.as_ref()
|
||||
.map(|fieldnorm_reader| fieldnorm_reader.num_docs())
|
||||
.unwrap_or(0u32);
|
||||
PostingsSerializer {
|
||||
output_write: CountingWriter::wrap(write),
|
||||
|
||||
@@ -356,26 +343,30 @@ impl<W: Write> PostingsSerializer<W> {
|
||||
skip_write: SkipSerializer::new(),
|
||||
|
||||
last_doc_id_encoded: 0u32,
|
||||
termfreq_enabled,
|
||||
termfreq_sum_enabled,
|
||||
mode,
|
||||
|
||||
fieldnorm_reader,
|
||||
bm25_weight: None,
|
||||
|
||||
num_docs,
|
||||
avg_fieldnorm,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of documents in the segment currently being serialized.
|
||||
/// This function may return `None` if there are no fieldnorm for that field.
|
||||
fn num_docs_in_segment(&self) -> Option<u32> {
|
||||
self.fieldnorm_reader
|
||||
.as_ref()
|
||||
.map(|reader| reader.num_docs())
|
||||
}
|
||||
|
||||
pub fn new_term(&mut self, term_doc_freq: u32) {
|
||||
if self.termfreq_enabled && self.num_docs > 0 {
|
||||
let bm25_weight = BM25Weight::for_one_term(
|
||||
term_doc_freq as u64,
|
||||
self.num_docs as u64,
|
||||
self.avg_fieldnorm,
|
||||
);
|
||||
self.bm25_weight = Some(bm25_weight);
|
||||
if self.mode.has_freq() {
|
||||
return;
|
||||
}
|
||||
self.bm25_weight = self.num_docs_in_segment().map(|num_docs| {
|
||||
BM25Weight::for_one_term(term_doc_freq as u64, num_docs as u64, self.avg_fieldnorm)
|
||||
});
|
||||
}
|
||||
|
||||
fn write_block(&mut self) {
|
||||
@@ -390,13 +381,15 @@ impl<W: Write> PostingsSerializer<W> {
|
||||
// last el block 0, offset block 1,
|
||||
self.postings_write.extend(block_encoded);
|
||||
}
|
||||
if self.termfreq_enabled {
|
||||
if self.mode.has_freq() {
|
||||
let (num_bits, block_encoded): (u8, &[u8]) = self
|
||||
.block_encoder
|
||||
.compress_block_unsorted(&self.block.term_freqs());
|
||||
self.postings_write.extend(block_encoded);
|
||||
self.skip_write.write_term_freq(num_bits);
|
||||
if self.termfreq_sum_enabled {
|
||||
if self.mode.has_positions() {
|
||||
// We serialize the sum of term freqs within the skip information
|
||||
// in order to navigate through positions.
|
||||
let sum_freq = self.block.term_freqs().iter().cloned().sum();
|
||||
self.skip_write.write_total_term_freq(sum_freq);
|
||||
}
|
||||
@@ -455,7 +448,7 @@ impl<W: Write> PostingsSerializer<W> {
|
||||
self.postings_write.write_all(block_encoded)?;
|
||||
}
|
||||
// ... Idem for term frequencies
|
||||
if self.termfreq_enabled {
|
||||
if self.mode.has_freq() {
|
||||
let block_encoded = self
|
||||
.block_encoder
|
||||
.compress_vint_unsorted(self.block.term_freqs());
|
||||
|
||||
@@ -1,10 +1,9 @@
use crate::common::{read_u32_vint_no_advance, serialize_vint_u32, BinarySerializable, VInt};
use crate::directory::ReadOnlySource;
use crate::common::{read_u32_vint_no_advance, serialize_vint_u32, BinarySerializable};
use crate::directory::OwnedBytes;
use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
use crate::query::BM25Weight;
use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TERMINATED};
use owned_read::OwnedRead;

pub struct SkipSerializer {
buffer: Vec<u8>,
@@ -62,7 +61,7 @@ impl SkipSerializer {
|
||||
pub(crate) struct SkipReader {
|
||||
last_doc_in_block: DocId,
|
||||
pub(crate) last_doc_in_previous_block: DocId,
|
||||
owned_read: OwnedRead,
|
||||
owned_read: OwnedBytes,
|
||||
skip_info: IndexRecordOption,
|
||||
byte_offset: usize,
|
||||
remaining_docs: u32, // number of docs remaining, including the
|
||||
@@ -93,7 +92,7 @@ impl Default for BlockInfo {
|
||||
}
|
||||
|
||||
impl SkipReader {
|
||||
pub fn new(data: ReadOnlySource, doc_freq: u32, skip_info: IndexRecordOption) -> SkipReader {
|
||||
pub fn new(data: OwnedBytes, doc_freq: u32, skip_info: IndexRecordOption) -> SkipReader {
|
||||
let mut skip_reader = SkipReader {
|
||||
last_doc_in_block: if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
|
||||
0
|
||||
@@ -101,7 +100,7 @@ impl SkipReader {
|
||||
TERMINATED
|
||||
},
|
||||
last_doc_in_previous_block: 0u32,
|
||||
owned_read: OwnedRead::new(data),
|
||||
owned_read: data,
|
||||
skip_info,
|
||||
block_info: BlockInfo::VInt { num_docs: doc_freq },
|
||||
byte_offset: 0,
|
||||
@@ -114,14 +113,14 @@ impl SkipReader {
|
||||
skip_reader
|
||||
}
|
||||
|
||||
pub fn reset(&mut self, data: ReadOnlySource, doc_freq: u32) {
|
||||
pub fn reset(&mut self, data: OwnedBytes, doc_freq: u32) {
|
||||
self.last_doc_in_block = if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
|
||||
0
|
||||
} else {
|
||||
TERMINATED
|
||||
};
|
||||
self.last_doc_in_previous_block = 0u32;
|
||||
self.owned_read = OwnedRead::new(data);
|
||||
self.owned_read = data;
|
||||
self.block_info = BlockInfo::VInt { num_docs: doc_freq };
|
||||
self.byte_offset = 0;
|
||||
self.remaining_docs = doc_freq;
|
||||
@@ -154,17 +153,24 @@ impl SkipReader {
|
||||
self.position_offset
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn byte_offset(&self) -> usize {
|
||||
self.byte_offset
|
||||
}
|
||||
|
||||
fn read_block_info(&mut self) {
|
||||
let doc_delta = u32::deserialize(&mut self.owned_read).expect("Skip data corrupted");
|
||||
let doc_delta = {
|
||||
let bytes = self.owned_read.as_slice();
|
||||
let mut buf = [0; 4];
|
||||
buf.copy_from_slice(&bytes[..4]);
|
||||
u32::from_le_bytes(buf)
|
||||
};
|
||||
self.last_doc_in_block += doc_delta as DocId;
|
||||
let doc_num_bits = self.owned_read.get(0);
|
||||
let doc_num_bits = self.owned_read.as_slice()[4];
|
||||
|
||||
match self.skip_info {
|
||||
IndexRecordOption::Basic => {
|
||||
self.owned_read.advance(1);
|
||||
self.owned_read.advance(5);
|
||||
self.block_info = BlockInfo::BitPacked {
|
||||
doc_num_bits,
|
||||
tf_num_bits: 0,
|
||||
@@ -174,11 +180,11 @@ impl SkipReader {
|
||||
};
|
||||
}
|
||||
IndexRecordOption::WithFreqs => {
|
||||
let tf_num_bits = self.owned_read.get(1);
|
||||
let block_wand_fieldnorm_id = self.owned_read.get(2);
|
||||
let data = &self.owned_read.as_ref()[3..];
|
||||
let (block_wand_term_freq, num_bytes) = read_u32_vint_no_advance(data);
|
||||
self.owned_read.advance(3 + num_bytes);
|
||||
let bytes = self.owned_read.as_slice();
|
||||
let tf_num_bits = bytes[5];
|
||||
let block_wand_fieldnorm_id = bytes[6];
|
||||
let (block_wand_term_freq, num_bytes) = read_u32_vint_no_advance(&bytes[7..]);
|
||||
self.owned_read.advance(7 + num_bytes);
|
||||
self.block_info = BlockInfo::BitPacked {
|
||||
doc_num_bits,
|
||||
tf_num_bits,
|
||||
@@ -188,13 +194,16 @@ impl SkipReader {
|
||||
};
|
||||
}
|
||||
IndexRecordOption::WithFreqsAndPositions => {
|
||||
let tf_num_bits = self.owned_read.get(1);
|
||||
self.owned_read.advance(2);
|
||||
let tf_sum = u32::deserialize(&mut self.owned_read).expect("Failed reading tf_sum");
|
||||
let block_wand_fieldnorm_id = self.owned_read.get(0);
|
||||
self.owned_read.advance(1);
|
||||
let block_wand_term_freq =
|
||||
VInt::deserialize_u64(&mut self.owned_read).unwrap() as u32;
|
||||
let bytes = self.owned_read.as_slice();
|
||||
let tf_num_bits = bytes[5];
|
||||
let tf_sum = {
|
||||
let mut buf = [0; 4];
|
||||
buf.copy_from_slice(&bytes[6..10]);
|
||||
u32::from_le_bytes(buf)
|
||||
};
|
||||
let block_wand_fieldnorm_id = bytes[10];
|
||||
let (block_wand_term_freq, num_bytes) = read_u32_vint_no_advance(&bytes[11..]);
|
||||
self.owned_read.advance(11 + num_bytes);
|
||||
self.block_info = BlockInfo::BitPacked {
|
||||
doc_num_bits,
|
||||
tf_num_bits,
|
||||
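// Summary of the skip-entry layout as read by the new `read_block_info` above
// (derived from the byte offsets in this patch; not part of the diff itself):
//   bytes[0..4]   doc_delta (u32, little endian)
//   bytes[4]      doc_num_bits
//   -- IndexRecordOption::WithFreqs --
//   bytes[5]      tf_num_bits
//   bytes[6]      block_wand_fieldnorm_id
//   bytes[7..]    block_wand_term_freq (vint)
//   -- IndexRecordOption::WithFreqsAndPositions --
//   bytes[5]      tf_num_bits
//   bytes[6..10]  tf_sum (u32, little endian)
//   bytes[10]     block_wand_fieldnorm_id
//   bytes[11..]   block_wand_term_freq (vint)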
@@ -262,7 +271,7 @@ mod tests {
|
||||
use super::BlockInfo;
|
||||
use super::IndexRecordOption;
|
||||
use super::{SkipReader, SkipSerializer};
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::directory::OwnedBytes;
|
||||
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
|
||||
|
||||
#[test]
|
||||
@@ -278,11 +287,8 @@ mod tests {
|
||||
skip_serializer.data().to_owned()
|
||||
};
|
||||
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
|
||||
let mut skip_reader = SkipReader::new(
|
||||
ReadOnlySource::new(buf),
|
||||
doc_freq,
|
||||
IndexRecordOption::WithFreqs,
|
||||
);
|
||||
let mut skip_reader =
|
||||
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::WithFreqs);
|
||||
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
|
||||
assert_eq!(
|
||||
skip_reader.block_info,
|
||||
@@ -323,11 +329,8 @@ mod tests {
|
||||
skip_serializer.data().to_owned()
|
||||
};
|
||||
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
|
||||
let mut skip_reader = SkipReader::new(
|
||||
ReadOnlySource::from(buf),
|
||||
doc_freq,
|
||||
IndexRecordOption::Basic,
|
||||
);
|
||||
let mut skip_reader =
|
||||
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::Basic);
|
||||
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
|
||||
assert_eq!(
|
||||
skip_reader.block_info(),
|
||||
@@ -367,11 +370,8 @@ mod tests {
|
||||
skip_serializer.data().to_owned()
|
||||
};
|
||||
let doc_freq = COMPRESSION_BLOCK_SIZE as u32;
|
||||
let mut skip_reader = SkipReader::new(
|
||||
ReadOnlySource::from(buf),
|
||||
doc_freq,
|
||||
IndexRecordOption::Basic,
|
||||
);
|
||||
let mut skip_reader =
|
||||
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::Basic);
|
||||
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
|
||||
assert_eq!(
|
||||
skip_reader.block_info(),
|
||||
|
||||
@@ -206,8 +206,8 @@ mod tests {
|
||||
fn test_stack_long() {
|
||||
let mut heap = MemoryArena::new();
|
||||
let mut stack = ExpUnrolledLinkedList::new();
|
||||
let source: Vec<u32> = (0..100).collect();
|
||||
for &el in &source {
|
||||
let data: Vec<u32> = (0..100).collect();
|
||||
for &el in &data {
|
||||
assert!(stack
|
||||
.writer(&mut heap)
|
||||
.write_u32::<LittleEndian>(el)
|
||||
@@ -221,7 +221,7 @@ mod tests {
|
||||
result.push(LittleEndian::read_u32(&remaining[..4]));
|
||||
remaining = &remaining[4..];
|
||||
}
|
||||
assert_eq!(&result[..], &source[..]);
|
||||
assert_eq!(&result[..], &data[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -42,13 +42,13 @@ where
|
||||
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
|
||||
let max_doc = reader.max_doc();
|
||||
let mut doc_bitset = BitSet::with_max_value(max_doc);
|
||||
let inverted_index = reader.inverted_index(self.field);
|
||||
let inverted_index = reader.inverted_index(self.field)?;
|
||||
let term_dict = inverted_index.terms();
|
||||
let mut term_stream = self.automaton_stream(term_dict);
|
||||
while term_stream.advance() {
|
||||
let term_info = term_stream.value();
|
||||
let mut block_segment_postings = inverted_index
|
||||
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic);
|
||||
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
|
||||
loop {
|
||||
let docs = block_segment_postings.docs();
|
||||
if docs.is_empty() {
|
||||
|
||||
@@ -52,7 +52,7 @@ impl BM25Weight {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn for_terms(searcher: &Searcher, terms: &[Term]) -> BM25Weight {
|
||||
pub fn for_terms(searcher: &Searcher, terms: &[Term]) -> crate::Result<BM25Weight> {
|
||||
assert!(!terms.is_empty(), "BM25 requires at least one term");
|
||||
let field = terms[0].field();
|
||||
for term in &terms[1..] {
|
||||
@@ -66,25 +66,27 @@ impl BM25Weight {
|
||||
let mut total_num_tokens = 0u64;
|
||||
let mut total_num_docs = 0u64;
|
||||
for segment_reader in searcher.segment_readers() {
|
||||
let inverted_index = segment_reader.inverted_index(field);
|
||||
let inverted_index = segment_reader.inverted_index(field)?;
|
||||
total_num_tokens += inverted_index.total_num_tokens();
|
||||
total_num_docs += u64::from(segment_reader.max_doc());
|
||||
}
|
||||
let average_fieldnorm = total_num_tokens as Score / total_num_docs as Score;
|
||||
|
||||
if terms.len() == 1 {
|
||||
let term_doc_freq = searcher.doc_freq(&terms[0]);
|
||||
BM25Weight::for_one_term(term_doc_freq, total_num_docs, average_fieldnorm)
|
||||
let term_doc_freq = searcher.doc_freq(&terms[0])?;
|
||||
Ok(BM25Weight::for_one_term(
|
||||
term_doc_freq,
|
||||
total_num_docs,
|
||||
average_fieldnorm,
|
||||
))
|
||||
} else {
|
||||
let idf = terms
|
||||
.iter()
|
||||
.map(|term| {
|
||||
let term_doc_freq = searcher.doc_freq(term);
|
||||
idf(term_doc_freq, total_num_docs)
|
||||
})
|
||||
.sum::<Score>();
|
||||
let idf_explain = Explanation::new("idf", idf);
|
||||
BM25Weight::new(idf_explain, average_fieldnorm)
|
||||
let mut idf_sum: Score = 0.0;
|
||||
for term in terms {
|
||||
let term_doc_freq = searcher.doc_freq(term)?;
|
||||
idf_sum += idf(term_doc_freq, total_num_docs);
|
||||
}
|
||||
let idf_explain = Explanation::new("idf", idf_sum);
|
||||
Ok(BM25Weight::new(idf_explain, average_fieldnorm))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -83,7 +83,7 @@ use std::collections::BTreeSet;
|
||||
/// ];
|
||||
/// // Make a BooleanQuery equivalent to
|
||||
/// // title:+diary title:-girl
|
||||
/// let diary_must_and_girl_mustnot = BooleanQuery::from(queries_with_occurs1);
|
||||
/// let diary_must_and_girl_mustnot = BooleanQuery::new(queries_with_occurs1);
|
||||
/// let count1 = searcher.search(&diary_must_and_girl_mustnot, &Count)?;
|
||||
/// assert_eq!(count1, 1);
|
||||
///
|
||||
@@ -93,7 +93,7 @@ use std::collections::BTreeSet;
|
||||
/// IndexRecordOption::Basic,
|
||||
/// ));
|
||||
/// // "title:diary OR title:cow"
|
||||
/// let title_diary_or_cow = BooleanQuery::from(vec![
|
||||
/// let title_diary_or_cow = BooleanQuery::new(vec![
|
||||
/// (Occur::Should, diary_term_query.box_clone()),
|
||||
/// (Occur::Should, cow_term_query),
|
||||
/// ]);
|
||||
@@ -108,7 +108,7 @@ use std::collections::BTreeSet;
|
||||
/// // You can combine subqueries of different types into 1 BooleanQuery:
|
||||
/// // `TermQuery` and `PhraseQuery`
|
||||
/// // "title:diary OR "dairy cow"
|
||||
/// let term_of_phrase_query = BooleanQuery::from(vec![
|
||||
/// let term_of_phrase_query = BooleanQuery::new(vec![
|
||||
/// (Occur::Should, diary_term_query.box_clone()),
|
||||
/// (Occur::Should, phrase_query.box_clone()),
|
||||
/// ]);
|
||||
@@ -117,7 +117,7 @@ use std::collections::BTreeSet;
|
||||
///
|
||||
/// // You can nest one BooleanQuery inside another
|
||||
/// // body:found AND ("title:diary OR "dairy cow")
|
||||
/// let nested_query = BooleanQuery::from(vec![
|
||||
/// let nested_query = BooleanQuery::new(vec![
|
||||
/// (Occur::Must, body_term_query),
|
||||
/// (Occur::Must, Box::new(term_of_phrase_query))
|
||||
/// ]);
|
||||
@@ -143,7 +143,7 @@ impl Clone for BooleanQuery {
|
||||
|
||||
impl From<Vec<(Occur, Box<dyn Query>)>> for BooleanQuery {
|
||||
fn from(subqueries: Vec<(Occur, Box<dyn Query>)>) -> BooleanQuery {
|
||||
BooleanQuery { subqueries }
|
||||
BooleanQuery::new(subqueries)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -167,6 +167,23 @@ impl Query for BooleanQuery {
|
||||
}
|
||||
|
||||
impl BooleanQuery {
|
||||
/// Creates a new boolean query.
|
||||
pub fn new(subqueries: Vec<(Occur, Box<dyn Query>)>) -> BooleanQuery {
|
||||
BooleanQuery { subqueries }
|
||||
}
|
||||
|
||||
/// Returns the intersection of the queries.
|
||||
pub fn intersection(queries: Vec<Box<dyn Query>>) -> BooleanQuery {
|
||||
let subqueries = queries.into_iter().map(|s| (Occur::Must, s)).collect();
|
||||
BooleanQuery::new(subqueries)
|
||||
}
|
||||
|
||||
/// Returns the union of the queries.
|
||||
pub fn union(queries: Vec<Box<dyn Query>>) -> BooleanQuery {
|
||||
let subqueries = queries.into_iter().map(|s| (Occur::Should, s)).collect();
|
||||
BooleanQuery::new(subqueries)
|
||||
}
|
||||
|
||||
/// Helper method to create a boolean query matching a given list of terms.
|
||||
/// The resulting query is a disjunction of the terms.
|
||||
pub fn new_multiterms_query(terms: Vec<Term>) -> BooleanQuery {
|
||||
@@ -178,7 +195,7 @@ impl BooleanQuery {
|
||||
(Occur::Should, term_query)
|
||||
})
|
||||
.collect();
|
||||
BooleanQuery::from(occur_term_queries)
|
||||
BooleanQuery::new(occur_term_queries)
|
||||
}
|
||||
|
||||
/// Deconstructed view of the clauses making up this query.
|
||||
@@ -186,3 +203,77 @@ impl BooleanQuery {
|
||||
&self.subqueries[..]
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::BooleanQuery;
|
||||
use crate::collector::DocSetCollector;
|
||||
use crate::query::{QueryClone, TermQuery};
|
||||
use crate::schema::{IndexRecordOption, Schema, TEXT};
|
||||
use crate::{DocAddress, Index, Term};
|
||||
|
||||
fn create_test_index() -> crate::Result<Index> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut writer = index.writer_for_tests().unwrap();
|
||||
writer.add_document(doc!(text=>"b c"));
|
||||
writer.add_document(doc!(text=>"a c"));
|
||||
writer.add_document(doc!(text=>"a b"));
|
||||
writer.add_document(doc!(text=>"a d"));
|
||||
writer.commit()?;
|
||||
Ok(index)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_union() -> crate::Result<()> {
|
||||
let index = create_test_index()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let text = index.schema().get_field("text").unwrap();
|
||||
let term_a = TermQuery::new(Term::from_field_text(text, "a"), IndexRecordOption::Basic);
|
||||
let term_d = TermQuery::new(Term::from_field_text(text, "d"), IndexRecordOption::Basic);
|
||||
let union_ad = BooleanQuery::union(vec![term_a.box_clone(), term_d.box_clone()]);
|
||||
let docs = searcher.search(&union_ad, &DocSetCollector)?;
|
||||
assert_eq!(
|
||||
docs,
|
||||
vec![
|
||||
DocAddress(0u32, 1u32),
|
||||
DocAddress(0u32, 2u32),
|
||||
DocAddress(0u32, 3u32)
|
||||
]
|
||||
.into_iter()
|
||||
.collect()
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intersection() -> crate::Result<()> {
|
||||
let index = create_test_index()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let text = index.schema().get_field("text").unwrap();
|
||||
let term_a = TermQuery::new(Term::from_field_text(text, "a"), IndexRecordOption::Basic);
|
||||
let term_b = TermQuery::new(Term::from_field_text(text, "b"), IndexRecordOption::Basic);
|
||||
let term_c = TermQuery::new(Term::from_field_text(text, "c"), IndexRecordOption::Basic);
|
||||
let intersection_ab =
|
||||
BooleanQuery::intersection(vec![term_a.box_clone(), term_b.box_clone()]);
|
||||
let intersection_ac =
|
||||
BooleanQuery::intersection(vec![term_a.box_clone(), term_c.box_clone()]);
|
||||
let intersection_bc =
|
||||
BooleanQuery::intersection(vec![term_b.box_clone(), term_c.box_clone()]);
|
||||
{
|
||||
let docs = searcher.search(&intersection_ab, &DocSetCollector)?;
|
||||
assert_eq!(docs, vec![DocAddress(0u32, 2u32)].into_iter().collect());
|
||||
}
|
||||
{
|
||||
let docs = searcher.search(&intersection_ac, &DocSetCollector)?;
|
||||
assert_eq!(docs, vec![DocAddress(0u32, 1u32)].into_iter().collect());
|
||||
}
|
||||
{
|
||||
let docs = searcher.search(&intersection_bc, &DocSetCollector)?;
|
||||
assert_eq!(docs, vec![DocAddress(0u32, 0u32)].into_iter().collect());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
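Illustrative sketch (not part of the patch): combining the new BooleanQuery::union and BooleanQuery::intersection helpers shown above. The `text` field is assumed to be an indexed text field, as in the tests.

use tantivy::query::{BooleanQuery, Query, QueryClone, TermQuery};
use tantivy::schema::{Field, IndexRecordOption};
use tantivy::Term;

fn a_or_b_and_a(text: Field) -> BooleanQuery {
    let term_a: Box<dyn Query> = Box::new(TermQuery::new(
        Term::from_field_text(text, "a"),
        IndexRecordOption::Basic,
    ));
    let term_b: Box<dyn Query> = Box::new(TermQuery::new(
        Term::from_field_text(text, "b"),
        IndexRecordOption::Basic,
    ));
    // "a OR b"
    let a_or_b: Box<dyn Query> =
        Box::new(BooleanQuery::union(vec![term_a.box_clone(), term_b]));
    // "(a OR b) AND a"
    BooleanQuery::intersection(vec![a_or_b, term_a])
}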
@@ -33,13 +33,11 @@ mod tests {
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
{
|
||||
index_writer.add_document(doc!(text_field => "a b c"));
|
||||
index_writer.add_document(doc!(text_field => "a c"));
|
||||
index_writer.add_document(doc!(text_field => "b c"));
|
||||
index_writer.add_document(doc!(text_field => "a b c d"));
|
||||
index_writer.add_document(doc!(text_field => "d"));
|
||||
}
|
||||
index_writer.add_document(doc!(text_field => "a b c"));
|
||||
index_writer.add_document(doc!(text_field => "a c"));
|
||||
index_writer.add_document(doc!(text_field => "b c"));
|
||||
index_writer.add_document(doc!(text_field => "a b c d"));
|
||||
index_writer.add_document(doc!(text_field => "d"));
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
(index, text_field)
|
||||
@@ -134,29 +132,29 @@ mod tests {
|
||||
.collect::<Vec<DocId>>()
|
||||
};
|
||||
{
|
||||
let boolean_query = BooleanQuery::from(vec![(Occur::Must, make_term_query("a"))]);
|
||||
let boolean_query = BooleanQuery::new(vec![(Occur::Must, make_term_query("a"))]);
|
||||
assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]);
|
||||
}
|
||||
{
|
||||
let boolean_query = BooleanQuery::from(vec![(Occur::Should, make_term_query("a"))]);
|
||||
let boolean_query = BooleanQuery::new(vec![(Occur::Should, make_term_query("a"))]);
|
||||
assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]);
|
||||
}
|
||||
{
|
||||
let boolean_query = BooleanQuery::from(vec![
|
||||
let boolean_query = BooleanQuery::new(vec![
|
||||
(Occur::Should, make_term_query("a")),
|
||||
(Occur::Should, make_term_query("b")),
|
||||
]);
|
||||
assert_eq!(matching_docs(&boolean_query), vec![0, 1, 2, 3]);
|
||||
}
|
||||
{
|
||||
let boolean_query = BooleanQuery::from(vec![
|
||||
let boolean_query = BooleanQuery::new(vec![
|
||||
(Occur::Must, make_term_query("a")),
|
||||
(Occur::Should, make_term_query("b")),
|
||||
]);
|
||||
assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]);
|
||||
}
|
||||
{
|
||||
let boolean_query = BooleanQuery::from(vec![
|
||||
let boolean_query = BooleanQuery::new(vec![
|
||||
(Occur::Must, make_term_query("a")),
|
||||
(Occur::Should, make_term_query("b")),
|
||||
(Occur::MustNot, make_term_query("d")),
|
||||
@@ -164,7 +162,7 @@ mod tests {
|
||||
assert_eq!(matching_docs(&boolean_query), vec![0, 1]);
|
||||
}
|
||||
{
|
||||
let boolean_query = BooleanQuery::from(vec![(Occur::MustNot, make_term_query("d"))]);
|
||||
let boolean_query = BooleanQuery::new(vec![(Occur::MustNot, make_term_query("d"))]);
|
||||
assert_eq!(matching_docs(&boolean_query), Vec::<u32>::new());
|
||||
}
|
||||
}
|
||||
@@ -194,7 +192,7 @@ mod tests {
|
||||
let score_doc_4: Score; // score of doc 4 should not be influenced by exclusion
|
||||
{
|
||||
let boolean_query_no_excluded =
|
||||
BooleanQuery::from(vec![(Occur::Must, make_term_query("d"))]);
|
||||
BooleanQuery::new(vec![(Occur::Must, make_term_query("d"))]);
|
||||
let topdocs_no_excluded = matching_topdocs(&boolean_query_no_excluded);
|
||||
assert_eq!(topdocs_no_excluded.len(), 2);
|
||||
let (top_score, top_doc) = topdocs_no_excluded[0];
|
||||
@@ -204,7 +202,7 @@ mod tests {
|
||||
}
|
||||
|
||||
{
|
||||
let boolean_query_two_excluded = BooleanQuery::from(vec![
|
||||
let boolean_query_two_excluded = BooleanQuery::new(vec![
|
||||
(Occur::Must, make_term_query("d")),
|
||||
(Occur::MustNot, make_term_query("a")),
|
||||
(Occur::MustNot, make_term_query("b")),
|
||||
@@ -241,7 +239,7 @@ mod tests {
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let boolean_query =
|
||||
BooleanQuery::from(vec![(Occur::Should, term_a), (Occur::Should, term_b)]);
|
||||
BooleanQuery::new(vec![(Occur::Should, term_a), (Occur::Should, term_b)]);
|
||||
let boolean_weight = boolean_query.weight(&searcher, true).unwrap();
|
||||
{
|
||||
let mut boolean_scorer = boolean_weight
|
||||
@@ -281,7 +279,7 @@ mod tests {
|
||||
};
|
||||
|
||||
{
|
||||
let boolean_query = BooleanQuery::from(vec![
|
||||
let boolean_query = BooleanQuery::new(vec![
|
||||
(Occur::Must, make_term_query("a")),
|
||||
(Occur::Must, make_term_query("b")),
|
||||
]);
|
||||
@@ -290,4 +288,29 @@ mod tests {
|
||||
assert_nearly_equals!(scores[1], 0.84699446);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_explain() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text = schema_builder.add_text_field("text", STRING);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_with_num_threads(1, 5_000_000)?;
|
||||
index_writer.add_document(doc!(text=>"a"));
|
||||
index_writer.add_document(doc!(text=>"b"));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let term_a: Box<dyn Query> = Box::new(TermQuery::new(
|
||||
Term::from_field_text(text, "a"),
|
||||
IndexRecordOption::Basic,
|
||||
));
|
||||
let term_b: Box<dyn Query> = Box::new(TermQuery::new(
|
||||
Term::from_field_text(text, "b"),
|
||||
IndexRecordOption::Basic,
|
||||
));
|
||||
let query = BooleanQuery::from(vec![(Occur::Should, term_a), (Occur::Should, term_b)]);
|
||||
let explanation = query.explain(&searcher, DocAddress(0, 0u32))?;
|
||||
assert_nearly_equals!(explanation.value(), 0.6931472f32);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,7 +153,7 @@ mod tests {
|
||||
let explanation = query.explain(&searcher, DocAddress(0, 0u32)).unwrap();
|
||||
assert_eq!(
|
||||
explanation.to_pretty_json(),
|
||||
"{\n \"value\": 0.2,\n \"description\": \"Boost x0.2 of ...\",\n \"details\": [\n {\n \"value\": 1.0,\n \"description\": \"AllQuery\"\n }\n ]\n}"
|
||||
"{\n \"value\": 0.2,\n \"description\": \"Boost x0.2 of ...\",\n \"details\": [\n {\n \"value\": 1.0,\n \"description\": \"AllQuery\",\n \"context\": []\n }\n ],\n \"context\": []\n}"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ pub struct Explanation {
|
||||
description: String,
|
||||
#[serde(skip_serializing_if = "Vec::is_empty")]
|
||||
details: Vec<Explanation>,
|
||||
context: Vec<String>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for Explanation {
|
||||
@@ -32,6 +33,7 @@ impl Explanation {
|
||||
value,
|
||||
description: description.to_string(),
|
||||
details: vec![],
|
||||
context: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
@@ -47,6 +49,11 @@ impl Explanation {
|
||||
self.details.push(child_explanation);
|
||||
}
|
||||
|
||||
/// Adds some extra context to the explanation.
|
||||
pub fn add_context(&mut self, context: String) {
|
||||
self.context.push(context);
|
||||
}
|
||||
|
||||
/// Shortcut for `self.details.push(Explanation::new(name, value));`
|
||||
pub fn add_const<T: ToString>(&mut self, name: T, value: Score) {
|
||||
self.details.push(Explanation::new(name, value));
|
||||
|
||||
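A minimal sketch (assumed usage, not part of the patch) of the new `context` field: callers can attach free-form context strings that are serialized next to `value`, `description` and `details`, as the updated pretty-JSON expectation above shows.

use tantivy::query::Explanation;

fn contextualized_explanation() -> Explanation {
    let mut explanation = Explanation::new("idf", 0.69);
    // The context string ends up in the serialized explanation.
    explanation.add_context("segment 0, doc 0".to_string());
    explanation
}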
@@ -46,7 +46,7 @@ pub(crate) use self::fuzzy_query::DFAWrapper;
|
||||
pub use self::fuzzy_query::FuzzyTermQuery;
|
||||
pub use self::intersection::intersect_scorers;
|
||||
pub use self::phrase_query::PhraseQuery;
|
||||
pub use self::query::Query;
|
||||
pub use self::query::{Query, QueryClone};
|
||||
pub use self::query_parser::QueryParser;
|
||||
pub use self::query_parser::QueryParserError;
|
||||
pub use self::range_query::RangeQuery;
|
||||
|
||||
@@ -95,7 +95,7 @@ impl PhraseQuery {
|
||||
)));
|
||||
}
|
||||
let terms = self.phrase_terms();
|
||||
let bm25_weight = BM25Weight::for_terms(searcher, &terms);
|
||||
let bm25_weight = BM25Weight::for_terms(searcher, &terms)?;
|
||||
Ok(PhraseWeight::new(
|
||||
self.phrase_terms.clone(),
|
||||
bm25_weight,
|
||||
|
||||
@@ -48,8 +48,8 @@ impl PhraseWeight {
|
||||
let mut term_postings_list = Vec::new();
|
||||
for &(offset, ref term) in &self.phrase_terms {
|
||||
if let Some(postings) = reader
|
||||
.inverted_index(term.field())
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||
.inverted_index(term.field())?
|
||||
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
|
||||
{
|
||||
term_postings_list.push((offset, postings));
|
||||
} else {
|
||||
@@ -66,8 +66,8 @@ impl PhraseWeight {
|
||||
let mut term_postings_list = Vec::new();
|
||||
for &(offset, ref term) in &self.phrase_terms {
|
||||
if let Some(postings) = reader
|
||||
.inverted_index(term.field())
|
||||
.read_postings_no_deletes(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||
.inverted_index(term.field())?
|
||||
.read_postings_no_deletes(&term, IndexRecordOption::WithFreqsAndPositions)?
|
||||
{
|
||||
term_postings_list.push((offset, postings));
|
||||
} else {
|
||||
|
||||
@@ -71,7 +71,9 @@ pub trait Query: QueryClone + Send + Sync + downcast_rs::Downcast + fmt::Debug {
|
||||
fn query_terms(&self, _term_set: &mut BTreeSet<Term>) {}
|
||||
}
|
||||
|
||||
/// Implements `box_clone`.
|
||||
pub trait QueryClone {
|
||||
/// Returns a boxed clone of `self`.
|
||||
fn box_clone(&self) -> Box<dyn Query>;
|
||||
}
|
||||
|
||||
|
||||
@@ -21,51 +21,52 @@ use std::str::FromStr;
|
||||
use tantivy_query_grammar::{UserInputAST, UserInputBound, UserInputLeaf};
|
||||
|
||||
/// Possible error that may happen when parsing a query.
|
||||
#[derive(Debug, PartialEq, Eq, Fail)]
|
||||
#[derive(Debug, PartialEq, Eq, Error)]
|
||||
pub enum QueryParserError {
|
||||
/// Error in the query syntax
|
||||
#[fail(display = "Syntax Error")]
|
||||
#[error("Syntax Error")]
|
||||
SyntaxError,
|
||||
/// `FieldDoesNotExist(field_name: String)`
|
||||
/// The query references a field that is not in the schema
|
||||
#[fail(display = "File does not exists: '{:?}'", _0)]
|
||||
#[error("File does not exists: '{0:?}'")]
|
||||
FieldDoesNotExist(String),
|
||||
/// The query contains a term for a `u64` or `i64`-field, but the value
|
||||
/// is neither.
|
||||
#[fail(display = "Expected a valid integer: '{:?}'", _0)]
|
||||
#[error("Expected a valid integer: '{0:?}'")]
|
||||
ExpectedInt(ParseIntError),
|
||||
/// The query contains a term for a bytes field, but the value is not valid
|
||||
/// base64.
|
||||
#[error("Expected base64: '{0:?}'")]
|
||||
ExpectedBase64(base64::DecodeError),
|
||||
/// The query contains a term for a `f64`-field, but the value
|
||||
/// is not a f64.
|
||||
#[fail(display = "Invalid query: Only excluding terms given")]
|
||||
#[error("Invalid query: Only excluding terms given")]
|
||||
ExpectedFloat(ParseFloatError),
|
||||
/// Queries that are only "excluding" are forbidden (e.g. -title:pop).
|
||||
#[fail(display = "Invalid query: Only excluding terms given")]
|
||||
#[error("Invalid query: Only excluding terms given")]
|
||||
AllButQueryForbidden,
|
||||
/// If no default field is declared, running a query without any
|
||||
/// field specified is forbidden.
|
||||
#[fail(display = "No default field declared and no field specified in query")]
|
||||
#[error("No default field declared and no field specified in query")]
|
||||
NoDefaultFieldDeclared,
|
||||
/// The field searched for is not declared
|
||||
/// as indexed in the schema.
|
||||
#[fail(display = "The field '{:?}' is not declared as indexed", _0)]
|
||||
#[error("The field '{0:?}' is not declared as indexed")]
|
||||
FieldNotIndexed(String),
|
||||
/// A phrase query was requested for a field that does not
|
||||
/// have any positions indexed.
|
||||
#[fail(display = "The field '{:?}' does not have positions indexed", _0)]
|
||||
#[error("The field '{0:?}' does not have positions indexed")]
|
||||
FieldDoesNotHavePositionsIndexed(String),
|
||||
/// The tokenizer for the given field is unknown
|
||||
/// The two argument strings are the name of the field, the name of the tokenizer
|
||||
#[fail(
|
||||
display = "The tokenizer '{:?}' for the field '{:?}' is unknown",
|
||||
_0, _1
|
||||
)]
|
||||
#[error("The tokenizer '{0:?}' for the field '{1:?}' is unknown")]
|
||||
UnknownTokenizer(String, String),
|
||||
/// The query contains a range query with a phrase as one of the bounds.
|
||||
/// Only terms can be used as bounds.
|
||||
#[fail(display = "A range query cannot have a phrase as one of the bounds")]
|
||||
#[error("A range query cannot have a phrase as one of the bounds")]
|
||||
RangeMustNotHavePhrase,
|
||||
/// The format for the date field is not RFC 3339 compliant.
|
||||
#[fail(display = "The date field has an invalid format")]
|
||||
#[error("The date field has an invalid format")]
|
||||
DateFormatError(chrono::ParseError),
|
||||
}
|
||||
|
||||
@@ -360,9 +361,10 @@ impl QueryParser {
|
||||
let facet = Facet::from_text(phrase);
|
||||
Ok(vec![(0, Term::from_field_text(field, facet.encoded_str()))])
|
||||
}
|
||||
FieldType::Bytes => {
|
||||
let field_name = self.schema.get_field_name(field).to_string();
|
||||
Err(QueryParserError::FieldNotIndexed(field_name))
|
||||
FieldType::Bytes(_) => {
|
||||
let bytes = base64::decode(phrase).map_err(QueryParserError::ExpectedBase64)?;
|
||||
let term = Term::from_field_bytes(field, &bytes);
|
||||
Ok(vec![(0, term)])
|
||||
}
|
||||
}
|
||||
}
|
||||
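A rough sketch of what the new `FieldType::Bytes(_)` arm above does for a query such as `bytes:YnVidQ==` (the helper name is made up): the phrase is base64-decoded and turned into a single bytes term.

use tantivy::schema::Field;
use tantivy::Term;

fn bytes_term(field: Field, phrase: &str) -> Result<Term, base64::DecodeError> {
    // Mirrors the arm above: decode the base64 payload, then build a term
    // over the raw bytes (requires the `base64` crate as a dependency).
    let bytes = base64::decode(phrase)?;
    Ok(Term::from_field_bytes(field, &bytes))
}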
@@ -555,7 +557,7 @@ fn convert_to_query(logical_ast: LogicalAST) -> Box<dyn Query> {
|
||||
!occur_subqueries.is_empty(),
|
||||
"Should not be empty after trimming"
|
||||
);
|
||||
Box::new(BooleanQuery::from(occur_subqueries))
|
||||
Box::new(BooleanQuery::new(occur_subqueries))
|
||||
}
|
||||
Some(LogicalAST::Leaf(trimmed_logical_literal)) => {
|
||||
convert_literal_to_query(*trimmed_logical_literal)
|
||||
@@ -604,6 +606,8 @@ mod test {
|
||||
schema_builder.add_date_field("date", INDEXED);
|
||||
schema_builder.add_f64_field("float", INDEXED);
|
||||
schema_builder.add_facet_field("facet");
|
||||
schema_builder.add_bytes_field("bytes", INDEXED);
|
||||
schema_builder.add_bytes_field("bytes_not_indexed", STORED);
|
||||
schema_builder.build()
|
||||
}
|
||||
|
||||
@@ -791,6 +795,37 @@ mod test {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_bytes() {
|
||||
test_parse_query_to_logical_ast_helper(
|
||||
"bytes:YnVidQ==",
|
||||
"Term(field=12,bytes=[98, 117, 98, 117])",
|
||||
false,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_bytes_not_indexed() {
|
||||
let error = parse_query_to_logical_ast("bytes_not_indexed:aaa", false).unwrap_err();
|
||||
assert!(matches!(error, QueryParserError::FieldNotIndexed(_)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_bytes_phrase() {
|
||||
test_parse_query_to_logical_ast_helper(
|
||||
"bytes:\"YnVidQ==\"",
|
||||
"Term(field=12,bytes=[98, 117, 98, 117])",
|
||||
false,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_bytes_invalid_base64() {
|
||||
let base64_err: QueryParserError =
|
||||
parse_query_to_logical_ast("bytes:aa", false).unwrap_err();
|
||||
assert!(matches!(base64_err, QueryParserError::ExpectedBase64(_)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_query_to_ast_ab_c() {
|
||||
test_parse_query_to_logical_ast_helper(
|
||||
|
||||
@@ -296,13 +296,13 @@ impl Weight for RangeWeight {
|
||||
let max_doc = reader.max_doc();
|
||||
let mut doc_bitset = BitSet::with_max_value(max_doc);
|
||||
|
||||
let inverted_index = reader.inverted_index(self.field);
|
||||
let inverted_index = reader.inverted_index(self.field)?;
|
||||
let term_dict = inverted_index.terms();
|
||||
let mut term_range = self.term_range(term_dict);
|
||||
while term_range.advance() {
|
||||
let term_info = term_range.value();
|
||||
let mut block_segment_postings = inverted_index
|
||||
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic);
|
||||
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
|
||||
loop {
|
||||
let docs = block_segment_postings.docs();
|
||||
if docs.is_empty() {
|
||||
|
||||
@@ -9,12 +9,12 @@ pub use self::term_weight::TermWeight;
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use crate::assert_nearly_equals;
|
||||
use crate::collector::TopDocs;
|
||||
use crate::docset::DocSet;
|
||||
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
|
||||
use crate::query::{Query, QueryParser, Scorer, TermQuery};
|
||||
use crate::schema::{Field, IndexRecordOption, Schema, STRING, TEXT};
|
||||
use crate::{assert_nearly_equals, DocAddress};
|
||||
use crate::{Index, Term, TERMINATED};
|
||||
|
||||
#[test]
|
||||
@@ -179,4 +179,40 @@ mod tests {
|
||||
"TermQuery(Term(field=1,bytes=[104, 101, 108, 108, 111]))"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_term_query_explain() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer.add_document(doc!(text_field=>"b"));
|
||||
index_writer.add_document(doc!(text_field=>"a"));
|
||||
index_writer.add_document(doc!(text_field=>"a"));
|
||||
index_writer.add_document(doc!(text_field=>"b"));
|
||||
index_writer.commit()?;
|
||||
let term_a = Term::from_field_text(text_field, "a");
|
||||
let term_query = TermQuery::new(term_a, IndexRecordOption::Basic);
|
||||
let searcher = index.reader()?.searcher();
|
||||
{
|
||||
let explanation = term_query.explain(&searcher, DocAddress(0u32, 1u32))?;
|
||||
assert_nearly_equals!(explanation.value(), 0.6931472f32);
|
||||
}
|
||||
{
|
||||
let explanation_err = term_query.explain(&searcher, DocAddress(0u32, 0u32));
|
||||
assert!(matches!(
|
||||
explanation_err,
|
||||
Err(crate::TantivyError::InvalidArgument(_msg))
|
||||
));
|
||||
}
|
||||
{
|
||||
let explanation_err = term_query.explain(&searcher, DocAddress(0u32, 3u32));
|
||||
assert!(matches!(
|
||||
explanation_err,
|
||||
Err(crate::TantivyError::InvalidArgument(_msg))
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,21 +87,43 @@ impl TermQuery {
|
||||
/// While `.weight(...)` returns a boxed trait object,
|
||||
/// this method returns a specific implementation.
|
||||
/// This is useful for optimization purposes.
|
||||
pub fn specialized_weight(&self, searcher: &Searcher, scoring_enabled: bool) -> TermWeight {
|
||||
pub fn specialized_weight(
|
||||
&self,
|
||||
searcher: &Searcher,
|
||||
scoring_enabled: bool,
|
||||
) -> crate::Result<TermWeight> {
|
||||
let field_entry = searcher
|
||||
.schema()
|
||||
.get_field_entry(self.term.field());
|
||||
if !field_entry.is_indexed() {
|
||||
let error_msg = format!("Field {:?} is not indexed.", field_entry.name());
|
||||
return Err(crate::TantivyError::SchemaError(error_msg));
|
||||
}
|
||||
let has_fieldnorms = searcher
|
||||
.schema()
|
||||
.get_field_entry(self.term.field())
|
||||
.has_fieldnorms();
|
||||
let term = self.term.clone();
|
||||
let bm25_weight = BM25Weight::for_terms(searcher, &[term]);
|
||||
let bm25_weight = BM25Weight::for_terms(searcher, &[term])?;
|
||||
let index_record_option = if scoring_enabled {
|
||||
self.index_record_option
|
||||
} else {
|
||||
IndexRecordOption::Basic
|
||||
};
|
||||
TermWeight::new(self.term.clone(), index_record_option, bm25_weight)
|
||||
Ok(TermWeight::new(
|
||||
self.term.clone(),
|
||||
index_record_option,
|
||||
bm25_weight,
|
||||
has_fieldnorms,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl Query for TermQuery {
|
||||
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> {
|
||||
Ok(Box::new(self.specialized_weight(searcher, scoring_enabled)))
|
||||
Ok(Box::new(
|
||||
self.specialized_weight(searcher, scoring_enabled)?,
|
||||
))
|
||||
}
|
||||
fn query_terms(&self, term_set: &mut BTreeSet<Term>) {
|
||||
term_set.insert(self.term.clone());
|
||||
|
||||
@@ -253,7 +253,7 @@ mod tests {
|
||||
}
|
||||
|
||||
fn test_block_wand_aux(term_query: &TermQuery, searcher: &Searcher) -> crate::Result<()> {
|
||||
let term_weight = term_query.specialized_weight(&searcher, true);
|
||||
let term_weight = term_query.specialized_weight(&searcher, true)?;
|
||||
for reader in searcher.segment_readers() {
|
||||
let mut block_max_scores = vec![];
|
||||
let mut block_max_scores_b = vec![];
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use super::term_scorer::TermScorer;
|
||||
use crate::core::SegmentReader;
|
||||
use crate::docset::DocSet;
|
||||
use crate::fieldnorm::FieldNormReader;
|
||||
use crate::postings::SegmentPostings;
|
||||
use crate::query::bm25::BM25Weight;
|
||||
use crate::query::explanation::does_not_match;
|
||||
@@ -15,6 +16,7 @@ pub struct TermWeight {
|
||||
term: Term,
|
||||
index_record_option: IndexRecordOption,
|
||||
similarity_weight: BM25Weight,
|
||||
has_fieldnorms: bool,
|
||||
}
|
||||
|
||||
impl Weight for TermWeight {
|
||||
@@ -25,10 +27,16 @@ impl Weight for TermWeight {
|
||||
|
||||
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
|
||||
let mut scorer = self.specialized_scorer(reader, 1.0)?;
|
||||
if scorer.seek(doc) != doc {
|
||||
if scorer.doc() > doc || scorer.seek(doc) != doc {
|
||||
return Err(does_not_match(doc));
|
||||
}
|
||||
Ok(scorer.explain())
|
||||
let mut explanation = scorer.explain();
|
||||
explanation.add_context(format!(
|
||||
"Term ={:?}:{:?}",
|
||||
self.term.field(),
|
||||
self.term.value_bytes()
|
||||
));
|
||||
Ok(explanation)
|
||||
}
|
||||
|
||||
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
|
||||
@@ -36,11 +44,9 @@ impl Weight for TermWeight {
|
||||
Ok(self.scorer(reader, 1.0)?.count(delete_bitset))
|
||||
} else {
|
||||
let field = self.term.field();
|
||||
Ok(reader
|
||||
.inverted_index(field)
|
||||
.get_term_info(&self.term)
|
||||
.map(|term_info| term_info.doc_freq)
|
||||
.unwrap_or(0))
|
||||
let inv_index = reader.inverted_index(field)?;
|
||||
let term_info = inv_index.get_term_info(&self.term);
|
||||
Ok(term_info.map(|term_info| term_info.doc_freq).unwrap_or(0))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -83,11 +89,13 @@ impl TermWeight {
|
||||
term: Term,
|
||||
index_record_option: IndexRecordOption,
|
||||
similarity_weight: BM25Weight,
|
||||
has_fieldnorms: bool,
|
||||
) -> TermWeight {
|
||||
TermWeight {
|
||||
term,
|
||||
index_record_option,
|
||||
similarity_weight,
|
||||
has_fieldnorms,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -97,11 +105,15 @@ impl TermWeight {
|
||||
boost: Score,
|
||||
) -> crate::Result<TermScorer> {
|
||||
let field = self.term.field();
|
||||
let inverted_index = reader.inverted_index(field);
|
||||
let fieldnorm_reader = reader.get_fieldnorms_reader(field)?;
|
||||
let inverted_index = reader.inverted_index(field)?;
|
||||
let fieldnorm_reader = if self.has_fieldnorms {
|
||||
reader.get_fieldnorms_reader(field)?
|
||||
} else {
|
||||
FieldNormReader::const_fieldnorm_id(1u8, reader.num_docs())
|
||||
};
|
||||
let similarity_weight = self.similarity_weight.boost_by(boost);
|
||||
let postings_opt: Option<SegmentPostings> =
|
||||
inverted_index.read_postings(&self.term, self.index_record_option);
|
||||
inverted_index.read_postings(&self.term, self.index_record_option)?;
|
||||
if let Some(segment_postings) = postings_opt {
|
||||
Ok(TermScorer::new(
|
||||
segment_postings,
|
||||
|
||||
@@ -9,8 +9,8 @@ use crate::directory::META_LOCK;
|
||||
use crate::Index;
|
||||
use crate::Searcher;
|
||||
use crate::SegmentReader;
|
||||
use std::convert::TryInto;
|
||||
use std::sync::Arc;
|
||||
use std::{convert::TryInto, io};
|
||||
|
||||
/// Defines when a new version of the index should be reloaded.
|
||||
///
|
||||
@@ -138,11 +138,11 @@ impl InnerIndexReader {
|
||||
.collect::<crate::Result<_>>()?
|
||||
};
|
||||
let schema = self.index.schema();
|
||||
let searchers = std::iter::repeat_with(|| {
|
||||
let searchers: Vec<Searcher> = std::iter::repeat_with(|| {
|
||||
Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone())
|
||||
})
|
||||
.take(self.num_searchers)
|
||||
.collect();
|
||||
.collect::<io::Result<_>>()?;
|
||||
self.searcher_pool.publish_new_generation(searchers);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use crossbeam::crossbeam_channel::unbounded;
|
||||
use crossbeam::{Receiver, RecvError, Sender};
|
||||
use crossbeam::channel::unbounded;
|
||||
use crossbeam::channel::{Receiver, RecvError, Sender};
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering;
|
||||
@@ -186,6 +186,7 @@ mod tests {
|
||||
|
||||
use super::Pool;
|
||||
use super::Queue;
|
||||
use crossbeam::channel;
|
||||
use std::{iter, mem};
|
||||
|
||||
#[test]
|
||||
@@ -230,11 +231,11 @@ mod tests {
|
||||
let mut threads = vec![];
|
||||
// spawn one more thread than there are elements in the pool
|
||||
|
||||
let (start_1_send, start_1_recv) = crossbeam::bounded(0);
|
||||
let (start_2_send, start_2_recv) = crossbeam::bounded(0);
|
||||
let (start_3_send, start_3_recv) = crossbeam::bounded(0);
|
||||
let (start_1_send, start_1_recv) = channel::bounded(0);
|
||||
let (start_2_send, start_2_recv) = channel::bounded(0);
|
||||
let (start_3_send, start_3_recv) = channel::bounded(0);
|
||||
|
||||
let (event_send1, event_recv) = crossbeam::unbounded();
|
||||
let (event_send1, event_recv) = channel::unbounded();
|
||||
let event_send2 = event_send1.clone();
|
||||
let event_send3 = event_send1.clone();
|
||||
|
||||
|
||||
164
src/schema/bytes_options.rs
Normal file
@@ -0,0 +1,164 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::ops::BitOr;
|
||||
|
||||
use super::flags::{FastFlag, IndexedFlag, SchemaFlagList, StoredFlag};
|
||||
/// Define how a bytes field should be handled by tantivy.
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct BytesOptions {
|
||||
indexed: bool,
|
||||
fast: bool,
|
||||
stored: bool,
|
||||
}
|
||||
|
||||
impl BytesOptions {
|
||||
/// Returns true iff the value is indexed.
|
||||
pub fn is_indexed(&self) -> bool {
|
||||
self.indexed
|
||||
}
|
||||
|
||||
/// Returns true iff the value is a fast field.
|
||||
pub fn is_fast(&self) -> bool {
|
||||
self.fast
|
||||
}
|
||||
|
||||
/// Returns true iff the value is stored.
|
||||
pub fn is_stored(&self) -> bool {
|
||||
self.stored
|
||||
}
|
||||
|
||||
/// Set the field as indexed.
|
||||
///
|
||||
/// Setting a bytes field as indexed will generate
|
||||
/// a posting list for each value taken by the field.
|
||||
pub fn set_indexed(mut self) -> BytesOptions {
|
||||
self.indexed = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the field as a single-valued fast field.
|
||||
///
|
||||
/// Fast fields are designed for random access.
|
||||
/// Access times are similar to a random lookup in an array.
|
||||
/// If more than one value is associated to a fast field, only the last one is
|
||||
/// kept.
|
||||
pub fn set_fast(mut self) -> BytesOptions {
|
||||
self.fast = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the field as stored.
|
||||
///
|
||||
/// Only the fields that are set as *stored* are
|
||||
/// persisted into the Tantivy's store.
|
||||
pub fn set_stored(mut self) -> BytesOptions {
|
||||
self.stored = true;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for BytesOptions {
|
||||
fn default() -> BytesOptions {
|
||||
BytesOptions {
|
||||
indexed: false,
|
||||
fast: false,
|
||||
stored: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Into<BytesOptions>> BitOr<T> for BytesOptions {
|
||||
type Output = BytesOptions;
|
||||
|
||||
fn bitor(self, other: T) -> BytesOptions {
|
||||
let other = other.into();
|
||||
BytesOptions {
|
||||
indexed: self.indexed | other.indexed,
|
||||
stored: self.stored | other.stored,
|
||||
fast: self.fast | other.fast,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<()> for BytesOptions {
|
||||
fn from(_: ()) -> Self {
|
||||
Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<FastFlag> for BytesOptions {
|
||||
fn from(_: FastFlag) -> Self {
|
||||
BytesOptions {
|
||||
indexed: false,
|
||||
stored: false,
|
||||
fast: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<StoredFlag> for BytesOptions {
|
||||
fn from(_: StoredFlag) -> Self {
|
||||
BytesOptions {
|
||||
indexed: false,
|
||||
stored: true,
|
||||
fast: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<IndexedFlag> for BytesOptions {
|
||||
fn from(_: IndexedFlag) -> Self {
|
||||
BytesOptions {
|
||||
indexed: true,
|
||||
stored: false,
|
||||
fast: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Head, Tail> From<SchemaFlagList<Head, Tail>> for BytesOptions
|
||||
where
|
||||
Head: Clone,
|
||||
Tail: Clone,
|
||||
Self: BitOr<Output = Self> + From<Head> + From<Tail>,
|
||||
{
|
||||
fn from(head_tail: SchemaFlagList<Head, Tail>) -> Self {
|
||||
Self::from(head_tail.head) | Self::from(head_tail.tail)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::schema::{BytesOptions, FAST, INDEXED, STORED};
|
||||
|
||||
#[test]
|
||||
fn test_bytes_option_fast_flag() {
|
||||
assert_eq!(BytesOptions::default().set_fast(), FAST.into());
|
||||
assert_eq!(BytesOptions::default().set_indexed(), INDEXED.into());
|
||||
assert_eq!(BytesOptions::default().set_stored(), STORED.into());
|
||||
}
|
||||
#[test]
|
||||
fn test_bytes_option_fast_flag_composition() {
|
||||
assert_eq!(
|
||||
BytesOptions::default().set_fast().set_stored(),
|
||||
(FAST | STORED).into()
|
||||
);
|
||||
assert_eq!(
|
||||
BytesOptions::default().set_indexed().set_fast(),
|
||||
(INDEXED | FAST).into()
|
||||
);
|
||||
assert_eq!(
|
||||
BytesOptions::default().set_stored().set_indexed(),
|
||||
(STORED | INDEXED).into()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bytes_option_fast_() {
|
||||
assert!(!BytesOptions::default().is_stored());
|
||||
assert!(!BytesOptions::default().is_fast());
|
||||
assert!(!BytesOptions::default().is_indexed());
|
||||
assert!(BytesOptions::default().set_stored().is_stored());
|
||||
assert!(BytesOptions::default().set_fast().is_fast());
|
||||
assert!(BytesOptions::default().set_indexed().is_indexed());
|
||||
}
|
||||
}
|
||||
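A small sketch of declaring bytes fields with the new options (field names are made up; it assumes `add_bytes_field` accepts anything convertible into `BytesOptions`, as the flag usages elsewhere in this patch suggest).

use tantivy::schema::{BytesOptions, Schema, INDEXED, STORED};

fn schema_with_bytes_fields() -> Schema {
    let mut schema_builder = Schema::builder();
    // Flag composition, as in the unit tests above.
    schema_builder.add_bytes_field("id", INDEXED | STORED);
    // Builder-style spelling, here a stored fast field.
    schema_builder.add_bytes_field(
        "payload",
        BytesOptions::default().set_fast().set_stored(),
    );
    schema_builder.build()
}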
@@ -161,20 +161,16 @@ impl Document {
|
||||
}
|
||||
|
||||
/// Returns all of the `FieldValue`s associated with the given field
|
||||
pub fn get_all(&self, field: Field) -> Vec<&Value> {
|
||||
pub fn get_all(&self, field: Field) -> impl Iterator<Item = &Value> {
|
||||
self.field_values
|
||||
.iter()
|
||||
.filter(|field_value| field_value.field() == field)
|
||||
.filter(move |field_value| field_value.field() == field)
|
||||
.map(FieldValue::value)
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Returns the first `FieldValue` associated with the given field
|
||||
pub fn get_first(&self, field: Field) -> Option<&Value> {
|
||||
self.field_values
|
||||
.iter()
|
||||
.find(|field_value| field_value.field() == field)
|
||||
.map(FieldValue::value)
|
||||
self.get_all(field).next()
|
||||
}
|
||||
|
||||
/// Prepares Document for being stored in the document store
|
||||
|
||||
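Since `get_all` now returns an iterator rather than a `Vec`, call sites that want the old shape need to collect; a hedged sketch:

use tantivy::schema::{Field, Value};
use tantivy::Document;

fn all_values(doc: &Document, field: Field) -> Vec<&Value> {
    // `get_all` is now lazy; collecting restores the previous Vec-based shape.
    doc.get_all(field).collect()
}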
@@ -1,6 +1,7 @@
|
||||
use crate::schema::TextOptions;
|
||||
use crate::schema::{is_valid_field_name, IntOptions};
|
||||
|
||||
use crate::schema::bytes_options::BytesOptions;
|
||||
use crate::schema::FieldType;
|
||||
use serde::de::{self, MapAccess, Visitor};
|
||||
use serde::ser::SerializeStruct;
|
||||
@@ -81,11 +82,10 @@ impl FieldEntry {
|
||||
}
|
||||
|
||||
/// Creates a field entry for a bytes field
|
||||
pub fn new_bytes(field_name: String) -> FieldEntry {
|
||||
assert!(is_valid_field_name(&field_name));
|
||||
pub fn new_bytes(field_name: String, bytes_type: BytesOptions) -> FieldEntry {
|
||||
FieldEntry {
|
||||
name: field_name,
|
||||
field_type: FieldType::Bytes,
|
||||
field_type: FieldType::Bytes(bytes_type),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -108,15 +108,31 @@ impl FieldEntry {
|
||||
| FieldType::F64(ref options)
|
||||
| FieldType::Date(ref options) => options.is_indexed(),
|
||||
FieldType::HierarchicalFacet => true,
|
||||
FieldType::Bytes => false,
|
||||
FieldType::Bytes(ref options) => options.is_indexed(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn has_fieldnorms(&self) -> bool {
|
||||
match self.field_type {
|
||||
FieldType::Str(ref options) => options
|
||||
.get_indexing_options()
|
||||
.map(|options| options.fieldnorms())
|
||||
.unwrap_or(false),
|
||||
FieldType::U64(ref options)
|
||||
| FieldType::I64(ref options)
|
||||
| FieldType::F64(ref options)
|
||||
| FieldType::Date(ref options) => options.index_option().has_fieldnorms(),
|
||||
FieldType::HierarchicalFacet => false,
|
||||
FieldType::Bytes(ref _options) => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true iff the field is an int (signed or unsigned) fast field
|
||||
pub fn is_int_fast(&self) -> bool {
|
||||
pub fn is_fast(&self) -> bool {
|
||||
match self.field_type {
|
||||
FieldType::U64(ref options)
|
||||
| FieldType::I64(ref options)
|
||||
| FieldType::Date(ref options)
|
||||
| FieldType::F64(ref options) => options.is_fast(),
|
||||
_ => false,
|
||||
}
|
||||
@@ -132,7 +148,7 @@ impl FieldEntry {
|
||||
FieldType::Str(ref options) => options.is_stored(),
|
||||
// TODO make stored hierarchical facet optional
|
||||
FieldType::HierarchicalFacet => true,
|
||||
FieldType::Bytes => false,
|
||||
FieldType::Bytes(ref options) => options.is_stored(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -169,8 +185,9 @@ impl Serialize for FieldEntry {
|
||||
FieldType::HierarchicalFacet => {
|
||||
s.serialize_field("type", "hierarchical_facet")?;
|
||||
}
|
||||
FieldType::Bytes => {
|
||||
FieldType::Bytes(ref options) => {
|
||||
s.serialize_field("type", "bytes")?;
|
||||
s.serialize_field("options", options)?;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -221,15 +238,12 @@ impl<'de> Deserialize<'de> for FieldEntry {
|
||||
if ty.is_some() {
|
||||
return Err(de::Error::duplicate_field("type"));
|
||||
}
|
||||
let type_string = map.next_value()?;
|
||||
match type_string {
|
||||
let type_string = map.next_value::<String>()?;
|
||||
match type_string.as_str() {
|
||||
"hierarchical_facet" => {
|
||||
field_type = Some(FieldType::HierarchicalFacet);
|
||||
}
|
||||
"bytes" => {
|
||||
field_type = Some(FieldType::Bytes);
|
||||
}
|
||||
"text" | "u64" | "i64" | "f64" | "date" => {
|
||||
"text" | "u64" | "i64" | "f64" | "date" | "bytes" => {
|
||||
// These types require additional options to create a field_type
|
||||
}
|
||||
_ => panic!("unhandled type"),
|
||||
@@ -242,12 +256,13 @@ impl<'de> Deserialize<'de> for FieldEntry {
|
||||
specified before `options`";
|
||||
return Err(de::Error::custom(msg));
|
||||
}
|
||||
Some(ty) => match ty {
|
||||
Some(ref ty) => match ty.as_str() {
|
||||
"text" => field_type = Some(FieldType::Str(map.next_value()?)),
|
||||
"u64" => field_type = Some(FieldType::U64(map.next_value()?)),
|
||||
"i64" => field_type = Some(FieldType::I64(map.next_value()?)),
|
||||
"f64" => field_type = Some(FieldType::F64(map.next_value()?)),
|
||||
"date" => field_type = Some(FieldType::Date(map.next_value()?)),
|
||||
"bytes" => field_type = Some(FieldType::Bytes(map.next_value()?)),
|
||||
_ => {
|
||||
let msg = format!("Unrecognised type {}", ty);
|
||||
return Err(de::Error::custom(msg));
|
||||
@@ -272,7 +287,8 @@ impl<'de> Deserialize<'de> for FieldEntry {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::schema::TEXT;
|
||||
use crate::schema::{Schema, STRING, TEXT};
|
||||
use crate::Index;
|
||||
use serde_json;
|
||||
|
||||
#[test]
|
||||
@@ -291,7 +307,8 @@ mod tests {
|
||||
"options": {
|
||||
"indexing": {
|
||||
"record": "position",
|
||||
"tokenizer": "default"
|
||||
"tokenizer": "default",
|
||||
"fieldnorms": true
|
||||
},
|
||||
"stored": false
|
||||
}
|
||||
@@ -309,4 +326,19 @@ mod tests {
|
||||
_ => panic!("expected FieldType::Str"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fieldnorms() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text = schema_builder.add_text_field("text", STRING);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(text=>"abc"));
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let err = searcher.segment_reader(0u32).get_fieldnorms_reader(text);
|
||||
assert!(matches!(err, Err(crate::TantivyError::SchemaError(_))));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use base64::decode;
|
||||
|
||||
use crate::schema::bytes_options::BytesOptions;
|
||||
use crate::schema::Facet;
|
||||
use crate::schema::IndexRecordOption;
|
||||
use crate::schema::TextFieldIndexing;
|
||||
@@ -63,7 +62,7 @@ pub enum FieldType {
|
||||
/// Hierarchical Facet
|
||||
HierarchicalFacet,
|
||||
/// Bytes (one per document)
|
||||
Bytes,
|
||||
Bytes(BytesOptions),
|
||||
}
|
||||
|
||||
impl FieldType {
|
||||
@@ -76,7 +75,7 @@ impl FieldType {
|
||||
FieldType::F64(_) => Type::F64,
|
||||
FieldType::Date(_) => Type::Date,
|
||||
FieldType::HierarchicalFacet => Type::HierarchicalFacet,
|
||||
FieldType::Bytes => Type::Bytes,
|
||||
FieldType::Bytes(_) => Type::Bytes,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -89,7 +88,7 @@ impl FieldType {
|
||||
| FieldType::F64(ref int_options) => int_options.is_indexed(),
|
||||
FieldType::Date(ref date_options) => date_options.is_indexed(),
|
||||
FieldType::HierarchicalFacet => true,
|
||||
FieldType::Bytes => false,
|
||||
FieldType::Bytes(ref bytes_options) => bytes_options.is_indexed(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -113,7 +112,13 @@ impl FieldType {
|
||||
}
|
||||
}
|
||||
FieldType::HierarchicalFacet => Some(IndexRecordOption::Basic),
|
||||
FieldType::Bytes => None,
|
||||
FieldType::Bytes(ref bytes_options) => {
|
||||
if bytes_options.is_indexed() {
|
||||
Some(IndexRecordOption::Basic)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -140,7 +145,7 @@ impl FieldType {
|
||||
ValueParsingError::TypeError(format!("Expected an integer, got {:?}", json)),
|
||||
),
|
||||
FieldType::HierarchicalFacet => Ok(Value::Facet(Facet::from(field_text))),
|
||||
FieldType::Bytes => decode(field_text).map(Value::Bytes).map_err(|_| {
|
||||
FieldType::Bytes(_) => base64::decode(field_text).map(Value::Bytes).map_err(|_| {
|
||||
ValueParsingError::InvalidBase64(format!(
|
||||
"Expected base64 string, got {:?}",
|
||||
field_text
|
||||
@@ -172,7 +177,7 @@ impl FieldType {
|
||||
Err(ValueParsingError::OverflowError(msg))
|
||||
}
|
||||
}
|
||||
FieldType::Str(_) | FieldType::HierarchicalFacet | FieldType::Bytes => {
|
||||
FieldType::Str(_) | FieldType::HierarchicalFacet | FieldType::Bytes(_) => {
|
||||
let msg = format!("Expected a string, got {:?}", json);
|
||||
Err(ValueParsingError::TypeError(msg))
|
||||
}
|
||||
@@ -248,18 +253,18 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_bytes_value_from_json() {
|
||||
let result = FieldType::Bytes
|
||||
let result = FieldType::Bytes(Default::default())
|
||||
.value_from_json(&json!("dGhpcyBpcyBhIHRlc3Q="))
|
||||
.unwrap();
|
||||
assert_eq!(result, Value::Bytes("this is a test".as_bytes().to_vec()));
|
||||
|
||||
let result = FieldType::Bytes.value_from_json(&json!(521));
|
||||
let result = FieldType::Bytes(Default::default()).value_from_json(&json!(521));
|
||||
match result {
|
||||
Err(ValueParsingError::TypeError(_)) => {}
|
||||
_ => panic!("Expected parse failure for wrong type"),
|
||||
}
|
||||
|
||||
let result = FieldType::Bytes.value_from_json(&json!("-"));
|
||||
let result = FieldType::Bytes(Default::default()).value_from_json(&json!("-"));
|
||||
match result {
|
||||
Err(ValueParsingError::InvalidBase64(_)) => {}
|
||||
_ => panic!("Expected parse failure for invalid base64"),
|
||||
|
||||
@@ -8,7 +8,7 @@ pub struct StoredFlag;
|
||||
/// This flag can apply to any kind of field.
|
||||
///
|
||||
/// The stored fields of a document can be retrieved given its `DocId`.
|
||||
/// Stored field are stored together and LZ4 compressed.
|
||||
/// Stored fields are stored together and compressed.
|
||||
/// Reading the stored fields of a document is relatively slow.
|
||||
/// (~ 100 microsecs)
|
||||
///
|
||||
|
||||
@@ -14,10 +14,50 @@ pub enum Cardinality {
|
||||
MultiValues,
|
||||
}
|
||||
|
||||
/// Define how an int field should be indexed by tantivy.
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub enum IntOptionIndex {
|
||||
#[serde(rename = "no_index")]
|
||||
NoIndex,
|
||||
#[serde(rename = "index_no_fieldnorms")]
|
||||
IndexNoFieldnorms,
|
||||
#[serde(rename = "index_with_fieldnorms")]
|
||||
IndexWithFieldnorms,
|
||||
}
|
||||
|
||||
impl BitOr<IntOptionIndex> for IntOptionIndex {
|
||||
type Output = IntOptionIndex;
|
||||
|
||||
fn bitor(self, other: IntOptionIndex) -> IntOptionIndex {
|
||||
match (self, other) {
|
||||
(_, Self::IndexWithFieldnorms) | (Self::IndexWithFieldnorms, _) => {
|
||||
Self::IndexWithFieldnorms
|
||||
}
|
||||
(_, Self::IndexNoFieldnorms) | (Self::IndexNoFieldnorms, _) => Self::IndexNoFieldnorms,
|
||||
(Self::NoIndex, Self::NoIndex) => Self::NoIndex,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl IntOptionIndex {
|
||||
pub fn is_indexed(&self) -> bool {
|
||||
match *self {
|
||||
Self::NoIndex => false,
|
||||
Self::IndexNoFieldnorms | Self::IndexWithFieldnorms => true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn has_fieldnorms(&self) -> bool {
|
||||
match *self {
|
||||
Self::NoIndex | Self::IndexNoFieldnorms => false,
|
||||
Self::IndexWithFieldnorms => true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Define how an u64, i64, of f64 field should be handled by tantivy.
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct IntOptions {
|
||||
indexed: bool,
|
||||
indexed: IntOptionIndex,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
fast: Option<Cardinality>,
|
||||
stored: bool,
|
||||
@@ -31,7 +71,7 @@ impl IntOptions {
|
||||
|
||||
/// Returns true iff the value is indexed.
|
||||
pub fn is_indexed(&self) -> bool {
|
||||
self.indexed
|
||||
self.indexed.is_indexed()
|
||||
}
|
||||
|
||||
/// Returns true iff the value is a fast field.
|
||||
@@ -39,7 +79,7 @@ impl IntOptions {
|
||||
self.fast.is_some()
|
||||
}
|
||||
|
||||
/// Set the u64 options as stored.
|
||||
/// Set the field as stored.
|
||||
///
|
||||
/// Only the fields that are set as *stored* are
|
||||
/// persisted into the Tantivy's store.
|
||||
@@ -48,16 +88,25 @@ impl IntOptions {
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the u64 options as indexed.
|
||||
///
|
||||
/// Setting an integer as indexed will generate
|
||||
/// a posting list for each value taken by the integer.
|
||||
pub fn index_option(&self) -> &IntOptionIndex {
|
||||
&self.indexed
|
||||
}
|
||||
|
||||
pub fn set_indexed(mut self) -> IntOptions {
|
||||
self.indexed = true;
|
||||
self.indexed = IntOptionIndex::IndexWithFieldnorms;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the u64 options as a single-valued fast field.
|
||||
/// Set the field as indexed.
|
||||
///
|
||||
/// Setting an integer as indexed will generate
|
||||
/// a posting list for each value taken by the integer.
|
||||
pub fn set_index_option(mut self, int_option_index: IntOptionIndex) -> IntOptions {
|
||||
self.indexed = int_option_index;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the field as a single-valued fast field.
|
||||
///
|
||||
/// Fast fields are designed for random access.
|
||||
/// Access time are similar to a random lookup in an array.
|
||||
@@ -80,7 +129,7 @@ impl IntOptions {
|
||||
impl Default for IntOptions {
|
||||
fn default() -> IntOptions {
|
||||
IntOptions {
|
||||
indexed: false,
|
||||
indexed: IntOptionIndex::NoIndex,
|
||||
stored: false,
|
||||
fast: None,
|
||||
}
|
||||
@@ -96,7 +145,7 @@ impl From<()> for IntOptions {
|
||||
impl From<FastFlag> for IntOptions {
|
||||
fn from(_: FastFlag) -> Self {
|
||||
IntOptions {
|
||||
indexed: false,
|
||||
indexed: IntOptionIndex::NoIndex,
|
||||
stored: false,
|
||||
fast: Some(Cardinality::SingleValue),
|
||||
}
|
||||
@@ -106,7 +155,7 @@ impl From<FastFlag> for IntOptions {
|
||||
impl From<StoredFlag> for IntOptions {
|
||||
fn from(_: StoredFlag) -> Self {
|
||||
IntOptions {
|
||||
indexed: false,
|
||||
indexed: IntOptionIndex::NoIndex,
|
||||
stored: true,
|
||||
fast: None,
|
||||
}
|
||||
@@ -116,7 +165,7 @@ impl From<StoredFlag> for IntOptions {
|
||||
impl From<IndexedFlag> for IntOptions {
|
||||
fn from(_: IndexedFlag) -> Self {
|
||||
IntOptions {
|
||||
indexed: true,
|
||||
indexed: IntOptionIndex::IndexWithFieldnorms,
|
||||
stored: false,
|
||||
fast: None,
|
||||
}
|
||||
@@ -127,12 +176,12 @@ impl<T: Into<IntOptions>> BitOr<T> for IntOptions {
|
||||
type Output = IntOptions;
|
||||
|
||||
fn bitor(self, other: T) -> IntOptions {
|
||||
let mut res = IntOptions::default();
|
||||
let other = other.into();
|
||||
res.indexed = self.indexed | other.indexed;
|
||||
res.stored = self.stored | other.stored;
|
||||
res.fast = self.fast.or(other.fast);
|
||||
res
|
||||
IntOptions {
|
||||
indexed: self.indexed | other.indexed,
|
||||
stored: self.stored | other.stored,
|
||||
fast: self.fast.or(other.fast),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
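A hedged usage sketch (not part of the diff): with IntOptionIndex, an integer field can be indexed while opting out of fieldnorms. The re-export path of IntOptionIndex is an assumption, since the mod.rs hunk below only shows Cardinality and IntOptions being exported; set_fast and set_stored are the existing IntOptions builders.

use tantivy::schema::{Cardinality, IntOptionIndex, IntOptions};

fn popularity_options() -> IntOptions {
    IntOptions::default()
        .set_index_option(IntOptionIndex::IndexNoFieldnorms) // indexed, but no fieldnorms
        .set_fast(Cardinality::SingleValue)
        .set_stored()
}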
@@ -111,6 +111,7 @@ mod field_entry;
mod field_type;
mod field_value;

mod bytes_options;
mod field;
mod index_record_option;
mod int_options;
@@ -142,6 +143,7 @@ pub use self::text_options::TextOptions;
pub use self::text_options::STRING;
pub use self::text_options::TEXT;

pub use self::bytes_options::BytesOptions;
pub use self::flags::{FAST, INDEXED, STORED};
pub use self::int_options::Cardinality;
pub use self::int_options::IntOptions;
@@ -4,6 +4,7 @@ use std::collections::HashMap;
use std::sync::Arc;

use super::*;
use crate::schema::bytes_options::BytesOptions;
use serde::de::{SeqAccess, Visitor};
use serde::ser::SerializeSeq;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -160,8 +161,12 @@ impl SchemaBuilder {
    /// some document features at scoring time.
    /// These can be serializing and stored as a bytes field to
    /// get access rapidly when scoring each document.
    pub fn add_bytes_field(&mut self, field_name: &str) -> Field {
        let field_entry = FieldEntry::new_bytes(field_name.to_string());
    pub fn add_bytes_field<T: Into<BytesOptions>>(
        &mut self,
        field_name: &str,
        field_options: T,
    ) -> Field {
        let field_entry = FieldEntry::new_bytes(field_name.to_string(), field_options.into());
        self.add_field(field_entry)
    }

@@ -226,6 +231,10 @@ impl Schema {
        &self.0.fields[field.field_id() as usize]
    }

    pub fn num_fields(&self) -> usize {
        self.0.fields.len()
    }

    /// Return the field name for a given `Field`.
    pub fn get_field_name(&self, field: Field) -> &str {
        self.get_field_entry(field).name()
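A hedged sketch (not part of the diff): add_bytes_field now accepts any T: Into<BytesOptions>. Whether the INDEXED | STORED flag combination converts into BytesOptions on this branch is an assumption; passing an explicit BytesOptions would be the conservative call.

use tantivy::schema::{Schema, INDEXED, STORED};

fn schema_with_bytes_field() -> Schema {
    let mut schema_builder = Schema::builder();
    // assumed: flag combinations convert into BytesOptions, as they do for IntOptions
    schema_builder.add_bytes_field("thumbnail", INDEXED | STORED);
    schema_builder.build()
}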
@@ -381,19 +390,16 @@ impl<'de> Deserialize<'de> for Schema {
|
||||
|
||||
/// Error that may happen when deserializing
|
||||
/// a document from JSON.
|
||||
#[derive(Debug, Fail, PartialEq)]
|
||||
#[derive(Debug, Error, PartialEq)]
|
||||
pub enum DocParsingError {
|
||||
/// The payload given is not valid JSON.
|
||||
#[fail(display = "The provided string is not valid JSON")]
|
||||
#[error("The provided string is not valid JSON")]
|
||||
NotJSON(String),
|
||||
/// One of the value node could not be parsed.
|
||||
#[fail(display = "The field '{:?}' could not be parsed: {:?}", _0, _1)]
|
||||
#[error("The field '{0:?}' could not be parsed: {1:?}")]
|
||||
ValueError(String, ValueParsingError),
|
||||
/// The json-document contains a field that is not declared in the schema.
|
||||
#[fail(
|
||||
display = "The document contains a field that is not declared in the schema: {:?}",
|
||||
_0
|
||||
)]
|
||||
#[error("The document contains a field that is not declared in the schema: {0:?}")]
|
||||
NoSuchFieldInSchema(String),
|
||||
}
|
||||
|
||||
@@ -442,7 +448,8 @@ mod tests {
|
||||
"options": {
|
||||
"indexing": {
|
||||
"record": "position",
|
||||
"tokenizer": "default"
|
||||
"tokenizer": "default",
|
||||
"fieldnorms": true
|
||||
},
|
||||
"stored": false
|
||||
}
|
||||
@@ -453,7 +460,8 @@ mod tests {
|
||||
"options": {
|
||||
"indexing": {
|
||||
"record": "basic",
|
||||
"tokenizer": "raw"
|
||||
"tokenizer": "raw",
|
||||
"fieldnorms": false
|
||||
},
|
||||
"stored": false
|
||||
}
|
||||
@@ -462,7 +470,7 @@ mod tests {
|
||||
"name": "count",
|
||||
"type": "u64",
|
||||
"options": {
|
||||
"indexed": false,
|
||||
"indexed": "no_index",
|
||||
"fast": "single",
|
||||
"stored": true
|
||||
}
|
||||
@@ -471,7 +479,7 @@ mod tests {
|
||||
"name": "popularity",
|
||||
"type": "i64",
|
||||
"options": {
|
||||
"indexed": false,
|
||||
"indexed": "no_index",
|
||||
"fast": "single",
|
||||
"stored": true
|
||||
}
|
||||
@@ -480,7 +488,7 @@ mod tests {
|
||||
"name": "score",
|
||||
"type": "f64",
|
||||
"options": {
|
||||
"indexed": true,
|
||||
"indexed": "index_with_fieldnorms",
|
||||
"fast": "single",
|
||||
"stored": false
|
||||
}
|
||||
@@ -559,14 +567,14 @@ mod tests {
|
||||
.convert_named_doc(NamedFieldDocument(named_doc_map))
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
doc.get_all(title),
|
||||
doc.get_all(title).collect::<Vec<_>>(),
|
||||
vec![
|
||||
&Value::from("title1".to_string()),
|
||||
&Value::from("title2".to_string())
|
||||
]
|
||||
);
|
||||
assert_eq!(
|
||||
doc.get_all(val),
|
||||
doc.get_all(val).collect::<Vec<_>>(),
|
||||
vec![&Value::from(14u64), &Value::from(-1i64)]
|
||||
);
|
||||
}
|
||||
@@ -627,9 +635,15 @@ mod tests {
|
||||
doc.get_first(author_field).unwrap().text(),
|
||||
Some("fulmicoton")
|
||||
);
|
||||
assert_eq!(doc.get_first(count_field).unwrap().u64_value(), 4);
|
||||
assert_eq!(doc.get_first(popularity_field).unwrap().i64_value(), 10);
|
||||
assert_eq!(doc.get_first(score_field).unwrap().f64_value(), 80.5);
|
||||
assert_eq!(doc.get_first(count_field).unwrap().u64_value(), Some(4));
|
||||
assert_eq!(
|
||||
doc.get_first(popularity_field).unwrap().i64_value(),
|
||||
Some(10)
|
||||
);
|
||||
assert_eq!(
|
||||
doc.get_first(score_field).unwrap().f64_value(),
|
||||
Some(80.5f64)
|
||||
);
|
||||
}
|
||||
{
|
||||
let json_err = schema.parse_document(
|
||||
@@ -739,7 +753,8 @@ mod tests {
|
||||
"options": {
|
||||
"indexing": {
|
||||
"record": "position",
|
||||
"tokenizer": "default"
|
||||
"tokenizer": "default",
|
||||
"fieldnorms": true
|
||||
},
|
||||
"stored": false
|
||||
}
|
||||
@@ -748,7 +763,7 @@ mod tests {
|
||||
"name": "popularity",
|
||||
"type": "i64",
|
||||
"options": {
|
||||
"indexed": false,
|
||||
"indexed": "no_index",
|
||||
"fast": "single",
|
||||
"stored": true
|
||||
}
|
||||
@@ -769,7 +784,8 @@ mod tests {
|
||||
"options": {
|
||||
"indexing": {
|
||||
"record": "basic",
|
||||
"tokenizer": "raw"
|
||||
"tokenizer": "raw",
|
||||
"fieldnorms": false
|
||||
},
|
||||
"stored": true
|
||||
}
|
||||
@@ -778,7 +794,7 @@ mod tests {
|
||||
"name": "_timestamp",
|
||||
"type": "date",
|
||||
"options": {
|
||||
"indexed": true,
|
||||
"indexed": "index_with_fieldnorms",
|
||||
"fast": "single",
|
||||
"stored": true
|
||||
}
|
||||
@@ -789,7 +805,8 @@ mod tests {
|
||||
"options": {
|
||||
"indexing": {
|
||||
"record": "position",
|
||||
"tokenizer": "default"
|
||||
"tokenizer": "default",
|
||||
"fieldnorms": true
|
||||
},
|
||||
"stored": false
|
||||
}
|
||||
@@ -798,7 +815,7 @@ mod tests {
|
||||
"name": "popularity",
|
||||
"type": "i64",
|
||||
"options": {
|
||||
"indexed": false,
|
||||
"indexed": "no_index",
|
||||
"fast": "single",
|
||||
"stored": true
|
||||
}
|
||||
|
||||
@@ -96,7 +96,8 @@ impl Term {
|
||||
term
|
||||
}
|
||||
|
||||
pub(crate) fn from_field_bytes(field: Field, bytes: &[u8]) -> Term {
|
||||
/// Builds a term bytes.
|
||||
pub fn from_field_bytes(field: Field, bytes: &[u8]) -> Term {
|
||||
let mut term = Term::for_field(field);
|
||||
term.set_bytes(bytes);
|
||||
term
|
||||
@@ -112,7 +113,7 @@ impl Term {
|
||||
pub(crate) fn set_field(&mut self, field: Field) {
|
||||
self.0.clear();
|
||||
self.0
|
||||
.extend_from_slice(&field.field_id().to_be_bytes()[..]);
|
||||
.extend_from_slice(field.field_id().to_be_bytes().as_ref());
|
||||
}
|
||||
|
||||
/// Sets a u64 value in the term.
|
||||
@@ -123,7 +124,7 @@ impl Term {
|
||||
/// the natural order of the values.
|
||||
pub fn set_u64(&mut self, val: u64) {
|
||||
self.0.resize(INT_TERM_LEN, 0u8);
|
||||
self.0[4..12].copy_from_slice(val.to_be_bytes().as_ref());
|
||||
self.set_bytes(val.to_be_bytes().as_ref());
|
||||
}
|
||||
|
||||
/// Sets a `i64` value in the term.
|
||||
@@ -136,7 +137,8 @@ impl Term {
|
||||
self.set_u64(common::f64_to_u64(val));
|
||||
}
|
||||
|
||||
fn set_bytes(&mut self, bytes: &[u8]) {
|
||||
/// Sets the value of a `Bytes` field.
|
||||
pub fn set_bytes(&mut self, bytes: &[u8]) {
|
||||
self.0.resize(4, 0u8);
|
||||
self.0.extend(bytes);
|
||||
}
|
||||
@@ -151,7 +153,7 @@ impl<B> Term<B>
|
||||
where
|
||||
B: AsRef<[u8]>,
|
||||
{
|
||||
/// Wraps a source of data
|
||||
/// Wraps a object holding bytes
|
||||
pub fn wrap(data: B) -> Term<B> {
|
||||
Term(data)
|
||||
}
|
||||
|
||||
@@ -55,6 +55,7 @@ impl Default for TextOptions {
|
||||
pub struct TextFieldIndexing {
|
||||
record: IndexRecordOption,
|
||||
tokenizer: Cow<'static, str>,
|
||||
fieldnorms: bool,
|
||||
}
|
||||
|
||||
impl Default for TextFieldIndexing {
|
||||
@@ -62,6 +63,7 @@ impl Default for TextFieldIndexing {
|
||||
TextFieldIndexing {
|
||||
tokenizer: Cow::Borrowed("default"),
|
||||
record: IndexRecordOption::Basic,
|
||||
fieldnorms: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -78,6 +80,15 @@ impl TextFieldIndexing {
|
||||
&self.tokenizer
|
||||
}
|
||||
|
||||
pub fn set_fieldnorms(mut self, fieldnorms: bool) -> TextFieldIndexing {
|
||||
self.fieldnorms = fieldnorms;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn fieldnorms(&self) -> bool {
|
||||
self.fieldnorms
|
||||
}
|
||||
|
||||
/// Sets which information should be indexed with the tokens.
|
||||
///
|
||||
/// See [IndexRecordOption](./enum.IndexRecordOption.html) for more detail.
|
||||
@@ -99,6 +110,7 @@ pub const STRING: TextOptions = TextOptions {
|
||||
indexing: Some(TextFieldIndexing {
|
||||
tokenizer: Cow::Borrowed("raw"),
|
||||
record: IndexRecordOption::Basic,
|
||||
fieldnorms: false,
|
||||
}),
|
||||
stored: false,
|
||||
};
|
||||
@@ -108,6 +120,7 @@ pub const TEXT: TextOptions = TextOptions {
|
||||
indexing: Some(TextFieldIndexing {
|
||||
tokenizer: Cow::Borrowed("default"),
|
||||
record: IndexRecordOption::WithFreqsAndPositions,
|
||||
fieldnorms: true,
|
||||
}),
|
||||
stored: false,
|
||||
};
|
||||
@@ -117,10 +130,10 @@ impl<T: Into<TextOptions>> BitOr<T> for TextOptions {
|
||||
|
||||
fn bitor(self, other: T) -> TextOptions {
|
||||
let other = other.into();
|
||||
let mut res = TextOptions::default();
|
||||
res.indexing = self.indexing.or(other.indexing);
|
||||
res.stored = self.stored | other.stored;
|
||||
res
|
||||
TextOptions {
|
||||
indexing: self.indexing.or(other.indexing),
|
||||
stored: self.stored | other.stored,
|
||||
}
|
||||
}
|
||||
}
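A hedged usage sketch (not part of the diff): opting a text field out of fieldnorms with the new set_fieldnorms setter; set_indexing_options, set_tokenizer and set_index_option are the existing TextOptions/TextFieldIndexing builders.

use tantivy::schema::{IndexRecordOption, TextFieldIndexing, TextOptions};

fn keyword_text_options() -> TextOptions {
    TextOptions::default().set_indexing_options(
        TextFieldIndexing::default()
            .set_tokenizer("raw")
            .set_index_option(IndexRecordOption::Basic)
            .set_fieldnorms(false), // new on this branch
    )
}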
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ use std::{cmp::Ordering, fmt};
|
||||
|
||||
/// Value represents the value of a any field.
|
||||
/// It is an enum over all over all of the possible field type.
|
||||
#[derive(Debug, Clone, PartialEq, PartialOrd)]
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum Value {
|
||||
/// The str type is used for any text information.
|
||||
Str(String),
|
||||
@@ -28,6 +28,11 @@ pub enum Value {
|
||||
}
|
||||
|
||||
impl Eq for Value {}
|
||||
impl PartialOrd for Value {
|
||||
fn partial_cmp(&self, other: &Value) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
impl Ord for Value {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
match (self, other) {
|
||||
@@ -125,62 +130,76 @@ impl Value {
|
||||
/// Returns the text value, provided the value is of the `Str` type.
|
||||
/// (Returns None if the value is not of the `Str` type).
|
||||
pub fn text(&self) -> Option<&str> {
|
||||
match *self {
|
||||
Value::Str(ref text) => Some(text),
|
||||
_ => None,
|
||||
if let Value::Str(text) = self {
|
||||
Some(text)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the tokenized text, provided the value is of the `PreTokStr` type.
|
||||
/// (Returns None if the value is not of the `PreTokStr` type).
|
||||
///
|
||||
/// Returns None if the value is not of the `PreTokStr` type.
|
||||
pub fn tokenized_text(&self) -> Option<&PreTokenizedString> {
|
||||
match *self {
|
||||
Value::PreTokStr(ref tok_text) => Some(tok_text),
|
||||
_ => None,
|
||||
if let Value::PreTokStr(tokenized_text) = self {
|
||||
Some(tokenized_text)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the u64-value, provided the value is of the `U64` type.
|
||||
///
|
||||
/// # Panics
|
||||
/// If the value is not of type `U64`
|
||||
pub fn u64_value(&self) -> u64 {
|
||||
match *self {
|
||||
Value::U64(ref value) => *value,
|
||||
_ => panic!("This is not a u64 field."),
|
||||
/// Returns None if the value is not of the `U64` type.
|
||||
pub fn u64_value(&self) -> Option<u64> {
|
||||
if let Value::U64(val) = self {
|
||||
Some(*val)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the i64-value, provided the value is of the `I64` type.
|
||||
///
|
||||
/// # Panics
|
||||
/// If the value is not of type `I64`
|
||||
pub fn i64_value(&self) -> i64 {
|
||||
match *self {
|
||||
Value::I64(ref value) => *value,
|
||||
_ => panic!("This is not a i64 field."),
|
||||
/// Return None if the value is not of type `I64`.
|
||||
pub fn i64_value(&self) -> Option<i64> {
|
||||
if let Value::I64(val) = self {
|
||||
Some(*val)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the f64-value, provided the value is of the `F64` type.
|
||||
///
|
||||
/// # Panics
|
||||
/// If the value is not of type `F64`
|
||||
pub fn f64_value(&self) -> f64 {
|
||||
match *self {
|
||||
Value::F64(ref value) => *value,
|
||||
_ => panic!("This is not a f64 field."),
|
||||
/// Return None if the value is not of type `F64`.
|
||||
pub fn f64_value(&self) -> Option<f64> {
|
||||
if let Value::F64(value) = self {
|
||||
Some(*value)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the Date-value, provided the value is of the `Date` type.
|
||||
///
|
||||
/// # Panics
|
||||
/// If the value is not of type `Date`
|
||||
pub fn date_value(&self) -> &DateTime {
|
||||
match *self {
|
||||
Value::Date(ref value) => value,
|
||||
_ => panic!("This is not a date field."),
|
||||
/// Returns None if the value is not of type `Date`.
|
||||
pub fn date_value(&self) -> Option<&DateTime> {
|
||||
if let Value::Date(date) = self {
|
||||
Some(date)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the Bytes-value, provided the value is of the `Bytes` type.
|
||||
///
|
||||
/// Returns None if the value is not of type `Bytes`.
|
||||
pub fn bytes_value(&self) -> Option<&[u8]> {
|
||||
if let Value::Bytes(bytes) = self {
|
||||
Some(bytes)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
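A minimal sketch (not part of the diff): since the typed accessors above now return Option, a type mismatch can be handled by the caller instead of panicking.

use tantivy::schema::Value;

fn count_or_zero(value: &Value) -> u64 {
    value.u64_value().unwrap_or(0)
}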
|
||||
|
||||
@@ -263,19 +263,17 @@ impl SnippetGenerator {
|
||||
) -> crate::Result<SnippetGenerator> {
|
||||
let mut terms = BTreeSet::new();
|
||||
query.query_terms(&mut terms);
|
||||
let terms_text: BTreeMap<String, Score> = terms
|
||||
.into_iter()
|
||||
.filter(|term| term.field() == field)
|
||||
.flat_map(|term| {
|
||||
let doc_freq = searcher.doc_freq(&term);
|
||||
let mut terms_text: BTreeMap<String, Score> = Default::default();
|
||||
for term in terms {
|
||||
if term.field() != field {
|
||||
continue;
|
||||
}
|
||||
let doc_freq = searcher.doc_freq(&term)?;
|
||||
if doc_freq > 0 {
|
||||
let score = 1.0 / (1.0 + doc_freq as Score);
|
||||
if doc_freq > 0 {
|
||||
Some((term.text().to_string(), score))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
terms_text.insert(term.text().to_string(), score);
|
||||
}
|
||||
}
|
||||
let tokenizer = searcher.index().tokenizer_for_field(field)?;
|
||||
Ok(SnippetGenerator {
|
||||
terms_text,
|
||||
@@ -302,7 +300,6 @@ impl SnippetGenerator {
|
||||
pub fn snippet_from_doc(&self, doc: &Document) -> Snippet {
|
||||
let text: String = doc
|
||||
.get_all(self.field)
|
||||
.into_iter()
|
||||
.flat_map(Value::text)
|
||||
.collect::<Vec<&str>>()
|
||||
.join(" ");
|
||||
|
||||
@@ -307,7 +307,7 @@ mod test {
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let searcher_space_usage = searcher.space_usage();
|
||||
let searcher_space_usage = searcher.space_usage().unwrap();
|
||||
assert_eq!(0, searcher_space_usage.total());
|
||||
}
|
||||
|
||||
@@ -346,7 +346,7 @@ mod test {
|
||||
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let searcher_space_usage = searcher.space_usage();
|
||||
let searcher_space_usage = searcher.space_usage().unwrap();
|
||||
assert!(searcher_space_usage.total() > 0);
|
||||
assert_eq!(1, searcher_space_usage.segments().len());
|
||||
|
||||
@@ -386,7 +386,7 @@ mod test {
|
||||
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let searcher_space_usage = searcher.space_usage();
|
||||
let searcher_space_usage = searcher.space_usage().unwrap();
|
||||
assert!(searcher_space_usage.total() > 0);
|
||||
assert_eq!(1, searcher_space_usage.segments().len());
|
||||
|
||||
@@ -425,7 +425,7 @@ mod test {
|
||||
}
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let searcher_space_usage = searcher.space_usage();
|
||||
let searcher_space_usage = searcher.space_usage().unwrap();
|
||||
assert!(searcher_space_usage.total() > 0);
|
||||
assert_eq!(1, searcher_space_usage.segments().len());
|
||||
|
||||
@@ -446,49 +446,47 @@ mod test {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_deletes() {
|
||||
fn test_deletes() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let name = schema_builder.add_u64_field("name", INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(name => 1u64));
|
||||
index_writer.add_document(doc!(name => 2u64));
|
||||
index_writer.add_document(doc!(name => 3u64));
|
||||
index_writer.add_document(doc!(name => 4u64));
|
||||
index_writer.commit().unwrap();
|
||||
index_writer.commit()?;
|
||||
}
|
||||
|
||||
{
|
||||
let mut index_writer2 = index.writer(50_000_000).unwrap();
|
||||
let mut index_writer2 = index.writer(50_000_000)?;
|
||||
index_writer2.delete_term(Term::from_field_u64(name, 2u64));
|
||||
index_writer2.delete_term(Term::from_field_u64(name, 3u64));
|
||||
|
||||
// ok, now we should have a deleted doc
|
||||
index_writer2.commit().unwrap();
|
||||
index_writer2.commit()?;
|
||||
}
|
||||
|
||||
let reader = index.reader().unwrap();
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let searcher_space_usage = searcher.space_usage();
|
||||
let searcher_space_usage = searcher.space_usage()?;
|
||||
assert!(searcher_space_usage.total() > 0);
|
||||
assert_eq!(1, searcher_space_usage.segments().len());
|
||||
|
||||
let segment = &searcher_space_usage.segments()[0];
|
||||
assert!(segment.total() > 0);
|
||||
let segment_space_usage = &searcher_space_usage.segments()[0];
|
||||
assert!(segment_space_usage.total() > 0);
|
||||
|
||||
assert_eq!(2, segment.num_docs());
|
||||
assert_eq!(2, segment_space_usage.num_docs());
|
||||
|
||||
expect_single_field(segment.termdict(), &name, 1, 512);
|
||||
expect_single_field(segment.postings(), &name, 1, 512);
|
||||
assert_eq!(0, segment.positions().total());
|
||||
assert_eq!(0, segment.positions_skip_idx().total());
|
||||
assert_eq!(0, segment.fast_fields().total());
|
||||
expect_single_field(segment.fieldnorms(), &name, 1, 512);
|
||||
// TODO: understand why the following fails
|
||||
// assert_eq!(0, segment.store().total());
|
||||
assert!(segment.deletes() > 0);
|
||||
expect_single_field(segment_space_usage.termdict(), &name, 1, 512);
|
||||
expect_single_field(segment_space_usage.postings(), &name, 1, 512);
|
||||
assert_eq!(0, segment_space_usage.positions().total());
|
||||
assert_eq!(0, segment_space_usage.positions_skip_idx().total());
|
||||
assert_eq!(0, segment_space_usage.fast_fields().total());
|
||||
expect_single_field(segment_space_usage.fieldnorms(), &name, 1, 512);
|
||||
assert!(segment_space_usage.deletes() > 0);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
src/store/compression_brotli.rs (new file, 20 lines)
@@ -0,0 +1,20 @@
use std::io;

/// Name of the compression scheme used in the doc store.
///
/// This name is appended to the version string of tantivy.
pub const COMPRESSION: &'static str = "brotli";

pub fn compress(mut uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
    let mut params = brotli::enc::BrotliEncoderParams::default();
    params.quality = 5;
    compressed.clear();
    brotli::BrotliCompress(&mut uncompressed, compressed, &params)?;
    Ok(())
}

pub fn decompress(mut compressed: &[u8], decompressed: &mut Vec<u8>) -> io::Result<()> {
    decompressed.clear();
    brotli::BrotliDecompress(&mut compressed, decompressed)?;
    Ok(())
}
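A minimal sketch (not part of the diff): a compress/decompress round trip through the two helpers above. They are private to the store module, so this is only meant to illustrate their contract of clearing and filling the output buffers.

fn roundtrip(payload: &[u8]) -> std::io::Result<Vec<u8>> {
    let mut compressed = Vec::new();
    compress(payload, &mut compressed)?;
    let mut decompressed = Vec::new();
    decompress(&compressed, &mut decompressed)?;
    assert_eq!(decompressed.as_slice(), payload);
    Ok(decompressed)
}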
|
||||
@@ -5,7 +5,7 @@ A field needs to be marked as stored in the schema in
|
||||
order to be handled in the `Store`.
|
||||
|
||||
Internally, documents (or rather their stored fields) are serialized to a buffer.
|
||||
When the buffer exceeds 16K, the buffer is compressed using `LZ4`
|
||||
When the buffer exceeds 16K, the buffer is compressed using `brotli`, `LZ4` or `snappy`
|
||||
and the resulting block is written to disk.
|
||||
|
||||
One can then request for a specific `DocId`.
|
||||
@@ -39,6 +39,9 @@ mod writer;
|
||||
pub use self::reader::StoreReader;
|
||||
pub use self::writer::StoreWriter;
|
||||
|
||||
#[cfg(all(feature = "lz4", feature = "brotli"))]
|
||||
compile_error!("feature `lz4` or `brotli` must not be enabled together.");
|
||||
|
||||
#[cfg(feature = "lz4")]
|
||||
mod compression_lz4;
|
||||
#[cfg(feature = "lz4")]
|
||||
@@ -46,11 +49,18 @@ pub use self::compression_lz4::COMPRESSION;
|
||||
#[cfg(feature = "lz4")]
|
||||
use self::compression_lz4::{compress, decompress};
|
||||
|
||||
#[cfg(not(feature = "lz4"))]
|
||||
#[cfg(feature = "brotli")]
|
||||
mod compression_brotli;
|
||||
#[cfg(feature = "brotli")]
|
||||
pub use self::compression_brotli::COMPRESSION;
|
||||
#[cfg(feature = "brotli")]
|
||||
use self::compression_brotli::{compress, decompress};
|
||||
|
||||
#[cfg(not(any(feature = "lz4", feature = "brotli")))]
|
||||
mod compression_snap;
|
||||
#[cfg(not(feature = "lz4"))]
|
||||
#[cfg(not(any(feature = "lz4", feature = "brotli")))]
|
||||
pub use self::compression_snap::COMPRESSION;
|
||||
#[cfg(not(feature = "lz4"))]
|
||||
#[cfg(not(any(feature = "lz4", feature = "brotli")))]
|
||||
use self::compression_snap::{compress, decompress};
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -103,19 +113,18 @@ pub mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_store() {
|
||||
fn test_store() -> crate::Result<()> {
|
||||
let path = Path::new("store");
|
||||
let mut directory = RAMDirectory::create();
|
||||
let store_file = directory.open_write(path).unwrap();
|
||||
let schema = write_lorem_ipsum_store(store_file, 1_000);
|
||||
let directory = RAMDirectory::create();
|
||||
let store_wrt = directory.open_write(path)?;
|
||||
let schema = write_lorem_ipsum_store(store_wrt, 1_000);
|
||||
let field_title = schema.get_field("title").unwrap();
|
||||
let store_source = directory.open_read(path).unwrap();
|
||||
let store = StoreReader::from_source(store_source);
|
||||
let store_file = directory.open_read(path)?;
|
||||
let store = StoreReader::open(store_file)?;
|
||||
for i in 0..1_000 {
|
||||
assert_eq!(
|
||||
*store
|
||||
.get(i)
|
||||
.unwrap()
|
||||
.get(i)?
|
||||
.get_first(field_title)
|
||||
.unwrap()
|
||||
.text()
|
||||
@@ -123,6 +132,7 @@ pub mod tests {
|
||||
format!("Doc {}", i)
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -139,7 +149,7 @@ mod bench {
|
||||
#[bench]
|
||||
#[cfg(feature = "mmap")]
|
||||
fn bench_store_encode(b: &mut Bencher) {
|
||||
let mut directory = RAMDirectory::create();
|
||||
let directory = RAMDirectory::create();
|
||||
let path = Path::new("store");
|
||||
b.iter(|| {
|
||||
write_lorem_ipsum_store(directory.open_write(path).unwrap(), 1_000);
|
||||
@@ -149,11 +159,11 @@ mod bench {
|
||||
|
||||
#[bench]
|
||||
fn bench_store_decode(b: &mut Bencher) {
|
||||
let mut directory = RAMDirectory::create();
|
||||
let directory = RAMDirectory::create();
|
||||
let path = Path::new("store");
|
||||
write_lorem_ipsum_store(directory.open_write(path).unwrap(), 1_000);
|
||||
let store_source = directory.open_read(path).unwrap();
|
||||
let store = StoreReader::from_source(store_source);
|
||||
let store_file = directory.open_read(path).unwrap();
|
||||
let store = StoreReader::open(store_file).unwrap();
|
||||
b.iter(|| {
|
||||
store.get(12).unwrap();
|
||||
});
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
use super::decompress;
|
||||
use super::skiplist::SkipList;
|
||||
use crate::common::BinarySerializable;
|
||||
use crate::common::VInt;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::common::{BinarySerializable, HasLen};
|
||||
use crate::directory::{FileSlice, OwnedBytes};
|
||||
use crate::schema::Document;
|
||||
use crate::space_usage::StoreSpaceUsage;
|
||||
use crate::DocId;
|
||||
@@ -13,8 +13,8 @@ use std::mem::size_of;
|
||||
/// Reads document off tantivy's [`Store`](./index.html)
|
||||
#[derive(Clone)]
|
||||
pub struct StoreReader {
|
||||
data: ReadOnlySource,
|
||||
offset_index_source: ReadOnlySource,
|
||||
data: FileSlice,
|
||||
offset_index_file: OwnedBytes,
|
||||
current_block_offset: RefCell<usize>,
|
||||
current_block: RefCell<Vec<u8>>,
|
||||
max_doc: DocId,
|
||||
@@ -22,19 +22,20 @@ pub struct StoreReader {
|
||||
|
||||
impl StoreReader {
|
||||
/// Opens a store reader
|
||||
pub fn from_source(data: ReadOnlySource) -> StoreReader {
|
||||
let (data_source, offset_index_source, max_doc) = split_source(data);
|
||||
StoreReader {
|
||||
data: data_source,
|
||||
offset_index_source,
|
||||
// TODO rename open
|
||||
pub fn open(store_file: FileSlice) -> io::Result<StoreReader> {
|
||||
let (data_file, offset_index_file, max_doc) = split_file(store_file)?;
|
||||
Ok(StoreReader {
|
||||
data: data_file,
|
||||
offset_index_file: offset_index_file.read_bytes()?,
|
||||
current_block_offset: RefCell::new(usize::max_value()),
|
||||
current_block: RefCell::new(Vec::new()),
|
||||
max_doc,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn block_index(&self) -> SkipList<'_, u64> {
|
||||
SkipList::from(self.offset_index_source.as_slice())
|
||||
SkipList::from(self.offset_index_file.as_slice())
|
||||
}
|
||||
|
||||
fn block_offset(&self, doc_id: DocId) -> (DocId, u64) {
|
||||
@@ -44,23 +45,22 @@ impl StoreReader {
|
||||
.unwrap_or((0u32, 0u64))
|
||||
}
|
||||
|
||||
pub(crate) fn block_data(&self) -> &[u8] {
|
||||
self.data.as_slice()
|
||||
pub(crate) fn block_data(&self) -> io::Result<OwnedBytes> {
|
||||
self.data.read_bytes()
|
||||
}
|
||||
|
||||
fn compressed_block(&self, addr: usize) -> &[u8] {
|
||||
let total_buffer = self.data.as_slice();
|
||||
let mut buffer = &total_buffer[addr..];
|
||||
let block_len = u32::deserialize(&mut buffer).expect("") as usize;
|
||||
&buffer[..block_len]
|
||||
fn compressed_block(&self, addr: usize) -> io::Result<OwnedBytes> {
|
||||
let (block_len_bytes, block_body) = self.data.slice_from(addr).split(4);
|
||||
let block_len = u32::deserialize(&mut block_len_bytes.read_bytes()?)?;
|
||||
block_body.slice_to(block_len as usize).read_bytes()
|
||||
}
|
||||
|
||||
fn read_block(&self, block_offset: usize) -> io::Result<()> {
|
||||
if block_offset != *self.current_block_offset.borrow() {
|
||||
let mut current_block_mut = self.current_block.borrow_mut();
|
||||
current_block_mut.clear();
|
||||
let compressed_block = self.compressed_block(block_offset);
|
||||
decompress(compressed_block, &mut current_block_mut)?;
|
||||
let compressed_block = self.compressed_block(block_offset)?;
|
||||
decompress(compressed_block.as_slice(), &mut current_block_mut)?;
|
||||
*self.current_block_offset.borrow_mut() = block_offset;
|
||||
}
|
||||
Ok(())
|
||||
@@ -69,7 +69,7 @@ impl StoreReader {
|
||||
/// Reads a given document.
|
||||
///
|
||||
/// Calling `.get(doc)` is relatively costly as it requires
|
||||
/// decompressing a LZ4-compressed block.
|
||||
/// decompressing a compressed block.
|
||||
///
|
||||
/// It should not be called to score documents
|
||||
/// for instance.
|
||||
@@ -89,21 +89,21 @@ impl StoreReader {
|
||||
|
||||
/// Summarize total space usage of this store reader.
|
||||
pub fn space_usage(&self) -> StoreSpaceUsage {
|
||||
StoreSpaceUsage::new(self.data.len(), self.offset_index_source.len())
|
||||
StoreSpaceUsage::new(self.data.len(), self.offset_index_file.len())
|
||||
}
|
||||
}
|
||||
|
||||
fn split_source(data: ReadOnlySource) -> (ReadOnlySource, ReadOnlySource, DocId) {
|
||||
fn split_file(data: FileSlice) -> io::Result<(FileSlice, FileSlice, DocId)> {
|
||||
let data_len = data.len();
|
||||
let footer_offset = data_len - size_of::<u64>() - size_of::<u32>();
|
||||
let serialized_offset: ReadOnlySource = data.slice(footer_offset, data_len);
|
||||
let serialized_offset: OwnedBytes = data.slice(footer_offset, data_len).read_bytes()?;
|
||||
let mut serialized_offset_buf = serialized_offset.as_slice();
|
||||
let offset = u64::deserialize(&mut serialized_offset_buf).unwrap();
|
||||
let offset = u64::deserialize(&mut serialized_offset_buf)?;
|
||||
let offset = offset as usize;
|
||||
let max_doc = u32::deserialize(&mut serialized_offset_buf).unwrap();
|
||||
(
|
||||
let max_doc = u32::deserialize(&mut serialized_offset_buf)?;
|
||||
Ok((
|
||||
data.slice(0, offset),
|
||||
data.slice(offset, footer_offset),
|
||||
max_doc,
|
||||
)
|
||||
))
|
||||
}
|
||||
|
||||
@@ -68,19 +68,17 @@ impl<T: BinarySerializable> SkipListBuilder<T> {
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, key: u64, dest: &T) -> io::Result<()> {
|
||||
let mut layer_id = 0;
|
||||
let mut skip_pointer = self.data_layer.insert(key, dest)?;
|
||||
loop {
|
||||
skip_pointer = match skip_pointer {
|
||||
Some((skip_doc_id, skip_offset)) => self
|
||||
for layer_id in 0.. {
|
||||
if let Some((skip_doc_id, skip_offset)) = skip_pointer {
|
||||
skip_pointer = self
|
||||
.get_skip_layer(layer_id)
|
||||
.insert(skip_doc_id, &skip_offset)?,
|
||||
None => {
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
layer_id += 1;
|
||||
.insert(skip_doc_id, &skip_offset)?;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn write<W: Write>(self, output: &mut W) -> io::Result<()> {
|
||||
|
||||
@@ -75,7 +75,8 @@ impl StoreWriter {
|
||||
let start_offset = self.writer.written_bytes() as u64;
|
||||
|
||||
// just bulk write all of the block of the given reader.
|
||||
self.writer.write_all(store_reader.block_data())?;
|
||||
self.writer
|
||||
.write_all(store_reader.block_data()?.as_slice())?;
|
||||
|
||||
// concatenate the index of the `store_reader`, after translating
|
||||
// its start doc id and its start file offset.
|
||||
|
||||
@@ -36,9 +36,9 @@ pub use self::termdict::{TermDictionary, TermDictionaryBuilder};
|
||||
mod tests {
|
||||
use super::{TermDictionary, TermDictionaryBuilder, TermStreamer};
|
||||
use crate::core::Index;
|
||||
use crate::directory::{Directory, RAMDirectory, ReadOnlySource};
|
||||
use crate::directory::{Directory, FileSlice, RAMDirectory};
|
||||
use crate::postings::TermInfo;
|
||||
use crate::schema::{Document, Schema, TEXT};
|
||||
use crate::schema::{Schema, TEXT};
|
||||
use std::path::PathBuf;
|
||||
use std::str;
|
||||
|
||||
@@ -59,7 +59,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_term_ordinals() {
|
||||
fn test_term_ordinals() -> crate::Result<()> {
|
||||
const COUNTRIES: [&'static str; 7] = [
|
||||
"San Marino",
|
||||
"Serbia",
|
||||
@@ -69,45 +69,40 @@ mod tests {
|
||||
"Sweden",
|
||||
"Switzerland",
|
||||
];
|
||||
let mut directory = RAMDirectory::create();
|
||||
let directory = RAMDirectory::create();
|
||||
let path = PathBuf::from("TermDictionary");
|
||||
{
|
||||
let write = directory.open_write(&path).unwrap();
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(write).unwrap();
|
||||
let write = directory.open_write(&path)?;
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(write)?;
|
||||
for term in COUNTRIES.iter() {
|
||||
term_dictionary_builder
|
||||
.insert(term.as_bytes(), &make_term_info(0u64))
|
||||
.unwrap();
|
||||
term_dictionary_builder.insert(term.as_bytes(), &make_term_info(0u64))?;
|
||||
}
|
||||
term_dictionary_builder.finish().unwrap();
|
||||
term_dictionary_builder.finish()?;
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
let term_dict: TermDictionary = TermDictionary::from_source(&source);
|
||||
let term_file = directory.open_read(&path)?;
|
||||
let term_dict: TermDictionary = TermDictionary::open(term_file)?;
|
||||
for (term_ord, term) in COUNTRIES.iter().enumerate() {
|
||||
assert_eq!(term_dict.term_ord(term).unwrap(), term_ord as u64);
|
||||
let mut bytes = vec![];
|
||||
assert!(term_dict.ord_to_term(term_ord as u64, &mut bytes));
|
||||
assert_eq!(bytes, term.as_bytes());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_term_dictionary_simple() {
|
||||
let mut directory = RAMDirectory::create();
|
||||
fn test_term_dictionary_simple() -> crate::Result<()> {
|
||||
let directory = RAMDirectory::create();
|
||||
let path = PathBuf::from("TermDictionary");
|
||||
{
|
||||
let write = directory.open_write(&path).unwrap();
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(write).unwrap();
|
||||
term_dictionary_builder
|
||||
.insert("abc".as_bytes(), &make_term_info(34u64))
|
||||
.unwrap();
|
||||
term_dictionary_builder
|
||||
.insert("abcd".as_bytes(), &make_term_info(346u64))
|
||||
.unwrap();
|
||||
term_dictionary_builder.finish().unwrap();
|
||||
let write = directory.open_write(&path)?;
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(write)?;
|
||||
term_dictionary_builder.insert("abc".as_bytes(), &make_term_info(34u64))?;
|
||||
term_dictionary_builder.insert("abcd".as_bytes(), &make_term_info(346u64))?;
|
||||
term_dictionary_builder.finish()?;
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
let term_dict: TermDictionary = TermDictionary::from_source(&source);
|
||||
let file = directory.open_read(&path)?;
|
||||
let term_dict: TermDictionary = TermDictionary::open(file)?;
|
||||
assert_eq!(term_dict.get("abc").unwrap().doc_freq, 34u32);
|
||||
assert_eq!(term_dict.get("abcd").unwrap().doc_freq, 346u32);
|
||||
let mut stream = term_dict.stream();
|
||||
@@ -130,43 +125,26 @@ mod tests {
|
||||
assert_eq!(stream.value().doc_freq, 346u32);
|
||||
}
|
||||
assert!(!stream.advance());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_term_iterator() {
|
||||
fn test_term_iterator() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
{
|
||||
{
|
||||
let mut doc = Document::default();
|
||||
doc.add_text(text_field, "a b d f");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
{
|
||||
{
|
||||
let mut doc = Document::default();
|
||||
doc.add_text(text_field, "a b c d f");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
{
|
||||
{
|
||||
let mut doc = Document::default();
|
||||
doc.add_text(text_field, "e f");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(text_field=>"a b d f"));
|
||||
index_writer.commit()?;
|
||||
index_writer.add_document(doc!(text_field=>"a b c d f"));
|
||||
index_writer.commit()?;
|
||||
index_writer.add_document(doc!(text_field => "e f"));
|
||||
index_writer.commit()?;
|
||||
}
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let searcher = index.reader()?.searcher();
|
||||
|
||||
let field_searcher = searcher.field(text_field);
|
||||
let field_searcher = searcher.field(text_field)?;
|
||||
let mut term_it = field_searcher.terms();
|
||||
let mut term_string = String::new();
|
||||
while term_it.advance() {
|
||||
@@ -174,10 +152,11 @@ mod tests {
|
||||
term_string.push_str(str::from_utf8(term_it.key()).expect("test"));
|
||||
}
|
||||
assert_eq!(&*term_string, "abcdef");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_term_dictionary_stream() {
|
||||
fn test_term_dictionary_stream() -> crate::Result<()> {
|
||||
let ids: Vec<_> = (0u32..10_000u32)
|
||||
.map(|i| (format!("doc{:0>6}", i), i))
|
||||
.collect();
|
||||
@@ -190,8 +169,8 @@ mod tests {
|
||||
}
|
||||
term_dictionary_builder.finish().unwrap()
|
||||
};
|
||||
let source = ReadOnlySource::from(buffer);
|
||||
let term_dictionary: TermDictionary = TermDictionary::from_source(&source);
|
||||
let term_file = FileSlice::from(buffer);
|
||||
let term_dictionary: TermDictionary = TermDictionary::open(term_file)?;
|
||||
{
|
||||
let mut streamer = term_dictionary.stream();
|
||||
let mut i = 0;
|
||||
@@ -203,28 +182,26 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
let &(ref key, ref _v) = &ids[2047];
|
||||
term_dictionary.get(key.as_bytes());
|
||||
let &(ref key, ref val) = &ids[2047];
|
||||
assert_eq!(
|
||||
term_dictionary.get(key.as_bytes()),
|
||||
Some(make_term_info(*val as u64))
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stream_high_range_prefix_suffix() {
|
||||
fn test_stream_high_range_prefix_suffix() -> crate::Result<()> {
|
||||
let buffer: Vec<u8> = {
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
|
||||
// term requires more than 16bits
|
||||
term_dictionary_builder
|
||||
.insert("abcdefghijklmnopqrstuvwxy", &make_term_info(1))
|
||||
.unwrap();
|
||||
term_dictionary_builder
|
||||
.insert("abcdefghijklmnopqrstuvwxyz", &make_term_info(2))
|
||||
.unwrap();
|
||||
term_dictionary_builder
|
||||
.insert("abr", &make_term_info(2))
|
||||
.unwrap();
|
||||
term_dictionary_builder.finish().unwrap()
|
||||
term_dictionary_builder.insert("abcdefghijklmnopqrstuvwxy", &make_term_info(1))?;
|
||||
term_dictionary_builder.insert("abcdefghijklmnopqrstuvwxyz", &make_term_info(2))?;
|
||||
term_dictionary_builder.insert("abr", &make_term_info(2))?;
|
||||
term_dictionary_builder.finish()?
|
||||
};
|
||||
let source = ReadOnlySource::from(buffer);
|
||||
let term_dictionary: TermDictionary = TermDictionary::from_source(&source);
|
||||
let term_dict_file = FileSlice::from(buffer);
|
||||
let term_dictionary: TermDictionary = TermDictionary::open(term_dict_file)?;
|
||||
let mut kv_stream = term_dictionary.stream();
|
||||
assert!(kv_stream.advance());
|
||||
assert_eq!(kv_stream.key(), "abcdefghijklmnopqrstuvwxy".as_bytes());
|
||||
@@ -235,10 +212,11 @@ mod tests {
|
||||
assert!(kv_stream.advance());
|
||||
assert_eq!(kv_stream.key(), "abr".as_bytes());
|
||||
assert!(!kv_stream.advance());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stream_range() {
|
||||
fn test_stream_range() -> crate::Result<()> {
|
||||
let ids: Vec<_> = (0u32..10_000u32)
|
||||
.map(|i| (format!("doc{:0>6}", i), i))
|
||||
.collect();
|
||||
@@ -252,9 +230,9 @@ mod tests {
|
||||
term_dictionary_builder.finish().unwrap()
|
||||
};
|
||||
|
||||
let source = ReadOnlySource::from(buffer);
|
||||
let file = FileSlice::from(buffer);
|
||||
|
||||
let term_dictionary: TermDictionary = TermDictionary::from_source(&source);
|
||||
let term_dictionary: TermDictionary = TermDictionary::open(file)?;
|
||||
{
|
||||
for i in (0..20).chain(6000..8_000) {
|
||||
let &(ref target_key, _) = &ids[i];
|
||||
@@ -305,10 +283,11 @@ mod tests {
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_string() {
|
||||
fn test_empty_string() -> crate::Result<()> {
|
||||
let buffer: Vec<u8> = {
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
|
||||
term_dictionary_builder
|
||||
@@ -319,30 +298,29 @@ mod tests {
|
||||
.unwrap();
|
||||
term_dictionary_builder.finish().unwrap()
|
||||
};
|
||||
let source = ReadOnlySource::from(buffer);
|
||||
let term_dictionary: TermDictionary = TermDictionary::from_source(&source);
|
||||
let file = FileSlice::from(buffer);
|
||||
let term_dictionary: TermDictionary = TermDictionary::open(file)?;
|
||||
let mut stream = term_dictionary.stream();
|
||||
assert!(stream.advance());
|
||||
assert!(stream.key().is_empty());
|
||||
assert!(stream.advance());
|
||||
assert_eq!(stream.key(), &[1u8]);
|
||||
assert!(!stream.advance());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stream_range_boundaries() {
|
||||
fn test_stream_range_boundaries() -> crate::Result<()> {
|
||||
let buffer: Vec<u8> = {
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(Vec::new())?;
|
||||
for i in 0u8..10u8 {
|
||||
let number_arr = [i; 1];
|
||||
term_dictionary_builder
|
||||
.insert(&number_arr, &make_term_info(i as u64))
|
||||
.unwrap();
|
||||
term_dictionary_builder.insert(&number_arr, &make_term_info(i as u64))?;
|
||||
}
|
||||
term_dictionary_builder.finish().unwrap()
|
||||
term_dictionary_builder.finish()?
|
||||
};
|
||||
let source = ReadOnlySource::from(buffer);
|
||||
let term_dictionary: TermDictionary = TermDictionary::from_source(&source);
|
||||
let file = FileSlice::from(buffer);
|
||||
let term_dictionary: TermDictionary = TermDictionary::open(file)?;
|
||||
|
||||
let value_list = |mut streamer: TermStreamer<'_>, backwards: bool| {
|
||||
let mut res: Vec<u32> = vec![];
|
||||
@@ -430,10 +408,11 @@ mod tests {
|
||||
.into_stream();
|
||||
assert_eq!(value_list(range, true), vec![0u32, 1u32, 2u32, 3u32, 4u32]);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_automaton_search() {
|
||||
fn test_automaton_search() -> crate::Result<()> {
|
||||
use crate::query::DFAWrapper;
|
||||
use levenshtein_automata::LevenshteinAutomatonBuilder;
|
||||
|
||||
@@ -447,20 +426,18 @@ mod tests {
|
||||
"Switzerland",
|
||||
];
|
||||
|
||||
let mut directory = RAMDirectory::create();
|
||||
let directory = RAMDirectory::create();
|
||||
let path = PathBuf::from("TermDictionary");
|
||||
{
|
||||
let write = directory.open_write(&path).unwrap();
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(write).unwrap();
|
||||
let write = directory.open_write(&path)?;
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(write)?;
|
||||
for term in COUNTRIES.iter() {
|
||||
term_dictionary_builder
|
||||
.insert(term.as_bytes(), &make_term_info(0u64))
|
||||
.unwrap();
|
||||
term_dictionary_builder.insert(term.as_bytes(), &make_term_info(0u64))?;
|
||||
}
|
||||
term_dictionary_builder.finish().unwrap();
|
||||
term_dictionary_builder.finish()?;
|
||||
}
|
||||
let source = directory.open_read(&path).unwrap();
|
||||
let term_dict: TermDictionary = TermDictionary::from_source(&source);
|
||||
let file = directory.open_read(&path)?;
|
||||
let term_dict: TermDictionary = TermDictionary::open(file)?;
|
||||
|
||||
// We can now build an entire dfa.
|
||||
let lev_automaton_builder = LevenshteinAutomatonBuilder::new(2, true);
|
||||
@@ -472,5 +449,6 @@ mod tests {
|
||||
assert!(range.advance());
|
||||
assert_eq!("Spain".as_bytes(), range.key());
|
||||
assert!(!range.advance());
|
||||
Ok(())
|
||||
}
|
||||
}
|