Compare commits

...

8 Commits

Author SHA1 Message Date
Paul Masurel
a7a98b11d7 exploratory 2019-05-22 10:18:53 +09:00
Paul Masurel
a18932165f for_each in union 2019-05-07 08:08:55 +09:00
Paul Masurel
8f82d0b773 Added impl for for_each specific to unions. 2019-05-05 17:31:32 +09:00
Paul Masurel
7102b363f5 Fix build 2019-05-05 14:19:54 +09:00
Paul Masurel
66b4615e4e Issue/542 (#543)
* Closes 542.

Fast fields are all loaded when the segment reader is created.
2019-05-05 13:52:43 +09:00
Paul Masurel
3df037961f Added more info to fast fields. 2019-04-30 13:14:01 +09:00
Paul Masurel
dac50c6aeb Dds merged (#539)
* add ascii folding support

* Minor change and added Changelog.

* add additional tests

* Add tests for ascii folding (#533)

* first tests for ascii folding

* use a `RawTokenizer` for tokens using punctuation

* add test for all (?) folding, inspired by Lucene

* Simplification of the unit test code
2019-04-26 10:25:08 +09:00
Paul Masurel
31b22c5acc Added logging when token is dropped. (#538) 2019-04-26 09:23:28 +09:00
46 changed files with 4719 additions and 268 deletions

View File

@@ -1,6 +1,10 @@
Tantivy 0.10.0
====================
=====================
*Tantivy 0.10.0 index format is compatible with the index format in 0.9.0.*
- Added an ASCII folding filter (@drusellers)
- Bugfix in `query.count` in presence of deletes (@pmasurel)
Minor
---------
@@ -8,6 +12,22 @@ Minor
Calling .freq() or .doc() when .advance() has never been called
on segment postings should panic from now on.
- Tokens exceeding `u16::max_value() - 4` chars are discarded silently instead of panicking.
- Fast fields are now preloaded when the `SegmentReader` is created.
## How to update?
Your existing indexes are usable as is. You may or may not need some
trivial updates.
### Fast fields
Fast fields used to be accessed directly from the `SegmentReader`.
The API changed: you are now required to acquire your fast field reader via
`segment_reader.fast_fields()`, and to use one of the typed methods:
- `.u64()`, `.i64()` if your field is single-valued ;
- `.u64s()`, `.i64s()` if your field is multi-valued ;
- `.bytes()` if your field is a bytes fast field.
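For illustration, a minimal before/after sketch of this migration, assuming a single-valued u64 fast field whose `Field` handle is called `score_field` (the variable names are hypothetical):

```rust
// 0.9: the typed reader was requested directly from the SegmentReader and returned a Result.
// let score_reader = segment_reader.fast_field_reader::<u64>(score_field)?;

// 0.10: go through the preloaded readers; `None` means the field is not a u64 fast field.
let score_reader = segment_reader
    .fast_fields()
    .u64(score_field)
    .expect("score_field is not a u64 fast field");
let first_value: u64 = score_reader.get(0u32); // value stored for DocId 0
```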
Tantivy 0.9.0

View File

@@ -18,8 +18,8 @@ use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::Index;
use tantivy::SegmentReader;
use tantivy::{Index, TantivyError};
#[derive(Default)]
struct Stats {
@@ -75,9 +75,18 @@ impl Collector for StatsCollector {
fn for_segment(
&self,
_segment_local_id: u32,
segment: &SegmentReader,
segment_reader: &SegmentReader,
) -> tantivy::Result<StatsSegmentCollector> {
let fast_field_reader = segment.fast_field_reader(self.field)?;
let fast_field_reader = segment_reader
.fast_fields()
.u64(self.field)
.ok_or_else(|| {
let field_name = segment_reader.schema().get_field_name(self.field);
TantivyError::SchemaError(format!(
"Field {:?} is not a u64 fast field.",
field_name
))
})?;
Ok(StatsSegmentCollector {
fast_field_reader,
stats: Stats::default(),

View File

@@ -17,6 +17,7 @@ use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
use TantivyError;
struct Hit<'a> {
count: u64,
@@ -264,7 +265,10 @@ impl Collector for FacetCollector {
_: SegmentLocalId,
reader: &SegmentReader,
) -> Result<FacetSegmentCollector> {
let facet_reader = reader.facet_reader(self.field)?;
let field_name = reader.schema().get_field_name(self.field);
let facet_reader = reader.facet_reader(self.field).ok_or_else(|| {
TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name))
})?;
let mut collapse_mapping = Vec::new();
let mut counts = Vec::new();

View File

@@ -2,6 +2,7 @@ use super::Collector;
use super::SegmentCollector;
use collector::Fruit;
use std::marker::PhantomData;
use std::ops::Deref;
use DocId;
use Result;
use Score;
@@ -199,7 +200,10 @@ impl<'a> Collector for MultiCollector<'a> {
}
fn requires_scoring(&self) -> bool {
self.collector_wrappers.iter().any(|c| c.requires_scoring())
self.collector_wrappers
.iter()
.map(Deref::deref)
.any(Collector::requires_scoring)
}
fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> Result<MultiFruit> {

View File

@@ -114,11 +114,15 @@ impl Collector for FastFieldTestCollector {
fn for_segment(
&self,
_: SegmentLocalId,
reader: &SegmentReader,
segment_reader: &SegmentReader,
) -> Result<FastFieldSegmentCollector> {
let reader = segment_reader
.fast_fields()
.u64(self.field)
.expect("Requested field is not a fast field.");
Ok(FastFieldSegmentCollector {
vals: Vec::new(),
reader: reader.fast_field_reader(self.field)?,
reader,
})
}
@@ -170,11 +174,14 @@ impl Collector for BytesFastFieldTestCollector {
fn for_segment(
&self,
_segment_local_id: u32,
segment: &SegmentReader,
segment_reader: &SegmentReader,
) -> Result<BytesFastFieldSegmentCollector> {
Ok(BytesFastFieldSegmentCollector {
vals: Vec::new(),
reader: segment.bytes_fast_field_reader(self.field)?,
reader: segment_reader
.fast_fields()
.bytes(self.field)
.expect("Field is not a bytes fast field."),
})
}
@@ -191,7 +198,7 @@ impl SegmentCollector for BytesFastFieldSegmentCollector {
type Fruit = Vec<u8>;
fn collect(&mut self, doc: u32, _score: f32) {
let data = self.reader.get_val(doc);
let data = self.reader.get_bytes(doc);
self.vals.extend(data);
}

View File

@@ -98,11 +98,11 @@ where
.collect())
}
pub(crate) fn for_segment(
pub(crate) fn for_segment<F: PartialOrd>(
&self,
segment_id: SegmentLocalId,
_: &SegmentReader,
) -> Result<TopSegmentCollector<T>> {
) -> Result<TopSegmentCollector<F>> {
Ok(TopSegmentCollector::new(segment_id, self.limit))
}
}

View File

@@ -5,10 +5,12 @@ use collector::SegmentCollector;
use fastfield::FastFieldReader;
use fastfield::FastValue;
use schema::Field;
use std::marker::PhantomData;
use DocAddress;
use Result;
use SegmentLocalId;
use SegmentReader;
use TantivyError;
/// The Top Field Collector keeps track of the K documents
/// sorted by a fast field in the index
@@ -106,8 +108,15 @@ impl<T: FastValue + PartialOrd + Send + Sync + 'static> Collector for TopDocsByF
reader: &SegmentReader,
) -> Result<TopFieldSegmentCollector<T>> {
let collector = self.collector.for_segment(segment_local_id, reader)?;
let reader = reader.fast_field_reader(self.field)?;
Ok(TopFieldSegmentCollector { collector, reader })
let reader = reader.fast_fields().u64(self.field).ok_or_else(|| {
let field_name = reader.schema().get_field_name(self.field);
TantivyError::SchemaError(format!("Failed to find fast field reader {:?}", field_name))
})?;
Ok(TopFieldSegmentCollector {
collector,
reader,
_type: PhantomData,
})
}
fn requires_scoring(&self) -> bool {
@@ -122,9 +131,10 @@ impl<T: FastValue + PartialOrd + Send + Sync + 'static> Collector for TopDocsByF
}
}
pub struct TopFieldSegmentCollector<T: FastValue + PartialOrd> {
collector: TopSegmentCollector<T>,
reader: FastFieldReader<T>,
pub struct TopFieldSegmentCollector<T> {
collector: TopSegmentCollector<u64>,
reader: FastFieldReader<u64>,
_type: PhantomData<T>,
}
impl<T: FastValue + PartialOrd + Send + Sync + 'static> SegmentCollector
@@ -138,7 +148,11 @@ impl<T: FastValue + PartialOrd + Send + Sync + 'static> SegmentCollector
}
fn harvest(self) -> Vec<(T, DocAddress)> {
self.collector.harvest()
self.collector
.harvest()
.into_iter()
.map(|(val, doc_address)| (T::from_u64(val), doc_address))
.collect()
}
}
@@ -235,7 +249,7 @@ mod tests {
.for_segment(0, segment)
.map(|_| ())
.unwrap_err(),
TantivyError::FastFieldError(_)
TantivyError::SchemaError(_)
);
}

View File

@@ -340,7 +340,7 @@ impl Index {
Ok(self
.searchable_segment_metas()?
.iter()
.map(|segment_meta| segment_meta.id())
.map(SegmentMeta::id)
.collect())
}
}

View File

@@ -59,7 +59,7 @@ impl Searcher {
) -> Searcher {
let store_readers = segment_readers
.iter()
.map(|segment_reader| segment_reader.get_store_reader())
.map(SegmentReader::get_store_reader)
.collect();
Searcher {
schema,
@@ -218,7 +218,7 @@ impl fmt::Debug for Searcher {
let segment_ids = self
.segment_readers
.iter()
.map(|segment_reader| segment_reader.segment_id())
.map(SegmentReader::segment_id)
.collect::<Vec<_>>();
write!(f, "Searcher({:?})", segment_ids)
}

View File

@@ -5,14 +5,10 @@ use core::Segment;
use core::SegmentComponent;
use core::SegmentId;
use directory::ReadOnlySource;
use error::TantivyError;
use fastfield::DeleteBitSet;
use fastfield::FacetReader;
use fastfield::FastFieldReader;
use fastfield::{self, FastFieldNotAvailableError};
use fastfield::{BytesFastFieldReader, FastValue, MultiValueIntFastFieldReader};
use fastfield::FastFieldReaders;
use fieldnorm::FieldNormReader;
use schema::Cardinality;
use schema::Field;
use schema::FieldType;
use schema::Schema;
@@ -51,7 +47,7 @@ pub struct SegmentReader {
postings_composite: CompositeFile,
positions_composite: CompositeFile,
positions_idx_composite: CompositeFile,
fast_fields_composite: CompositeFile,
fast_fields_readers: Arc<FastFieldReaders>,
fieldnorms_composite: CompositeFile,
store_source: ReadOnlySource,
@@ -105,93 +101,21 @@ impl SegmentReader {
///
/// # Panics
/// May panic if the index is corrupted.
pub fn fast_field_reader<Item: FastValue>(
&self,
field: Field,
) -> fastfield::Result<FastFieldReader<Item>> {
let field_entry = self.schema.get_field_entry(field);
if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::SingleValue)
{
self.fast_fields_composite
.open_read(field)
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
.map(FastFieldReader::open)
} else {
Err(FastFieldNotAvailableError::new(field_entry))
}
}
pub(crate) fn fast_field_reader_with_idx<Item: FastValue>(
&self,
field: Field,
idx: usize,
) -> fastfield::Result<FastFieldReader<Item>> {
if let Some(ff_source) = self.fast_fields_composite.open_read_with_idx(field, idx) {
Ok(FastFieldReader::open(ff_source))
} else {
let field_entry = self.schema.get_field_entry(field);
Err(FastFieldNotAvailableError::new(field_entry))
}
}
/// Accessor to the `MultiValueIntFastFieldReader` associated to a given `Field`.
/// May panic if the field is not a multivalued fast field of the type `Item`.
pub fn multi_fast_field_reader<Item: FastValue>(
&self,
field: Field,
) -> fastfield::Result<MultiValueIntFastFieldReader<Item>> {
let field_entry = self.schema.get_field_entry(field);
if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::MultiValues)
{
let idx_reader = self.fast_field_reader_with_idx(field, 0)?;
let vals_reader = self.fast_field_reader_with_idx(field, 1)?;
Ok(MultiValueIntFastFieldReader::open(idx_reader, vals_reader))
} else {
Err(FastFieldNotAvailableError::new(field_entry))
}
}
/// Accessor to the `BytesFastFieldReader` associated to a given `Field`.
pub fn bytes_fast_field_reader(&self, field: Field) -> fastfield::Result<BytesFastFieldReader> {
let field_entry = self.schema.get_field_entry(field);
match *field_entry.field_type() {
FieldType::Bytes => {}
_ => return Err(FastFieldNotAvailableError::new(field_entry)),
}
let idx_reader = self
.fast_fields_composite
.open_read_with_idx(field, 0)
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
.map(FastFieldReader::open)?;
let values = self
.fast_fields_composite
.open_read_with_idx(field, 1)
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
Ok(BytesFastFieldReader::open(idx_reader, values))
pub fn fast_fields(&self) -> &FastFieldReaders {
&self.fast_fields_readers
}
/// Accessor to the `FacetReader` associated to a given `Field`.
pub fn facet_reader(&self, field: Field) -> Result<FacetReader> {
pub fn facet_reader(&self, field: Field) -> Option<FacetReader> {
let field_entry = self.schema.get_field_entry(field);
if field_entry.field_type() != &FieldType::HierarchicalFacet {
return Err(TantivyError::InvalidArgument(format!(
"The field {:?} is not a \
hierarchical facet.",
field_entry
)));
return None;
}
let term_ords_reader = self.multi_fast_field_reader(field)?;
let termdict_source = self.termdict_composite.open_read(field).ok_or_else(|| {
TantivyError::InvalidArgument(format!(
"The field \"{}\" is a hierarchical \
but this segment does not seem to have the field term \
dictionary.",
field_entry.name()
))
})?;
let term_ords_reader = self.fast_fields().u64s(field)?;
let termdict_source = self.termdict_composite.open_read(field)?;
let termdict = TermDictionary::from_source(&termdict_source);
let facet_reader = FacetReader::new(term_ords_reader, termdict);
Ok(facet_reader)
Some(facet_reader)
}
/// Accessor to the segment's `Field norms`'s reader.
@@ -247,8 +171,12 @@ impl SegmentReader {
}
};
let schema = segment.schema();
let fast_fields_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
let fast_field_readers =
Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
@@ -260,14 +188,13 @@ impl SegmentReader {
None
};
let schema = segment.schema();
Ok(SegmentReader {
inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
max_doc: segment.meta().max_doc(),
num_docs: segment.meta().num_docs(),
termdict_composite,
postings_composite,
fast_fields_composite,
fast_fields_readers: fast_field_readers,
fieldnorms_composite,
segment_id: segment.id(),
store_source,
@@ -381,12 +308,12 @@ impl SegmentReader {
self.postings_composite.space_usage(),
self.positions_composite.space_usage(),
self.positions_idx_composite.space_usage(),
self.fast_fields_composite.space_usage(),
self.fast_fields_readers.space_usage(),
self.fieldnorms_composite.space_usage(),
self.get_store_reader().space_usage(),
self.delete_bitset_opt
.as_ref()
.map(|x| x.space_usage())
.map(DeleteBitSet::space_usage)
.unwrap_or(0),
)
}

View File

@@ -86,7 +86,7 @@ impl InnerDirectory {
self.fs
.get(path)
.ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
.map(|el| el.clone())
.map(Clone::clone)
}
fn delete(&mut self, path: &Path) -> result::Result<(), DeleteError> {

View File

@@ -142,6 +142,11 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
unboxed.size_hint()
}
fn append_to_bitset(&mut self, bitset: &mut BitSet) {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.append_to_bitset(bitset);
}
fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.count(delete_bitset)
@@ -151,9 +156,4 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.count_including_deleted()
}
fn append_to_bitset(&mut self, bitset: &mut BitSet) {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.append_to_bitset(bitset);
}
}

View File

@@ -23,14 +23,14 @@ mod tests {
index_writer.add_document(doc!(field=>vec![0u8; 1000]));
assert!(index_writer.commit().is_ok());
let searcher = index.reader().unwrap().searcher();
let reader = searcher.segment_reader(0);
let bytes_reader = reader.bytes_fast_field_reader(field).unwrap();
let segment_reader = searcher.segment_reader(0);
let bytes_reader = segment_reader.fast_fields().bytes(field).unwrap();
assert_eq!(bytes_reader.get_val(0), &[0u8, 1, 2, 3]);
assert!(bytes_reader.get_val(1).is_empty());
assert_eq!(bytes_reader.get_val(2), &[255u8]);
assert_eq!(bytes_reader.get_val(3), &[1u8, 3, 5, 7, 9]);
assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
assert!(bytes_reader.get_bytes(1).is_empty());
assert_eq!(bytes_reader.get_bytes(2), &[255u8]);
assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]);
let long = vec![0u8; 1000];
assert_eq!(bytes_reader.get_val(4), long.as_slice());
assert_eq!(bytes_reader.get_bytes(4), long.as_slice());
}
}

View File

@@ -14,6 +14,7 @@ use DocId;
///
/// Reading the value for a document is done by reading the start index for it,
/// and the start index for the next document, and keeping the bytes in between.
#[derive(Clone)]
pub struct BytesFastFieldReader {
idx_reader: FastFieldReader<u64>,
values: OwningRef<ReadOnlySource, [u8]>,
@@ -28,10 +29,20 @@ impl BytesFastFieldReader {
BytesFastFieldReader { idx_reader, values }
}
/// Returns the bytes associated to the given `doc`
pub fn get_val(&self, doc: DocId) -> &[u8] {
fn range(&self, doc: DocId) -> (usize, usize) {
let start = self.idx_reader.get(doc) as usize;
let stop = self.idx_reader.get(doc + 1) as usize;
(start, stop)
}
/// Returns the bytes associated to the given `doc`
pub fn get_bytes(&self, doc: DocId) -> &[u8] {
let (start, stop) = self.range(doc);
&self.values[start..stop]
}
/// Returns the overall number of bytes in this bytes fast field.
pub fn total_num_bytes(&self) -> usize {
self.values.len()
}
}

View File

@@ -53,16 +53,18 @@ impl DeleteBitSet {
}
}
/// Returns whether the document has been marked as deleted.
/// Returns true iff the document is still "alive". In other words, if it has not been deleted.
pub fn is_alive(&self, doc: DocId) -> bool {
!self.is_deleted(doc)
}
/// Returns true iff the document has been marked as deleted.
#[inline(always)]
pub fn is_deleted(&self, doc: DocId) -> bool {
if self.len == 0 {
false
} else {
let byte_offset = doc / 8u32;
let b: u8 = (*self.data)[byte_offset as usize];
let shift = (doc & 7u32) as u8;
b & (1u8 << shift) != 0
}
let byte_offset = doc / 8u32;
let b: u8 = (*self.data)[byte_offset as usize];
let shift = (doc & 7u32) as u8;
b & (1u8 << shift) != 0
}
/// Summarize total space usage of this bitset.

View File

@@ -30,6 +30,7 @@ pub use self::error::{FastFieldNotAvailableError, Result};
pub use self::facet_reader::FacetReader;
pub use self::multivalued::{MultiValueIntFastFieldReader, MultiValueIntFastFieldWriter};
pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders;
pub use self::serializer::FastFieldSerializer;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use common;
@@ -43,6 +44,7 @@ mod error;
mod facet_reader;
mod multivalued;
mod reader;
mod readers;
mod serializer;
mod writer;
@@ -78,10 +80,6 @@ impl FastValue for u64 {
*self
}
fn as_u64(&self) -> u64 {
*self
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::U64(ref integer_options) => integer_options.get_fastfield_cardinality(),
@@ -89,6 +87,10 @@ impl FastValue for u64 {
_ => None,
}
}
fn as_u64(&self) -> u64 {
*self
}
}
impl FastValue for i64 {

View File

@@ -37,9 +37,7 @@ mod tests {
let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0);
let mut vals = Vec::new();
let multi_value_reader = segment_reader
.multi_fast_field_reader::<u64>(field)
.unwrap();
let multi_value_reader = segment_reader.fast_fields().u64s(field).unwrap();
{
multi_value_reader.get_vals(2, &mut vals);
assert_eq!(&vals, &[4u64]);
@@ -198,9 +196,9 @@ mod tests {
assert!(index_writer.commit().is_ok());
let searcher = index.reader().unwrap().searcher();
let reader = searcher.segment_reader(0);
let segment_reader = searcher.segment_reader(0);
let mut vals = Vec::new();
let multi_value_reader = reader.multi_fast_field_reader::<i64>(field).unwrap();
let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap();
{
multi_value_reader.get_vals(2, &mut vals);
assert_eq!(&vals, &[-4i64]);

View File

@@ -26,6 +26,13 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
}
}
pub(crate) fn into_u64s_reader(self) -> MultiValueIntFastFieldReader<u64> {
MultiValueIntFastFieldReader {
idx_reader: self.idx_reader,
vals_reader: self.vals_reader.into_u64_reader(),
}
}
/// Returns `(start, stop)`, such that the values associated
/// to the given document are `start..stop`.
fn range(&self, doc: DocId) -> (u64, u64) {
@@ -41,13 +48,24 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
vals.resize(len, Item::default());
self.vals_reader.get_range_u64(start, &mut vals[..]);
}
/// Returns the number of values associated with the document `DocId`.
pub fn num_vals(&self, doc: DocId) -> usize {
let (start, stop) = self.range(doc);
(stop - start) as usize
}
/// Returns the overall number of values in this field.
pub fn total_num_vals(&self) -> u64 {
self.idx_reader.max_value()
}
}
#[cfg(test)]
mod tests {
use core::Index;
use schema::{Document, Facet, Schema};
use schema::{Facet, Schema};
#[test]
fn test_multifastfield_reader() {
@@ -58,22 +76,12 @@ mod tests {
let mut index_writer = index
.writer_with_num_threads(1, 30_000_000)
.expect("Failed to create index writer.");
{
let mut doc = Document::new();
doc.add_facet(facet_field, "/category/cat2");
doc.add_facet(facet_field, "/category/cat1");
index_writer.add_document(doc);
}
{
let mut doc = Document::new();
doc.add_facet(facet_field, "/category/cat2");
index_writer.add_document(doc);
}
{
let mut doc = Document::new();
doc.add_facet(facet_field, "/category/cat3");
index_writer.add_document(doc);
}
index_writer.add_document(doc!(
facet_field => Facet::from("/category/cat2"),
facet_field => Facet::from("/category/cat1"),
));
index_writer.add_document(doc!(facet_field => Facet::from("/category/cat2")));
index_writer.add_document(doc!(facet_field => Facet::from("/category/cat3")));
index_writer.commit().expect("Commit failed");
let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0);

View File

@@ -50,6 +50,15 @@ impl<Item: FastValue> FastFieldReader<Item> {
}
}
pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> {
FastFieldReader {
bit_unpacker: self.bit_unpacker,
min_value_u64: self.min_value_u64,
max_value_u64: self.max_value_u64,
_phantom: PhantomData,
}
}
/// Return the value associated to the given document.
///
/// This accessor should return as fast as possible.

src/fastfield/readers.rs (new file, 191 lines)
View File

@@ -0,0 +1,191 @@
use common::CompositeFile;
use fastfield::BytesFastFieldReader;
use fastfield::MultiValueIntFastFieldReader;
use fastfield::{FastFieldNotAvailableError, FastFieldReader};
use schema::{Cardinality, Field, FieldType, Schema};
use space_usage::PerFieldSpaceUsage;
use std::collections::HashMap;
use Result;
/// Provides access to all of the fast field readers.
///
/// Internally, `FastFieldReaders` preloads all fast field readers
/// and simply wraps several `HashMap`s.
pub struct FastFieldReaders {
fast_field_i64: HashMap<Field, FastFieldReader<i64>>,
fast_field_u64: HashMap<Field, FastFieldReader<u64>>,
fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>,
fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>,
fast_bytes: HashMap<Field, BytesFastFieldReader>,
fast_fields_composite: CompositeFile,
}
enum FastType {
I64,
U64,
}
fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> {
match field_type {
FieldType::U64(options) => options
.get_fastfield_cardinality()
.map(|cardinality| (FastType::U64, cardinality)),
FieldType::I64(options) => options
.get_fastfield_cardinality()
.map(|cardinality| (FastType::I64, cardinality)),
FieldType::HierarchicalFacet => Some((FastType::U64, Cardinality::MultiValues)),
_ => None,
}
}
impl FastFieldReaders {
pub(crate) fn load_all(
schema: &Schema,
fast_fields_composite: &CompositeFile,
) -> Result<FastFieldReaders> {
let mut fast_field_readers = FastFieldReaders {
fast_field_i64: Default::default(),
fast_field_u64: Default::default(),
fast_field_i64s: Default::default(),
fast_field_u64s: Default::default(),
fast_bytes: Default::default(),
fast_fields_composite: fast_fields_composite.clone(),
};
for (field_id, field_entry) in schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
let field_type = field_entry.field_type();
if field_type == &FieldType::Bytes {
let idx_reader = fast_fields_composite
.open_read_with_idx(field, 0)
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
.map(FastFieldReader::open)?;
let data = fast_fields_composite
.open_read_with_idx(field, 1)
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
fast_field_readers
.fast_bytes
.insert(field, BytesFastFieldReader::open(idx_reader, data));
} else if let Some((fast_type, cardinality)) = type_and_cardinality(field_type) {
match cardinality {
Cardinality::SingleValue => {
if let Some(fast_field_data) = fast_fields_composite.open_read(field) {
match fast_type {
FastType::U64 => {
let fast_field_reader = FastFieldReader::open(fast_field_data);
fast_field_readers
.fast_field_u64
.insert(field, fast_field_reader);
}
FastType::I64 => {
fast_field_readers.fast_field_i64.insert(
field,
FastFieldReader::open(fast_field_data.clone()),
);
}
}
} else {
return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
}
}
Cardinality::MultiValues => {
let idx_opt = fast_fields_composite.open_read_with_idx(field, 0);
let data_opt = fast_fields_composite.open_read_with_idx(field, 1);
if let (Some(fast_field_idx), Some(fast_field_data)) = (idx_opt, data_opt) {
let idx_reader = FastFieldReader::open(fast_field_idx);
match fast_type {
FastType::I64 => {
let vals_reader = FastFieldReader::open(fast_field_data);
let multivalued_int_fast_field =
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
fast_field_readers
.fast_field_i64s
.insert(field, multivalued_int_fast_field);
}
FastType::U64 => {
let vals_reader = FastFieldReader::open(fast_field_data);
let multivalued_int_fast_field =
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
fast_field_readers
.fast_field_u64s
.insert(field, multivalued_int_fast_field);
}
}
} else {
return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
}
}
}
}
}
Ok(fast_field_readers)
}
pub(crate) fn space_usage(&self) -> PerFieldSpaceUsage {
self.fast_fields_composite.space_usage()
}
/// Returns the `u64` fast field reader associated to `field`.
///
/// If `field` is not a u64 fast field, this method returns `None`.
pub fn u64(&self, field: Field) -> Option<FastFieldReader<u64>> {
self.fast_field_u64.get(&field).cloned()
}
/// If the field is a u64 fast field, return the associated reader.
/// If the field is an i64 fast field, return the associated u64 reader. Values are
/// mapped from i64 to u64 using the (unique) monotonic mapping.
///
/// This method is useful when merging segment readers.
pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> {
if let Some(u64_ff_reader) = self.u64(field) {
return Some(u64_ff_reader);
}
if let Some(i64_ff_reader) = self.i64(field) {
return Some(i64_ff_reader.into_u64_reader());
}
None
}
/// Returns the `i64` fast field reader associated to `field`.
///
/// If `field` is not an i64 fast field, this method returns `None`.
pub fn i64(&self, field: Field) -> Option<FastFieldReader<i64>> {
self.fast_field_i64.get(&field).cloned()
}
/// Returns a `u64s` multi-valued fast field reader associated to `field`.
///
/// If `field` is not a u64 multi-valued fast field, this method returns `None`.
pub fn u64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<u64>> {
self.fast_field_u64s.get(&field).cloned()
}
/// If the field is a u64s fast field, return the associated reader.
/// If the field is an i64s fast field, return the associated u64s reader. Values are
/// mapped from i64 to u64 using the (unique) monotonic mapping.
///
/// This method is useful when merging segment readers.
pub(crate) fn u64s_lenient(&self, field: Field) -> Option<MultiValueIntFastFieldReader<u64>> {
if let Some(u64s_ff_reader) = self.u64s(field) {
return Some(u64s_ff_reader);
}
if let Some(i64s_ff_reader) = self.i64s(field) {
return Some(i64s_ff_reader.into_u64s_reader());
}
None
}
/// Returns an `i64s` multi-valued fast field reader associated to `field`.
///
/// If `field` is not an i64 multi-valued fast field, this method returns `None`.
pub fn i64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<i64>> {
self.fast_field_i64s.get(&field).cloned()
}
/// Returns the `bytes` fast field reader associated to `field`.
///
/// If `field` is not a bytes fast field, returns `None`.
pub fn bytes(&self, field: Field) -> Option<BytesFastFieldReader> {
self.fast_bytes.get(&field).cloned()
}
}
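A hedged usage sketch of this new `FastFieldReaders` API, mirroring the `ok_or_else` pattern adopted by the collectors in this changeset (the helper name is hypothetical):

```rust
use tantivy::fastfield::FastFieldReader;
use tantivy::schema::Field;
use tantivy::{SegmentReader, TantivyError};

// Fetch a u64 fast field reader or surface a SchemaError, as the
// StatsCollector and TopDocsByField collectors now do.
fn u64_fast_field(
    segment_reader: &SegmentReader,
    field: Field,
) -> tantivy::Result<FastFieldReader<u64>> {
    segment_reader.fast_fields().u64(field).ok_or_else(|| {
        let field_name = segment_reader.schema().get_field_name(field);
        TantivyError::SchemaError(format!("Field {:?} is not a u64 fast field.", field_name))
    })
}
```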

View File

@@ -52,7 +52,7 @@ impl MergePolicy for LogMergePolicy {
let mut size_sorted_tuples = segments
.iter()
.map(|x| x.num_docs())
.map(SegmentMeta::num_docs)
.enumerate()
.collect::<Vec<(usize, u32)>>();

View File

@@ -3,6 +3,7 @@ use core::Segment;
use core::SegmentReader;
use core::SerializableSegment;
use docset::DocSet;
use fastfield::BytesFastFieldReader;
use fastfield::DeleteBitSet;
use fastfield::FastFieldReader;
use fastfield::FastFieldSerializer;
@@ -72,7 +73,7 @@ fn compute_min_max_val(
// some deleted documents,
// we need to recompute the max / min
(0..max_doc)
.filter(|doc_id| !delete_bitset.is_deleted(*doc_id))
.filter(|doc_id| delete_bitset.is_alive(*doc_id))
.map(|doc_id| u64_reader.get(doc_id))
.minmax()
.into_option()
@@ -239,7 +240,10 @@ impl IndexMerger {
let mut max_value = u64::min_value();
for reader in &self.readers {
let u64_reader: FastFieldReader<u64> = reader.fast_field_reader(field)?;
let u64_reader: FastFieldReader<u64> = reader
.fast_fields()
.u64_lenient(field)
.expect("Failed to find a reader for single fast field. This is a tantivy bug and it should never happen.");
if let Some((seg_min_val, seg_max_val)) =
compute_min_max_val(&u64_reader, reader.max_doc(), reader.delete_bitset())
{
@@ -282,24 +286,28 @@ impl IndexMerger {
fast_field_serializer: &mut FastFieldSerializer,
) -> Result<()> {
let mut total_num_vals = 0u64;
let mut u64s_readers: Vec<MultiValueIntFastFieldReader<u64>> = Vec::new();
// In the first pass, we compute the total number of vals.
//
// This is required by the bitpacker, as it needs to know
// what should be the bit length used for bitpacking.
for reader in &self.readers {
let idx_reader = reader.fast_field_reader_with_idx::<u64>(field, 0)?;
let u64s_reader = reader.fast_fields()
.u64s_lenient(field)
.expect("Failed to find index for multivalued field. This is a bug in tantivy, please report.");
if let Some(delete_bitset) = reader.delete_bitset() {
for doc in 0u32..reader.max_doc() {
if !delete_bitset.is_deleted(doc) {
let start = idx_reader.get(doc);
let end = idx_reader.get(doc + 1);
total_num_vals += end - start;
if delete_bitset.is_alive(doc) {
let num_vals = u64s_reader.num_vals(doc) as u64;
total_num_vals += num_vals;
}
}
} else {
total_num_vals += idx_reader.max_value();
total_num_vals += u64s_reader.total_num_vals();
}
u64s_readers.push(u64s_reader);
}
// We can now create our `idx` serializer, and in a second pass,
@@ -307,13 +315,10 @@ impl IndexMerger {
let mut serialize_idx =
fast_field_serializer.new_u64_fast_field_with_idx(field, 0, total_num_vals, 0)?;
let mut idx = 0;
for reader in &self.readers {
let idx_reader = reader.fast_field_reader_with_idx::<u64>(field, 0)?;
for doc in reader.doc_ids_alive() {
for (segment_reader, u64s_reader) in self.readers.iter().zip(&u64s_readers) {
for doc in segment_reader.doc_ids_alive() {
serialize_idx.add_val(idx)?;
let start = idx_reader.get(doc);
let end = idx_reader.get(doc + 1);
idx += end - start;
idx += u64s_reader.num_vals(doc) as u64;
}
}
serialize_idx.add_val(idx)?;
@@ -344,8 +349,10 @@ impl IndexMerger {
for (segment_ord, segment_reader) in self.readers.iter().enumerate() {
let term_ordinal_mapping: &[TermOrdinal] =
term_ordinal_mappings.get_segment(segment_ord);
let ff_reader: MultiValueIntFastFieldReader<u64> =
segment_reader.multi_fast_field_reader(field)?;
let ff_reader: MultiValueIntFastFieldReader<u64> = segment_reader
.fast_fields()
.u64s(field)
.expect("Could not find multivalued u64 fast value reader.");
// TODO optimize if no deletes
for doc in segment_reader.doc_ids_alive() {
ff_reader.get_vals(doc, &mut vals);
@@ -377,6 +384,8 @@ impl IndexMerger {
let mut vals = Vec::with_capacity(100);
let mut ff_readers = Vec::new();
// Our values are bitpacked and we need to know what should be
// our bitwidth and our minimum value before serializing any values.
//
@@ -385,7 +394,10 @@ impl IndexMerger {
// maximum value and initialize our Serializer.
for reader in &self.readers {
let ff_reader: MultiValueIntFastFieldReader<u64> =
reader.multi_fast_field_reader(field)?;
reader.fast_fields().u64s_lenient(field).expect(
"Failed to find multivalued fast field reader. This is a bug in \
tantivy. Please report.",
);
for doc in reader.doc_ids_alive() {
ff_reader.get_vals(doc, &mut vals);
for &val in &vals {
@@ -393,6 +405,7 @@ impl IndexMerger {
max_value = cmp::max(val, max_value);
}
}
ff_readers.push(ff_reader);
// TODO optimize when no deletes
}
@@ -405,9 +418,7 @@ impl IndexMerger {
{
let mut serialize_vals = fast_field_serializer
.new_u64_fast_field_with_idx(field, min_value, max_value, 1)?;
for reader in &self.readers {
let ff_reader: MultiValueIntFastFieldReader<u64> =
reader.multi_fast_field_reader(field)?;
for (reader, ff_reader) in self.readers.iter().zip(ff_readers) {
// TODO optimize if no deletes
for doc in reader.doc_ids_alive() {
ff_reader.get_vals(doc, &mut vals);
@@ -426,19 +437,53 @@ impl IndexMerger {
field: Field,
fast_field_serializer: &mut FastFieldSerializer,
) -> Result<()> {
self.write_fast_field_idx(field, fast_field_serializer)?;
let mut total_num_vals = 0u64;
let mut bytes_readers: Vec<BytesFastFieldReader> = Vec::new();
for reader in &self.readers {
let bytes_reader = reader.fast_fields().bytes(field).expect(
"Failed to find bytes fast field reader. This is a bug in tantivy, please report.",
);
if let Some(delete_bitset) = reader.delete_bitset() {
for doc in 0u32..reader.max_doc() {
if delete_bitset.is_alive(doc) {
let num_vals = bytes_reader.get_bytes(doc).len() as u64;
total_num_vals += num_vals;
}
}
} else {
total_num_vals += bytes_reader.total_num_bytes() as u64;
}
bytes_readers.push(bytes_reader);
}
{
// We can now create our `idx` serializer, and in a second pass,
// can effectively push the different indexes.
let mut serialize_idx =
fast_field_serializer.new_u64_fast_field_with_idx(field, 0, total_num_vals, 0)?;
let mut idx = 0;
for (segment_reader, bytes_reader) in self.readers.iter().zip(&bytes_readers) {
for doc in segment_reader.doc_ids_alive() {
serialize_idx.add_val(idx)?;
idx += bytes_reader.get_bytes(doc).len() as u64;
}
}
serialize_idx.add_val(idx)?;
serialize_idx.close_field()?;
}
let mut serialize_vals = fast_field_serializer.new_bytes_fast_field_with_idx(field, 1)?;
for reader in &self.readers {
let bytes_reader = reader.bytes_fast_field_reader(field)?;
for segment_reader in &self.readers {
let bytes_reader = segment_reader.fast_fields().bytes(field)
.expect("Failed to find bytes field in fast field reader. This is a bug in tantivy. Please report.");
// TODO: optimize if no deletes
for doc in reader.doc_ids_alive() {
let val = bytes_reader.get_val(doc);
for doc in segment_reader.doc_ids_alive() {
let val = bytes_reader.get_bytes(doc);
serialize_vals.write_all(val)?;
}
}
serialize_vals.flush()?;
Ok(())
}
@@ -979,14 +1024,16 @@ mod tests {
let score_field_reader = searcher
.segment_reader(0)
.fast_field_reader::<u64>(score_field)
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 4000);
assert_eq!(score_field_reader.max_value(), 7000);
let score_field_reader = searcher
.segment_reader(1)
.fast_field_reader::<u64>(score_field)
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 1);
assert_eq!(score_field_reader.max_value(), 3);
@@ -1037,7 +1084,8 @@ mod tests {
);
let score_field_reader = searcher
.segment_reader(0)
.fast_field_reader::<u64>(score_field)
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 3);
assert_eq!(score_field_reader.max_value(), 7000);
@@ -1083,7 +1131,8 @@ mod tests {
);
let score_field_reader = searcher
.segment_reader(0)
.fast_field_reader::<u64>(score_field)
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 3);
assert_eq!(score_field_reader.max_value(), 7000);
@@ -1135,7 +1184,8 @@ mod tests {
);
let score_field_reader = searcher
.segment_reader(0)
.fast_field_reader::<u64>(score_field)
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 6000);
assert_eq!(score_field_reader.max_value(), 7000);
@@ -1381,7 +1431,7 @@ mod tests {
{
let segment = searcher.segment_reader(0u32);
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[1, 2]);
@@ -1416,7 +1466,7 @@ mod tests {
{
let segment = searcher.segment_reader(1u32);
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[28, 27]);
@@ -1426,7 +1476,7 @@ mod tests {
{
let segment = searcher.segment_reader(2u32);
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[20]);
}
@@ -1459,7 +1509,7 @@ mod tests {
.collect::<Vec<_>>()
);
let segment = searcher.segment_reader(0u32);
let ff_reader = segment.multi_fast_field_reader(int_field).unwrap();
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[1, 2]);

View File

@@ -56,7 +56,7 @@ impl SegmentRegister {
.values()
.map(|segment_entry| segment_entry.meta().clone())
.collect();
segment_ids.sort_by_key(|meta| meta.id());
segment_ids.sort_by_key(SegmentMeta::id);
segment_ids
}

View File

@@ -5,6 +5,7 @@ use fastfield::FastFieldsWriter;
use fieldnorm::FieldNormsWriter;
use indexer::segment_serializer::SegmentSerializer;
use postings::MultiFieldPostingsWriter;
use schema::FieldEntry;
use schema::FieldType;
use schema::Schema;
use schema::Term;
@@ -53,7 +54,7 @@ impl SegmentWriter {
schema
.fields()
.iter()
.map(|field_entry| field_entry.field_type())
.map(FieldEntry::field_type)
.map(|field_type| match *field_type {
FieldType::Str(ref text_options) => text_options
.get_indexing_options()

View File

@@ -876,28 +876,28 @@ mod tests {
let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0);
{
let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(text_field);
assert!(fast_field_reader_res.is_err());
let fast_field_reader_opt = segment_reader.fast_fields().u64(text_field);
assert!(fast_field_reader_opt.is_none());
}
{
let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(stored_int_field);
assert!(fast_field_reader_res.is_err());
let fast_field_reader_opt = segment_reader.fast_fields().u64(stored_int_field);
assert!(fast_field_reader_opt.is_none());
}
{
let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(fast_field_signed);
assert!(fast_field_reader_res.is_err());
let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_signed);
assert!(fast_field_reader_opt.is_none());
}
{
let fast_field_reader_res = segment_reader.fast_field_reader::<i64>(fast_field_signed);
assert!(fast_field_reader_res.is_ok());
let fast_field_reader = fast_field_reader_res.unwrap();
let fast_field_reader_opt = segment_reader.fast_fields().i64(fast_field_signed);
assert!(fast_field_reader_opt.is_some());
let fast_field_reader = fast_field_reader_opt.unwrap();
assert_eq!(fast_field_reader.get(0), 4i64)
}
{
let fast_field_reader_res = segment_reader.fast_field_reader::<i64>(fast_field_signed);
assert!(fast_field_reader_res.is_ok());
let fast_field_reader = fast_field_reader_res.unwrap();
let fast_field_reader_opt = segment_reader.fast_fields().i64(fast_field_signed);
assert!(fast_field_reader_opt.is_some());
let fast_field_reader = fast_field_reader_opt.unwrap();
assert_eq!(fast_field_reader.get(0), 4i64)
}
}

View File

@@ -214,6 +214,13 @@ pub trait PostingsWriter {
if token.text.len() <= MAX_TOKEN_LEN {
term.set_text(token.text.as_str());
self.subscribe(term_index, doc_id, token.position as u32, &term, heap);
} else {
info!(
"A token exceeding MAX_TOKEN_LEN ({}>{}) was dropped. Search for \
MAX_TOKEN_LEN in the documentation for more information.",
token.text.len(),
MAX_TOKEN_LEN
);
}
};
token_stream.process(&mut sink)

View File

@@ -175,7 +175,7 @@ impl<'a> FieldSerializer<'a> {
let positions_idx = self
.positions_serializer_opt
.as_ref()
.map(|positions_serializer| positions_serializer.positions_idx())
.map(PositionSerializer::positions_idx)
.unwrap_or(0u64);
TermInfo {
doc_freq: 0,

View File

@@ -214,6 +214,102 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
}
}
// `ahead` is assumed to be initialized (`ahead.advance()` has been called at least once,
// and this returned true).
//
// `behind` is either uninitialized or such that `ahead.doc() > behind.doc()`.
fn next_in_intersection<'a, TScorer: Scorer>(
ahead: &'a mut TScorer,
behind: &'a mut TScorer,
) -> Option<DocId> {
let candidate = ahead.doc();
match behind.skip_next(candidate) {
SkipResult::Reached => Some(candidate),
SkipResult::OverStep => {
// yeah for tail-recursion
next_in_intersection(behind, ahead)
}
SkipResult::End => None,
}
}
enum SkipResultComplex {
Reached,
Overstep { other_ord: usize, candidate: DocId },
End,
}
fn skip_several_scorers<TDocSet: DocSet>(
others: &mut [TDocSet],
except_candidate_ord: usize,
target: DocId,
) -> SkipResultComplex {
for (ord, docset) in others.iter_mut().enumerate() {
// `candidate_ord` is already at the
// right position.
//
// Calling `skip_next` would advance this docset
// and miss it.
if ord == except_candidate_ord {
continue;
}
match docset.skip_next(target) {
SkipResult::Reached => {}
SkipResult::OverStep => {
return SkipResultComplex::Overstep {
other_ord: ord,
candidate: docset.doc(),
};
}
SkipResult::End => {
return SkipResultComplex::End;
}
}
}
SkipResultComplex::Reached
}
fn for_each<'a, TScorer: Scorer, TOtherscorer: Scorer>(
left: &'a mut TScorer,
right: &'a mut TScorer,
others: &'a mut [TOtherscorer],
callback: &mut FnMut(DocId, Score),
) {
let mut other_candidate_ord: usize = usize::max_value();
if !left.advance() {
return;
}
while let Some(candidate) = next_in_intersection(left, right) {
// test the remaining scorers
match skip_several_scorers(others, other_candidate_ord, candidate) {
SkipResultComplex::Reached => {
let intersection_score: Score = left.score()
+ right.score()
+ others.iter_mut().map(|other| other.score()).sum::<Score>();
callback(candidate, intersection_score);
if !left.advance() {
return;
}
}
SkipResultComplex::Overstep {
other_ord,
candidate,
} => match left.skip_next(candidate) {
SkipResult::End => {
return;
}
SkipResult::Reached => {
other_candidate_ord = other_ord;
}
SkipResult::OverStep => other_candidate_ord = usize::max_value(),
},
SkipResultComplex::End => {
return;
}
}
}
}
impl<TScorer, TOtherScorer> Scorer for Intersection<TScorer, TOtherScorer>
where
TScorer: Scorer,
@@ -224,6 +320,10 @@ where
+ self.right.score()
+ self.others.iter_mut().map(Scorer::score).sum::<Score>()
}
fn for_each(&mut self, callback: &mut FnMut(DocId, Score)) {
for_each(&mut self.left, &mut self.right, &mut self.others, callback);
}
}
#[cfg(test)]

View File

@@ -4,6 +4,7 @@ use error::TantivyError;
use query::bm25::BM25Weight;
use query::Query;
use query::Weight;
use schema::IndexRecordOption;
use schema::{Field, Term};
use std::collections::BTreeSet;
use Result;
@@ -83,7 +84,7 @@ impl Query for PhraseQuery {
let has_positions = field_entry
.field_type()
.get_index_record_option()
.map(|index_record_option| index_record_option.has_positions())
.map(IndexRecordOption::has_positions)
.unwrap_or(false);
if !has_positions {
let field_name = field_entry.name();

View File

@@ -1,6 +1,7 @@
#![cfg_attr(feature = "cargo-clippy", allow(clippy::unneeded_field_pattern))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::toplevel_ref_arg))]
use super::query_grammar;
use super::user_input_ast::*;
use combine::char::*;
use combine::error::StreamError;
@@ -22,7 +23,7 @@ parser! {
parser! {
fn word[I]()(I) -> String
where [I: Stream<Item = char>] {
many1(satisfy(|c: char| c.is_alphanumeric()))
many1(satisfy(char::is_alphanumeric))
.and_then(|s: String| {
match s.as_str() {
"OR" => Err(StreamErrorFor::<I>::unexpected_static_message("OR")),
@@ -62,7 +63,7 @@ parser! {
fn negative_number[I]()(I) -> String
where [I: Stream<Item = char>]
{
(char('-'), many1(satisfy(|c: char| c.is_numeric())))
(char('-'), many1(satisfy(char::is_numeric)))
.map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
}
}
@@ -184,7 +185,7 @@ parser! {
}
)
)
.map(|el| el.into_dnf())
.map(query_grammar::Element::into_dnf)
.map(|fnd| {
if fnd.len() == 1 {
UserInputAST::and(fnd.into_iter().next().unwrap()) //< safe

View File

@@ -16,6 +16,9 @@ pub trait Scorer: downcast_rs::Downcast + DocSet + 'static {
/// Iterates through all of the document matched by the DocSet
/// `DocSet` and push the scored documents to the collector.
///
/// This method assumes that the Scorer is brand new, and `.advance()`
/// and `.skip()` haven't been called yet.
fn for_each(&mut self, callback: &mut FnMut(DocId, Score)) {
while self.advance() {
callback(self.doc(), self.score());

View File

@@ -96,7 +96,7 @@ fn refill<TScorer: Scorer, TScoreCombiner: ScoreCombiner>(
impl<TScorer: Scorer, TScoreCombiner: ScoreCombiner> Union<TScorer, TScoreCombiner> {
fn refill(&mut self) -> bool {
if let Some(min_doc) = self.docsets.iter_mut().map(|docset| docset.doc()).min() {
if let Some(min_doc) = self.docsets.iter().map(DocSet::doc).min() {
self.offset = min_doc;
self.cursor = 0;
refill(
@@ -260,6 +260,23 @@ where
fn score(&mut self) -> Score {
self.score
}
fn for_each(&mut self, callback: &mut FnMut(DocId, Score)) {
// TODO how do we deal with the fact that people may have called .advance() before.
while self.refill() {
let offset = self.offset;
for cursor in 0..HORIZON_NUM_TINYBITSETS {
while let Some(val) = self.bitsets[cursor].pop_lowest() {
let delta = val + (cursor as u32) * 64;
let doc = offset + delta;
let score_combiner = &mut self.scores[delta as usize];
let score = score_combiner.score();
callback(doc, score);
score_combiner.clear();
}
}
}
}
}
#[cfg(test)]

View File

@@ -128,7 +128,7 @@ impl Document {
self.field_values
.iter()
.filter(|field_value| field_value.field() == field)
.map(|field_value| field_value.value())
.map(FieldValue::value)
.collect()
}
@@ -137,7 +137,7 @@ impl Document {
self.field_values
.iter()
.find(|field_value| field_value.field() == field)
.map(|field_value| field_value.value())
.map(FieldValue::value)
}
}

View File

@@ -4,6 +4,7 @@ use schema::{IntOptions, TextOptions};
use schema::Facet;
use schema::IndexRecordOption;
use schema::TextFieldIndexing;
use schema::Value;
use serde_json::Value as JsonValue;
@@ -94,7 +95,7 @@ impl FieldType {
match *self {
FieldType::Str(ref text_options) => text_options
.get_indexing_options()
.map(|indexing_options| indexing_options.index_option()),
.map(TextFieldIndexing::index_option),
FieldType::U64(ref int_options)
| FieldType::I64(ref int_options)
| FieldType::Date(ref int_options) => {

View File

@@ -130,7 +130,16 @@ impl SchemaBuilder {
self.add_field(field_entry)
}
/// Adds a fast bytes field to the schema
/// Adds a fast bytes field to the schema.
///
/// Bytes fields are not searchable and are only used
/// as fast fields, to associate any kind of payload
/// with a document.
///
/// For instance, learning-to-rank often requires access to
/// some document features at scoring time.
/// These can be serialized and stored as a bytes field for
/// rapid access when scoring each document.
pub fn add_bytes_field(&mut self, field_name: &str) -> Field {
let field_entry = FieldEntry::new_bytes(field_name.to_string());
self.add_field(field_entry)
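A hedged end-to-end sketch of that learning-to-rank style use, assembled from the bytes fast field test earlier in this changeset (field and function names are illustrative):

```rust
use tantivy::schema::SchemaBuilder;
use tantivy::Index;

// Assumes tantivy's `doc!` macro is in scope, as in the tests above.
fn bytes_field_roundtrip() -> tantivy::Result<()> {
    let mut schema_builder = SchemaBuilder::default();
    let features_field = schema_builder.add_bytes_field("features");
    let index = Index::create_in_ram(schema_builder.build());

    let mut index_writer = index.writer_with_num_threads(1, 30_000_000)?;
    // Store a per-document payload (e.g. serialized ranking features).
    index_writer.add_document(doc!(features_field => vec![0u8, 1, 2, 3]));
    index_writer.commit()?;

    let searcher = index.reader()?.searcher();
    let segment_reader = searcher.segment_reader(0);
    let bytes_reader = segment_reader
        .fast_fields()
        .bytes(features_field)
        .expect("features is not a bytes fast field");
    assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
    Ok(())
}
```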
@@ -224,7 +233,7 @@ impl Schema {
let field_name = self.get_field_name(field);
let values: Vec<Value> = field_values
.into_iter()
.map(|field_val| field_val.value())
.map(FieldValue::value)
.cloned()
.collect();
field_map.insert(field_name.to_string(), values);

View File

@@ -1,6 +1,7 @@
use htmlescape::encode_minimal;
use query::Query;
use schema::Field;
use schema::Value;
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
@@ -303,7 +304,7 @@ impl SnippetGenerator {
let text: String = doc
.get_all(self.field)
.into_iter()
.flat_map(|val| val.text())
.flat_map(Value::text)
.collect::<Vec<&str>>()
.join(" ");
self.snippet(&text)

View File

@@ -227,7 +227,7 @@ pub struct PerFieldSpaceUsage {
impl PerFieldSpaceUsage {
pub(crate) fn new(fields: HashMap<Field, FieldUsage>) -> PerFieldSpaceUsage {
let total = fields.values().map(|x| x.total()).sum();
let total = fields.values().map(FieldUsage::total).sum();
PerFieldSpaceUsage { fields, total }
}

File diff suppressed because it is too large.

View File

@@ -44,18 +44,17 @@ where
}
fn advance(&mut self) -> bool {
if self.tail.advance() {
if self.token_mut().text.is_ascii() {
// fast track for ascii.
self.token_mut().text.make_ascii_lowercase();
} else {
to_lowercase_unicode(&mut self.tail.token_mut().text, &mut self.buffer);
mem::swap(&mut self.tail.token_mut().text, &mut self.buffer);
}
true
} else {
false
if !self.tail.advance() {
return false;
}
if self.token_mut().text.is_ascii() {
// fast track for ascii.
self.token_mut().text.make_ascii_lowercase();
} else {
to_lowercase_unicode(&mut self.tail.token_mut().text, &mut self.buffer);
mem::swap(&mut self.tail.token_mut().text, &mut self.buffer);
}
true
}
}

View File

@@ -131,6 +131,7 @@
//! ```
//!
mod alphanum_only;
mod ascii_folding_filter;
mod facet_tokenizer;
mod lower_caser;
mod ngram_tokenizer;
@@ -144,6 +145,7 @@ mod tokenizer;
mod tokenizer_manager;
pub use self::alphanum_only::AlphaNumOnlyFilter;
pub use self::ascii_folding_filter::AsciiFoldingFilter;
pub use self::facet_tokenizer::FacetTokenizer;
pub use self::lower_caser::LowerCaser;
pub use self::ngram_tokenizer::NgramTokenizer;

View File

@@ -29,12 +29,9 @@ impl<'a> Tokenizer<'a> for RawTokenizer {
impl TokenStream for RawTokenStream {
fn advance(&mut self) -> bool {
if self.has_token {
self.has_token = false;
true
} else {
false
}
let result = self.has_token;
self.has_token = false;
result
}
fn token(&self) -> &Token {

View File

@@ -91,7 +91,6 @@ where
return true;
}
}
false
}
}

View File

@@ -38,23 +38,16 @@ impl<'a> TokenStream for SimpleTokenStream<'a> {
fn advance(&mut self) -> bool {
self.token.text.clear();
self.token.position = self.token.position.wrapping_add(1);
loop {
match self.chars.next() {
Some((offset_from, c)) => {
if c.is_alphanumeric() {
let offset_to = self.search_token_end();
self.token.offset_from = offset_from;
self.token.offset_to = offset_to;
self.token.text.push_str(&self.text[offset_from..offset_to]);
return true;
}
}
None => {
return false;
}
while let Some((offset_from, c)) = self.chars.next() {
if c.is_alphanumeric() {
let offset_to = self.search_token_end();
self.token.offset_from = offset_from;
self.token.offset_to = offset_to;
self.token.text.push_str(&self.text[offset_from..offset_to]);
return true;
}
}
false
}
fn token(&self) -> &Token {

View File

@@ -108,15 +108,14 @@ where
}
fn advance(&mut self) -> bool {
if self.tail.advance() {
// TODO remove allocation
let stemmed_str: String = self.stemmer.stem(&self.token().text).into_owned();
self.token_mut().text.clear();
self.token_mut().text.push_str(&stemmed_str);
true
} else {
false
if !self.tail.advance() {
return false;
}
// TODO remove allocation
let stemmed_str: String = self.stemmer.stem(&self.token().text).into_owned();
self.token_mut().text.clear();
self.token_mut().text.push_str(&stemmed_str);
true
}
}

View File

@@ -104,7 +104,6 @@ where
return true;
}
}
false
}
}

View File

@@ -1,4 +1,5 @@
use std::collections::HashMap;
use std::ops::Deref;
use std::sync::{Arc, RwLock};
use tokenizer::box_tokenizer;
use tokenizer::stemmer::Language;
@@ -46,7 +47,8 @@ impl TokenizerManager {
.read()
.expect("Acquiring the lock should never fail")
.get(tokenizer_name)
.map(|boxed_tokenizer| boxed_tokenizer.boxed_clone())
.map(Deref::deref)
.map(BoxedTokenizer::boxed_clone)
}
}