Compare commits

...

7 Commits
0.5.0 ... 0.5.1

Author        SHA1        Message                                                              Date
Paul Masurel  97b7984200  Updated CHANGELOG                                                    2018-03-10 14:08:11 +09:00
Paul Masurel  8683718159  Version bump                                                         2018-03-10 14:01:30 +09:00
Paul Masurel  0cf274135b  Clippy                                                               2018-03-10 13:07:18 +09:00
Paul Masurel  a3b44773bb  Bugfix and rustfmt                                                   2018-03-10 12:21:50 +09:00
Paul Masurel  ec7c582109  NOBUG no-simd compression fix                                        2018-03-09 14:19:58 +09:00
Ewan Higgs    ee7ab72fb1  Support trailing commas using ',+ ,' trick from Blandy 2017. (#250)  2018-02-27 10:33:39 +09:00
Dylan DPC     e82859f2e6  Update Cargo.toml (#249)                                             2018-02-24 09:17:33 +09:00
33 changed files with 216 additions and 117 deletions

View File

@@ -1,3 +1,8 @@
+Tantivy 0.5.1
+==========================
+- bugfix #254 : tantivy failed if no documents in a segment contained a specific field.
+
+
 Tantivy 0.5
 ==========================
 - Faceting
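
The bugfix entry above corresponds to the `test_indexedfield_not_in_documents` test added further down in this diff: a field can be declared in the schema yet never occur in any document of a segment, in which case the composite postings file has no entry for it. Before 0.5.1, `SegmentReader::inverted_index` panicked on such a field; it now returns an empty `InvertedIndexReader`. A minimal reproduction adapted from that test (the distinct field names are illustrative, not from the original):

    // Sketch of bug #254: `absent` is in the schema but never written to.
    let mut schema_builder = SchemaBuilder::default();
    let covered = schema_builder.add_text_field("covered", TEXT);
    let absent = schema_builder.add_text_field("absent", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let mut index_writer = index.writer_with_num_threads(2, 40_000_000).unwrap();
    index_writer.add_document(doc!(covered => "a"));
    index_writer.commit().unwrap();
    index.load_searchers().unwrap();

    let searcher = index.searcher();
    let segment_reader = searcher.segment_reader(0);
    // Panicked in 0.5.0; returns an empty InvertedIndexReader in 0.5.1.
    segment_reader.inverted_index(absent);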

View File

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.5.0"
+version = "0.5.1"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 build = "build.rs"
 license = "MIT"
@@ -32,7 +32,7 @@ itertools = "0.5.9"
 lz4 = "1.20"
 bit-set = "0.4.0"
 time = "0.1"
-uuid = { version = "0.5", features = ["v4", "serde"] }
+uuid = { version = "0.6", features = ["v4", "serde"] }
 chan = "0.1"
 crossbeam = "0.3"
 futures = "0.1"

View File

@@ -60,7 +60,7 @@ impl TopCollector {
             panic!("Limit must be strictly greater than 0.");
         }
         TopCollector {
-            limit: limit,
+            limit,
             heap: BinaryHeap::with_capacity(limit),
             segment_id: 0,
         }
@@ -119,7 +119,7 @@ impl Collector for TopCollector {
             }
         } else {
             let wrapped_doc = GlobalScoredDoc {
-                score: score,
+                score,
                 doc_address: DocAddress(self.segment_id, doc),
             };
             self.heap.push(wrapped_doc);

View File

@@ -16,10 +16,7 @@ pub struct FileAddr {
 impl FileAddr {
     fn new(field: Field, idx: usize) -> FileAddr {
-        FileAddr {
-            field: field,
-            idx: idx,
-        }
+        FileAddr { field, idx }
     }
 }
@@ -34,8 +31,8 @@ impl BinarySerializable for FileAddr {
         let field = Field::deserialize(reader)?;
         let idx = VInt::deserialize(reader)?.0 as usize;
         Ok(FileAddr {
-            field: field,
-            idx: idx,
+            field,
+            idx,
         })
     }
 }
@@ -169,10 +166,7 @@ impl CompositeFile {
     /// to a given `Field` and stored in a `CompositeFile`.
     pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<ReadOnlySource> {
         self.offsets_index
-            .get(&FileAddr {
-                field: field,
-                idx: idx,
-            })
+            .get(&FileAddr { field, idx, })
             .map(|&(from, to)| self.data.slice(from, to))
     }
 }

View File

@@ -14,7 +14,7 @@ impl<'a> OpenTimer<'a> {
     /// when the `OpenTimer` is dropped.
     pub fn open(&mut self, name: &'static str) -> OpenTimer {
         OpenTimer {
-            name: name,
+            name,
             timer_tree: self.timer_tree,
             start: PreciseTime::now(),
             depth: self.depth + 1,
@@ -58,7 +58,7 @@ impl TimerTree {
     /// Open a new named subtask
     pub fn open(&mut self, name: &'static str) -> OpenTimer {
         OpenTimer {
-            name: name,
+            name,
             timer_tree: self,
             start: PreciseTime::now(),
             depth: 0,

View File

@@ -1,4 +1,4 @@
-use common::bitpacker::compute_num_bits;
+use common::compute_num_bits;
 use common::bitpacker::{BitPacker, BitUnpacker};
 use common::CountingWriter;
 use std::cmp;
@@ -30,7 +30,7 @@ pub fn compress_sorted(vals: &mut [u32], output: &mut [u8], offset: u32) -> usiz
             .unwrap();
     }
     let compressed_size = counting_writer.written_bytes();
-    assert_eq!(compressed_size, compute_block_size(num_bits));
+    assert_eq!(compressed_size, compressed_block_size(num_bits));
     compressed_size
 }
@@ -112,14 +112,14 @@ impl BlockDecoder {
     ) -> usize {
         let consumed_size = {
             let num_bits = compressed_data[0];
-            let bit_unpacker = BitUnpacker::new(&compressed_data[1..], num_bits as usize);
+            let bit_unpacker = BitUnpacker::new(&compressed_data[1..], num_bits);
             for i in 0..COMPRESSION_BLOCK_SIZE {
                 let delta = bit_unpacker.get(i);
                 let val = offset + delta as u32;
                 self.output[i] = val;
                 offset = val;
             }
-            compute_block_size(num_bits)
+            compressed_block_size(num_bits)
         };
         self.output_len = COMPRESSION_BLOCK_SIZE;
         consumed_size
@@ -127,7 +127,7 @@ impl BlockDecoder {
     pub fn uncompress_block_unsorted<'a>(&mut self, compressed_data: &'a [u8]) -> usize {
         let num_bits = compressed_data[0];
-        let bit_unpacker = BitUnpacker::new(&compressed_data[1..], num_bits as usize);
+        let bit_unpacker = BitUnpacker::new(&compressed_data[1..], num_bits);
         for i in 0..COMPRESSION_BLOCK_SIZE {
             self.output[i] = bit_unpacker.get(i) as u32;
         }

View File

@@ -33,7 +33,12 @@ impl IndexMeta {
 impl fmt::Debug for IndexMeta {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", serde_json::ser::to_string(self).expect("JSON serialization for IndexMeta should never fail."))
+        write!(
+            f,
+            "{}",
+            serde_json::ser::to_string(self)
+                .expect("JSON serialization for IndexMeta should never fail.")
+        )
     }
 }

View File

@@ -7,6 +7,7 @@ use schema::Term;
 use fastfield::DeleteBitSet;
 use compression::CompressedIntStream;
 use postings::FreqReadingOption;
+use schema::FieldType;

 /// The inverted index reader is in charge of accessing
 /// the inverted index associated to a specific field.
@@ -31,14 +32,14 @@ pub struct InvertedIndexReader {
 impl InvertedIndexReader {
     pub(crate) fn new(
-        termdict_source: ReadOnlySource,
+        termdict: TermDictionaryImpl,
         postings_source: ReadOnlySource,
         positions_source: ReadOnlySource,
         delete_bitset: DeleteBitSet,
         record_option: IndexRecordOption,
     ) -> InvertedIndexReader {
         InvertedIndexReader {
-            termdict: TermDictionaryImpl::from_source(termdict_source),
+            termdict,
             postings_source,
             positions_source,
             delete_bitset,
@@ -46,6 +47,21 @@ impl InvertedIndexReader {
         }
     }

+    /// Creates an empty `InvertedIndexReader` object, which
+    /// contains no terms at all.
+    pub fn empty(field_type: FieldType) -> InvertedIndexReader {
+        let record_option = field_type
+            .get_index_record_option()
+            .unwrap_or(IndexRecordOption::Basic);
+        InvertedIndexReader::new(
+            TermDictionaryImpl::empty(field_type),
+            ReadOnlySource::empty(),
+            ReadOnlySource::empty(),
+            DeleteBitSet::empty(),
+            record_option,
+        )
+    }
+
     /// Returns the term info associated with the term.
     pub fn get_term_info(&self, term: &Term) -> Option<TermInfo> {
         self.termdict.get(term.value_bytes())

View File

@@ -8,7 +8,6 @@ use core::SegmentMeta;
 use fastfield::{self, FastFieldNotAvailableError};
 use fastfield::DeleteBitSet;
 use store::StoreReader;
-use directory::ReadOnlySource;
 use schema::Document;
 use DocId;
 use std::sync::Arc;
@@ -97,7 +96,8 @@ impl SegmentReader {
         field: Field,
     ) -> fastfield::Result<FastFieldReader<Item>> {
         let field_entry = self.schema.get_field_entry(field);
-        if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::SingleValue) {
+        if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::SingleValue)
+        {
             self.fast_fields_composite
                 .open_read(field)
                 .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
@@ -109,9 +109,13 @@ impl SegmentReader {
     /// Accessor to the `MultiValueIntFastFieldReader` associated to a given `Field`.
     /// May panick if the field is not a multivalued fastfield of the type `Item`.
-    pub fn multi_fast_field_reader<Item: FastValue>(&self, field: Field) -> fastfield::Result<MultiValueIntFastFieldReader<Item>> {
+    pub fn multi_fast_field_reader<Item: FastValue>(
+        &self,
+        field: Field,
+    ) -> fastfield::Result<MultiValueIntFastFieldReader<Item>> {
         let field_entry = self.schema.get_field_entry(field);
-        if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::MultiValues) {
+        if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::MultiValues)
+        {
             let idx_reader = self.fast_fields_composite
                 .open_read_with_idx(field, 0)
                 .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
@@ -218,6 +222,8 @@ impl SegmentReader {
     }

     /// Returns a field reader associated to the field given in argument.
+    /// If the field was not present in the index during indexing time,
+    /// the InvertedIndexReader is empty.
     ///
     /// The field reader is in charge of iterating through the
     /// term dictionary associated to a specific field,
@@ -230,27 +236,38 @@ impl SegmentReader {
         {
             return Arc::clone(inv_idx_reader);
         }
-        let record_option = self.schema
-            .get_field_entry(field)
-            .field_type()
-            .get_index_record_option()
-            .expect("Field does not seem indexed.");
-        let termdict_source: ReadOnlySource = self.termdict_composite
+        let field_entry = self.schema.get_field_entry(field);
+        let field_type = field_entry.field_type();
+        let record_option_opt = field_type.get_index_record_option();
+        if record_option_opt.is_none() {
+            panic!("Field {:?} does not seem indexed.", field_entry.name());
+        }
+        let record_option = record_option_opt.unwrap();
+        let postings_source_opt = self.postings_composite.open_read(field);
+        if postings_source_opt.is_none() {
+            // no documents in the segment contained this field.
+            // As a result, no data is associated to the inverted index.
+            //
+            // Returns an empty inverted index.
+            return Arc::new(InvertedIndexReader::empty(field_type.clone()));
+        }
+        let postings_source = postings_source_opt.unwrap();
+        let termdict_source = self.termdict_composite
             .open_read(field)
             .expect("Failed to open field term dictionary in composite file. Is the field indexed");
-        let postings_source = self.postings_composite
-            .open_read(field)
-            .expect("Index corrupted. Failed to open field postings in composite file.");
         let positions_source = self.positions_composite
             .open_read(field)
            .expect("Index corrupted. Failed to open field positions in composite file.");
         let inv_idx_reader = Arc::new(InvertedIndexReader::new(
-            termdict_source,
+            TermDictionaryImpl::from_source(termdict_source),
             postings_source,
             positions_source,
             self.delete_bitset.clone(),

View File

@@ -118,9 +118,9 @@ struct QuadraticProbing {
 impl QuadraticProbing {
     fn compute(hash: usize, mask: usize) -> QuadraticProbing {
         QuadraticProbing {
-            hash: hash,
+            hash,
             i: 0,
-            mask: mask,
+            mask,
         }
     }
@@ -137,7 +137,7 @@ impl<'a> TermHashMap<'a> {
         let table: Vec<KeyValue> = iter::repeat(KeyValue::default()).take(table_size).collect();
         TermHashMap {
             table: table.into_boxed_slice(),
-            heap: heap,
+            heap,
             mask: table_size - 1,
             occupied: Vec::with_capacity(table_size / 2),
         }
@@ -158,11 +158,10 @@ impl<'a> TermHashMap<'a> {
         (key_bytes, expull_addr)
     }

-    pub fn set_bucket(&mut self, hash: u32, key_bytes_ref: BytesRef, bucket: usize) {
+    pub fn set_bucket(&mut self, hash: u32, key_value_addr: BytesRef, bucket: usize) {
         self.occupied.push(bucket);
         self.table[bucket] = KeyValue {
-            key_value_addr: key_bytes_ref,
-            hash: hash,
+            key_value_addr, hash
         };
     }
@@ -193,7 +192,10 @@ impl<'a> TermHashMap<'a> {
         } else if kv.hash == hash {
             let (stored_key, expull_addr): (&[u8], u32) = self.get_key_value(kv.key_value_addr);
             if stored_key == key_bytes {
-                return (bucket as UnorderedTermId, self.heap.get_mut_ref(expull_addr));
+                return (
+                    bucket as UnorderedTermId,
+                    self.heap.get_mut_ref(expull_addr),
+                );
             }
         }
     }

View File

@@ -106,7 +106,6 @@ pub trait DocSet {
     }
 }
-

 impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
     fn advance(&mut self) -> bool {
         let unboxed: &mut TDocSet = self.borrow_mut();
@@ -133,11 +132,8 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
         unboxed.count()
     }
-
-
-

     fn append_to_bitset(&mut self, bitset: &mut BitSet) {
         let unboxed: &mut TDocSet = self.borrow_mut();
         unboxed.append_to_bitset(bitset);
     }
 }

View File

@@ -36,7 +36,7 @@ impl FacetReader {
     ) -> FacetReader {
         FacetReader {
             term_ords,
-            term_dict
+            term_dict,
         }
     }

View File

@@ -67,7 +67,6 @@ pub trait FastValue: Default + Clone + Copy {
     fn as_u64(&self) -> u64;
 }
-

 impl FastValue for u64 {
     fn from_u64(val: u64) -> Self {
         val
@@ -83,10 +82,8 @@ impl FastValue for u64 {
     fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
         match *field_type {
-            FieldType::U64(ref integer_options) =>
-                integer_options.get_fastfield_cardinality(),
-            FieldType::HierarchicalFacet =>
-                Some(Cardinality::MultiValues),
+            FieldType::U64(ref integer_options) => integer_options.get_fastfield_cardinality(),
+            FieldType::HierarchicalFacet => Some(Cardinality::MultiValues),
             _ => None,
         }
     }
@@ -101,11 +98,9 @@ impl FastValue for i64 {
         common::i64_to_u64(*self)
     }
-

     fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
         match *field_type {
-            FieldType::I64(ref integer_options) =>
-                integer_options.get_fastfield_cardinality(),
+            FieldType::I64(ref integer_options) => integer_options.get_fastfield_cardinality(),
             _ => None,
         }
     }
@@ -123,7 +118,6 @@ fn value_to_u64(value: &Value) -> u64 {
     }
 }
-

 #[cfg(test)]
 mod tests {

View File

@@ -17,7 +17,7 @@ mod tests {
         let mut schema_builder = SchemaBuilder::default();
         let field = schema_builder.add_u64_field(
             "multifield",
-            IntOptions::default().set_fast(Cardinality::MultiValues)
+            IntOptions::default().set_fast(Cardinality::MultiValues),
         );
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
@@ -47,13 +47,12 @@ mod tests {
         }
     }
-

     #[test]
     fn test_multivalued_i64() {
         let mut schema_builder = SchemaBuilder::default();
         let field = schema_builder.add_i64_field(
             "multifield",
-            IntOptions::default().set_fast(Cardinality::MultiValues)
+            IntOptions::default().set_fast(Cardinality::MultiValues),
         );
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
@@ -86,4 +85,4 @@ mod tests {
         assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
     }
-}
+    }
 }

View File

@@ -1,7 +1,6 @@
 use DocId;
 use fastfield::{FastFieldReader, FastValue};
-

 /// Reader for a multivalued `u64` fast field.
 ///
 /// The reader is implemented as two `u64` fast field.
@@ -13,7 +12,7 @@ use fastfield::{FastFieldReader, FastValue};
 #[derive(Clone)]
 pub struct MultiValueIntFastFieldReader<Item: FastValue> {
     idx_reader: FastFieldReader<u64>,
-    vals_reader: FastFieldReader<Item>
+    vals_reader: FastFieldReader<Item>,
 }

 impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
@@ -23,7 +22,7 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
     ) -> MultiValueIntFastFieldReader<Item> {
         MultiValueIntFastFieldReader {
             idx_reader,
-            vals_reader
+            vals_reader,
         }
     }

View File

@@ -7,12 +7,11 @@ use schema::{Document, Field};
 use std::io;
 use itertools::Itertools;
-

 pub struct MultiValueIntFastFieldWriter {
     field: Field,
     vals: Vec<u64>,
     doc_index: Vec<u64>,
-    is_facet: bool
+    is_facet: bool,
 }
@@ -22,7 +21,7 @@ impl MultiValueIntFastFieldWriter {
             field,
             vals: Vec::new(),
             doc_index: Vec::new(),
-            is_facet
+            is_facet,
         }
     }
@@ -51,7 +50,6 @@ impl MultiValueIntFastFieldWriter {
             }
         }
     }
-
 }

 /// Serializes fast field values by pushing them to the `FastFieldSerializer`.
@@ -85,8 +83,12 @@ impl MultiValueIntFastFieldWriter {
         let mut value_serializer: FastSingleFieldSerializer<_>;
         match mapping_opt {
             Some(mapping) => {
-                value_serializer =
-                    serializer.new_u64_fast_field_with_idx(self.field, 0u64, mapping.len() as u64, 1)?;
+                value_serializer = serializer.new_u64_fast_field_with_idx(
+                    self.field,
+                    0u64,
+                    mapping.len() as u64,
+                    1,
+                )?;
                 for val in &self.vals {
                     let remapped_val = *mapping.get(val).expect("Missing term ordinal") as u64;
                     value_serializer.add_val(remapped_val)?;

View File

@@ -24,11 +24,10 @@ pub struct FastFieldReader<Item: FastValue> {
     bit_unpacker: BitUnpacker<OwningRef<ReadOnlySource, [u8]>>,
     min_value_u64: u64,
     max_value_u64: u64,
-    _phantom: PhantomData<Item>
+    _phantom: PhantomData<Item>,
 }

 impl<Item: FastValue> FastFieldReader<Item> {
-
     /// Opens a fast field given a source.
     pub fn open(data: ReadOnlySource) -> Self {
         let min_value: u64;
@@ -48,11 +47,10 @@ impl<Item: FastValue> FastFieldReader<Item> {
             min_value_u64: min_value,
             max_value_u64: max_value,
             bit_unpacker,
-            _phantom: PhantomData
+            _phantom: PhantomData,
         }
     }

-
     /// Return the value associated to the given document.
     ///
     /// This accessor should return as fast as possible.
@@ -73,7 +71,7 @@ impl<Item: FastValue> FastFieldReader<Item> {
     ///
     /// May panic if `start + output.len()` is greater than
     /// the segment's `maxdoc`.
-    pub fn get_range(&self, start: u32, output: &mut [Item]) {
+    pub fn get_range(&self, start: u32, output: &mut [Item]) {
         let output_u64: &mut [u64] = unsafe { mem::transmute(output) };
         self.bit_unpacker.get_range(start, output_u64);
         for out in output_u64.iter_mut() {
@@ -137,4 +135,3 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
         FastFieldReader::open(field_source)
     }
 }
-

View File

@@ -36,9 +36,7 @@ impl FastFieldSerializer {
     pub fn from_write(write: WritePtr) -> io::Result<FastFieldSerializer> {
         // just making room for the pointer to header.
         let composite_write = CompositeWrite::wrap(write);
-        Ok(FastFieldSerializer {
-            composite_write
-        })
+        Ok(FastFieldSerializer { composite_write })
     }

     /// Start serializing a new u64 fast field

View File

@@ -53,7 +53,7 @@ impl FastFieldsWriter {
         }
         FastFieldsWriter {
             single_value_writers,
-            multi_values_writers
+            multi_values_writers,
         }
     }

View File

@@ -11,9 +11,9 @@ pub struct PreparedCommit<'a> {
 impl<'a> PreparedCommit<'a> {
     pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: u64) -> PreparedCommit {
         PreparedCommit {
-            index_writer: index_writer,
+            index_writer,
             payload: None,
-            opstamp: opstamp,
+            opstamp
         }
     }

View File

@@ -645,6 +645,22 @@ mod tests {
         assert!(!postings.advance());
     }

+    #[test]
+    fn test_indexedfield_not_in_documents() {
+        let mut schema_builder = SchemaBuilder::default();
+        let text_field = schema_builder.add_text_field("text", TEXT);
+        let absent_field = schema_builder.add_text_field("text", TEXT);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_with_num_threads(2, 40_000_000).unwrap();
+        index_writer.add_document(doc!(text_field=>"a"));
+        assert!(index_writer.commit().is_ok());
+        assert!(index.load_searchers().is_ok());
+        let searcher = index.searcher();
+        let segment_reader = searcher.segment_reader(0);
+        segment_reader.inverted_index(absent_field); //< should not panic
+    }
+
     #[test]
     fn test_delete_postings2() {
         let mut schema_builder = SchemaBuilder::default();
@@ -859,31 +875,26 @@ mod tests {
         let searcher = index.searcher();
         let segment_reader: &SegmentReader = searcher.segment_reader(0);
         {
-            let fast_field_reader_res =
-                segment_reader.fast_field_reader::<u64>(text_field);
+            let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(text_field);
             assert!(fast_field_reader_res.is_err());
         }
         {
-            let fast_field_reader_res =
-                segment_reader.fast_field_reader::<u64>(stored_int_field);
+            let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(stored_int_field);
             assert!(fast_field_reader_res.is_err());
         }
         {
-            let fast_field_reader_res =
-                segment_reader.fast_field_reader::<u64>(fast_field_signed);
+            let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(fast_field_signed);
             assert!(fast_field_reader_res.is_err());
         }
         {
-            let fast_field_reader_res =
-                segment_reader.fast_field_reader::<i64>(fast_field_signed);
+            let fast_field_reader_res = segment_reader.fast_field_reader::<i64>(fast_field_signed);
             assert!(fast_field_reader_res.is_ok());
             let fast_field_reader = fast_field_reader_res.unwrap();
             assert_eq!(fast_field_reader.get(0), 4i64)
         }
         {
-            let fast_field_reader_res =
-                segment_reader.fast_field_reader::<i64>(fast_field_signed);
+            let fast_field_reader_res = segment_reader.fast_field_reader::<i64>(fast_field_signed);
             assert!(fast_field_reader_res.is_ok());
             let fast_field_reader = fast_field_reader_res.unwrap();
             assert_eq!(fast_field_reader.get(0), 4i64)

View File

@@ -63,4 +63,41 @@ macro_rules! doc(
             document
         }
     };
+    // if there is a trailing comma retry with the trailing comma stripped.
+    ($($field:expr => $value:expr),+ ,) => {
+        doc!( $( $field => $value ), *);
+    };
 );

+#[cfg(test)]
+mod test {
+
+    use schema::{SchemaBuilder, FAST, TEXT};
+
+    #[test]
+    fn test_doc_basic() {
+        let mut schema_builder = SchemaBuilder::new();
+        let title = schema_builder.add_text_field("title", TEXT);
+        let author = schema_builder.add_text_field("text", TEXT);
+        let likes = schema_builder.add_u64_field("num_u64", FAST);
+        let _schema = schema_builder.build();
+        let _doc = doc!(
+            title => "Life Aquatic",
+            author => "Wes Anderson",
+            likes => 4u64
+        );
+    }
+
+    #[test]
+    fn test_doc_trailing_comma() {
+        let mut schema_builder = SchemaBuilder::new();
+        let title = schema_builder.add_text_field("title", TEXT);
+        let author = schema_builder.add_text_field("text", TEXT);
+        let likes = schema_builder.add_u64_field("num_u64", FAST);
+        let _schema = schema_builder.build();
+        let _doc = doc!(
+            title => "Life Aquatic",
+            author => "Wes Anderson",
+            likes => 4u64,
+        );
+    }
+}
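
The trailing-comma support added above uses a standard `macro_rules!` idiom: an extra arm matches one-or-more comma-separated pairs followed by a dangling comma, then re-invokes the macro with the comma stripped so the main arm can handle it. A standalone sketch of the same trick on a toy macro (not part of tantivy):

    // Toy `sum!` macro using the same trailing-comma arm as `doc!` above.
    macro_rules! sum(
        // main arm: comma-separated expressions, no trailing comma
        ($($x:expr),*) => { 0 $(+ $x)* };
        // extra arm: strip the trailing comma and retry
        ($($x:expr),+ ,) => { sum!($($x),*) };
    );

    fn main() {
        assert_eq!(sum!(1, 2, 3), 6);
        assert_eq!(sum!(1, 2, 3,), 6); // trailing comma now accepted
    }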

View File

@@ -21,8 +21,6 @@ mod tests {
     use query::RequiredOptionalScorer;
     use query::score_combiner::SumWithCoordsCombiner;
-
-

     fn aux_test_helper() -> (Index, Field) {
         let mut schema_builder = SchemaBuilder::default();
         let text_field = schema_builder.add_text_field("text", TEXT);
@@ -104,7 +102,9 @@ mod tests {
             let query = query_parser.parse_query("+a b").unwrap();
             let weight = query.weight(&*searcher, true).unwrap();
             let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
-            assert!(Downcast::<RequiredOptionalScorer<Box<Scorer>, Box<Scorer>, SumWithCoordsCombiner>>::is_type(&*scorer));
+            assert!(Downcast::<
+                RequiredOptionalScorer<Box<Scorer>, Box<Scorer>, SumWithCoordsCombiner>,
+            >::is_type(&*scorer));
         }
         {
             let query = query_parser.parse_query("+a b").unwrap();
@@ -116,7 +116,6 @@ mod tests {
     #[test]
     pub fn test_boolean_query() {
         let (index, text_field) = aux_test_helper();
-
         let make_term_query = |text: &str| {

View File

@@ -41,7 +41,8 @@ fn leaf<I>(input: I) -> ParseResult<UserInputAST, I>
 where
     I: Stream<Item = char>,
 {
-    (char('-'), parser(leaf)).map(|(_, expr)| UserInputAST::Not(box expr))
+    (char('-'), parser(leaf))
+        .map(|(_, expr)| UserInputAST::Not(box expr))
         .or((char('+'), parser(leaf)).map(|(_, expr)| UserInputAST::Must(box expr)))
         .or((char('('), parser(parse_to_ast), char(')')).map(|(_, expr, _)| expr))
         .or(parser(literal))

View File

@@ -155,7 +155,7 @@ impl QueryParser {
     fn compute_logical_ast_for_leaf(
         &self,
         field: Field,
-        phrase: &str
+        phrase: &str,
     ) -> Result<Option<LogicalLiteral>, QueryParserError> {
         let field_entry = self.schema.get_field_entry(field);
         let field_type = field_entry.field_type();
@@ -328,7 +328,7 @@ mod test {
     use tokenizer::TokenizerManager;
     use query::Query;
     use schema::Field;
-    use schema::{TextOptions, TextFieldIndexing, IndexRecordOption};
+    use schema::{IndexRecordOption, TextFieldIndexing, TextOptions};
     use super::QueryParser;
     use super::QueryParserError;
     use Index;
@@ -538,7 +538,9 @@ mod test {
         let title = schema_builder.add_text_field("title", text_options);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        index.tokenizers().register("customtokenizer", SimpleTokenizer);
+        index
+            .tokenizers()
+            .register("customtokenizer", SimpleTokenizer);
         let query_parser = QueryParser::for_index(&index, vec![title]);
         assert!(query_parser.parse_query("title:\"happy tax\"").is_ok());
     }

View File

@@ -51,8 +51,10 @@ impl DocSet for EmptyScorer {
     }

     fn doc(&self) -> DocId {
-        panic!("You may not call .doc() on a scorer \
-                where the last call to advance() did not return true.");
+        panic!(
+            "You may not call .doc() on a scorer \
+             where the last call to advance() did not return true."
+        );
     }

     fn size_hint(&self) -> u32 {

View File

@@ -110,7 +110,9 @@ impl<TScorer: Scorer, TScoreCombiner: ScoreCombiner> Union<TScorer, TScoreCombin
 }

 impl<TScorer, TScoreCombiner> DocSet for Union<TScorer, TScoreCombiner>
-where TScorer: Scorer, TScoreCombiner: ScoreCombiner
+where
+    TScorer: Scorer,
+    TScoreCombiner: ScoreCombiner,
 {
     fn advance(&mut self) -> bool {
         if self.advance_buffered() {

View File

@@ -123,7 +123,6 @@ impl Default for SchemaBuilder {
     }
 }
-

 struct InnerSchema {
     fields: Vec<FieldEntry>,
     fields_map: HashMap<String, Field>, // transient
@@ -243,7 +242,6 @@ impl Schema {
     }
 }
-

 impl Serialize for Schema {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where
@@ -292,7 +290,6 @@ impl<'de> Deserialize<'de> for Schema {
     }
 }
-

 /// Error that may happen when deserializing
 /// a document from JSON.
 #[derive(Debug)]
@@ -467,7 +464,10 @@ mod tests {
             "jambon": "bayonne"
         }"#,
         );
-        assert_matches!(json_err, Err(DocParsingError::ValueError(_, ValueParsingError::TypeError(_))));
+        assert_matches!(
+            json_err,
+            Err(DocParsingError::ValueError(_, ValueParsingError::TypeError(_)))
+        );
     }
     {
         let json_err = schema.parse_document(
@@ -478,7 +478,10 @@ mod tests {
             "popularity": 10
         }"#,
         );
-        assert_matches!(json_err, Err(DocParsingError::ValueError(_, ValueParsingError::OverflowError(_))));
+        assert_matches!(
+            json_err,
+            Err(DocParsingError::ValueError(_, ValueParsingError::OverflowError(_)))
+        );
     }
     {
         let json_err = schema.parse_document(
@@ -489,7 +492,10 @@ mod tests {
             "popularity": 10
         }"#,
         );
-        assert!(!matches!(json_err, Err(DocParsingError::ValueError(_, ValueParsingError::OverflowError(_)))));
+        assert!(!matches!(
+            json_err,
+            Err(DocParsingError::ValueError(_, ValueParsingError::OverflowError(_)))
+        ));
     }
     {
         let json_err = schema.parse_document(
@@ -500,7 +506,10 @@ mod tests {
             "popularity": 9223372036854775808
         }"#,
         );
-        assert_matches!(json_err, Err(DocParsingError::ValueError(_, ValueParsingError::OverflowError(_))));
+        assert_matches!(
+            json_err,
+            Err(DocParsingError::ValueError(_, ValueParsingError::OverflowError(_)))
+        );
     }
     {
         let json_err = schema.parse_document(

View File

@@ -13,8 +13,8 @@ pub struct TermStreamerBuilderImpl<'a> {
 impl<'a> TermStreamerBuilderImpl<'a> {
     pub(crate) fn new(fst_map: &'a TermDictionaryImpl, stream_builder: StreamBuilder<'a>) -> Self {
         TermStreamerBuilderImpl {
-            fst_map: fst_map,
-            stream_builder: stream_builder,
+            fst_map,
+            stream_builder,
         }
     }
 }

View File

@@ -1,4 +1,3 @@
-use std::io;
 use std::cmp;
 use std::io::{Read, Write};
View File

@@ -120,6 +120,16 @@ impl<'a> TermDictionary<'a> for TermDictionaryImpl {
         }
     }

+    fn empty(field_type: FieldType) -> Self {
+        let term_dictionary_data: Vec<u8> =
+            TermDictionaryBuilderImpl::new(Vec::<u8>::new(), field_type)
+                .expect("Creating a TermDictionaryBuilder in a Vec<u8> should never fail")
+                .finish()
+                .expect("Writing in a Vec<u8> should never fail");
+        let source = ReadOnlySource::from(term_dictionary_data);
+        Self::from_source(source)
+    }
+
     fn num_terms(&self) -> usize {
         self.term_info_store.num_terms()
     }

View File

@@ -127,6 +127,9 @@ where
             .lt(stop_term.as_slice())
             .into_stream()
     }
+
+    /// Creates an empty term dictionary which contains no terms.
+    fn empty(field_type: FieldType) -> Self;
 }

 /// Builder for the new term dictionary.

View File

@@ -31,7 +31,7 @@ impl<'a> Tokenizer<'a> for FacetTokenizer {
     fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
         FacetTokenStream {
-            text: text,
+            text,
             state: State::RootFacetNotEmitted, //< pos is the first char that has not been processed yet.
             token: Token::default(),
         }