Tiqb feature/2018 (#583)

* rust 2018

* Added CHANGELOG comment
Paul Masurel
2019-07-01 10:01:46 +09:00
committed by GitHub
parent 185a5b8d31
commit 462774b15c
161 changed files with 1301 additions and 1318 deletions

View File

@@ -11,6 +11,7 @@ Tantivy 0.10.0
Minor
---------
- Switched to Rust 2018 (@uvd)
- Small simplification of the code.
Calling .freq() or .doc() on segment postings before .advance() has
ever been called now panics.
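
For context, a minimal stand-in cursor illustrating the contract described in this entry; the type below is hypothetical and only mimics the advance-before-read discipline of tantivy's segment postings:

// Hypothetical stand-in for a postings cursor: `doc()` and `freq()`
// are only valid once `advance()` has returned true at least once.
struct PostingsCursor {
    docs_and_freqs: Vec<(u32, u32)>,
    position: Option<usize>,
}

impl PostingsCursor {
    fn advance(&mut self) -> bool {
        let next = self.position.map(|p| p + 1).unwrap_or(0);
        if next < self.docs_and_freqs.len() {
            self.position = Some(next);
            true
        } else {
            false
        }
    }

    fn doc(&self) -> u32 {
        // Panics if `advance()` was never called, per the new contract.
        self.docs_and_freqs[self.position.expect(".advance() was never called")].0
    }

    fn freq(&self) -> u32 {
        self.docs_and_freqs[self.position.expect(".advance() was never called")].1
    }
}

fn main() {
    let mut cursor = PostingsCursor {
        docs_and_freqs: vec![(3, 1), (7, 2)],
        position: None,
    };
    // Calling `cursor.doc()` here would panic.
    while cursor.advance() {
        println!("doc {} has term frequency {}", cursor.doc(), cursor.freq());
    }
}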

View File

@@ -10,6 +10,7 @@ homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"
[dependencies]
base64 = "0.10.0"
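
The single added `edition` line above opts the whole crate into the 2018 edition, which drives the rest of this diff. A minimal sketch of the path-resolution change (module and type names are illustrative):

// In a 2018-edition crate, `use` paths are unambiguous: bare names refer
// to external crates, while `crate::` refers to this crate's root.
mod collector {
    pub struct Count;
}

// Rust 2015 wrote `use collector::Count;` (implicitly crate-relative);
// Rust 2018 spells the crate-relative path out, as the hunks below do.
use crate::collector::Count;

fn main() {
    let _count = Count;
}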

View File

@@ -10,8 +10,6 @@
// - search for the best document matching "sea whale"
// - retrieve the best document original content.
extern crate tempdir;
// ---
// Importing tantivy...
#[macro_use]
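
The hunks in this and the following example files drop `extern crate` declarations. In the 2018 edition, crates listed in Cargo.toml are in scope automatically, and exported macros can be imported with plain `use` instead of `#[macro_use]`. A sketch (the commented lines show the 2015 style; the crate names come from this diff):

// Rust 2015:
// extern crate tempdir;
// #[macro_use]
// extern crate tantivy;

// Rust 2018: no declaration needed; macros import like any item, e.g.
// use tantivy::doc;

fn main() {
    println!("extern crate declarations are unnecessary in the 2018 edition");
}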

View File

@@ -7,8 +7,6 @@
// Of course, you can have a look at the tantivy's built-in collectors
// such as the `CountCollector` for more examples.
extern crate tempdir;
// ---
// Importing tantivy...
#[macro_use]

View File

@@ -10,8 +10,6 @@
// - search for the best document matching "sea whale"
// - retrieve the best document original content.
extern crate tempdir;
// ---
// Importing tantivy...
#[macro_use]

View File

@@ -23,8 +23,6 @@
// index a single document?), but aims at demonstrating the mechanism that makes indexing
// from several threads possible.
extern crate tempdir;
// ---
// Importing tantivy...
#[macro_use]

View File

@@ -4,7 +4,6 @@
// your hit result.
// Snippets are extracts of a target document, returned in HTML format.
// The keywords searched by the user are highlighted with a `<b>` tag.
extern crate tempdir;
// ---
// Importing tantivy...

View File

@@ -9,8 +9,6 @@
// - add a few stop words
// - index a few documents in our index
extern crate tempdir;
// ---
// Importing tantivy...
#[macro_use]

View File

@@ -1,4 +1,4 @@
extern crate tantivy;
use tantivy;
use tantivy::schema::*;
// # Document from json

View File

@@ -1,10 +1,10 @@
use super::Collector;
use collector::SegmentCollector;
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
use crate::collector::SegmentCollector;
use crate::DocId;
use crate::Result;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
/// The `CountCollector` only counts how many
/// documents match the query.
@@ -94,8 +94,8 @@ impl SegmentCollector for SegmentCountCollector {
#[cfg(test)]
mod tests {
use super::{Count, SegmentCountCollector};
use collector::Collector;
use collector::SegmentCollector;
use crate::collector::Collector;
use crate::collector::SegmentCollector;
#[test]
fn test_count_collect_does_not_requires_scoring() {
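
A usage sketch of the `Count` collector described above, written against the 0.10-era API this diff targets (field names and heap size are arbitrary):

use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema);
    let mut index_writer = index.writer(50_000_000)?;
    index_writer.add_document(doc!(title => "sea whale"));
    index_writer.add_document(doc!(title => "the old man and the sea"));
    index_writer.commit()?;

    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("sea")?;

    // `Count` never asks for scores; it simply tallies matching documents.
    let n_matching: usize = searcher.search(&query, &Count)?;
    assert_eq!(n_matching, 2);
    Ok(())
}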

View File

@@ -1,9 +1,15 @@
use collector::Collector;
use collector::SegmentCollector;
use docset::SkipResult;
use fastfield::FacetReader;
use schema::Facet;
use schema::Field;
use crate::collector::Collector;
use crate::collector::SegmentCollector;
use crate::docset::SkipResult;
use crate::fastfield::FacetReader;
use crate::schema::Facet;
use crate::schema::Field;
use crate::DocId;
use crate::Result;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
use crate::TantivyError;
use std::cmp::Ordering;
use std::collections::btree_map;
use std::collections::BTreeMap;
@@ -12,12 +18,6 @@ use std::collections::BinaryHeap;
use std::collections::Bound;
use std::iter::Peekable;
use std::{u64, usize};
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
use TantivyError;
struct Hit<'a> {
count: u64,
@@ -27,13 +27,13 @@ struct Hit<'a> {
impl<'a> Eq for Hit<'a> {}
impl<'a> PartialEq<Hit<'a>> for Hit<'a> {
fn eq(&self, other: &Hit) -> bool {
fn eq(&self, other: &Hit<'_>) -> bool {
self.count == other.count
}
}
impl<'a> PartialOrd<Hit<'a>> for Hit<'a> {
fn partial_cmp(&self, other: &Hit) -> Option<Ordering> {
fn partial_cmp(&self, other: &Hit<'_>) -> Option<Ordering> {
Some(self.cmp(other))
}
}
@@ -398,7 +398,7 @@ impl<'a> Iterator for FacetChildIterator<'a> {
}
impl FacetCounts {
pub fn get<T>(&self, facet_from: T) -> FacetChildIterator
pub fn get<T>(&self, facet_from: T) -> FacetChildIterator<'_>
where
Facet: From<T>,
{
@@ -412,7 +412,8 @@ impl FacetCounts {
let facet_after = Facet::from_encoded_string(facet_after_bytes);
Bound::Excluded(facet_after)
};
let underlying: btree_map::Range<_, _> = self.facet_counts.range((left_bound, right_bound));
let underlying: btree_map::Range<'_, _, _> =
self.facet_counts.range((left_bound, right_bound));
FacetChildIterator { underlying }
}
@@ -453,12 +454,12 @@ impl FacetCounts {
#[cfg(test)]
mod tests {
use super::{FacetCollector, FacetCounts};
use core::Index;
use query::AllQuery;
use crate::core::Index;
use crate::query::AllQuery;
use crate::schema::{Document, Facet, Field, Schema};
use rand::distributions::Uniform;
use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng};
use schema::{Document, Facet, Field, Schema};
use std::iter;
#[test]
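
Several hunks in this file apply the 2018 idiom of writing elided lifetimes explicitly as `'_` (`Hit<'_>`, `FacetChildIterator<'_>`, `btree_map::Range<'_, _, _>`). A self-contained illustration:

use std::fmt;

struct Wrapper(Vec<u32>);

impl fmt::Debug for Wrapper {
    // The `Formatter` borrow is still elided, but 2018 style marks the
    // elision with `'_` rather than omitting it silently.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Wrapper({:?})", self.0)
    }
}

fn main() {
    println!("{:?}", Wrapper(vec![1, 2, 3]));
}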

View File

@@ -85,12 +85,12 @@ See the `custom_collector` example.
*/
use crate::DocId;
use crate::Result;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
use downcast_rs;
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
mod count_collector;
pub use self::count_collector::Count;

View File

@@ -1,30 +1,30 @@
use super::Collector;
use super::SegmentCollector;
use collector::Fruit;
use crate::collector::Fruit;
use crate::DocId;
use crate::Result;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
use crate::TantivyError;
use std::marker::PhantomData;
use std::ops::Deref;
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
use TantivyError;
pub struct MultiFruit {
sub_fruits: Vec<Option<Box<Fruit>>>,
sub_fruits: Vec<Option<Box<dyn Fruit>>>,
}
pub struct CollectorWrapper<TCollector: Collector>(TCollector);
impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
type Fruit = Box<Fruit>;
type Child = Box<BoxableSegmentCollector>;
type Fruit = Box<dyn Fruit>;
type Child = Box<dyn BoxableSegmentCollector>;
fn for_segment(
&self,
segment_local_id: u32,
reader: &SegmentReader,
) -> Result<Box<BoxableSegmentCollector>> {
) -> Result<Box<dyn BoxableSegmentCollector>> {
let child = self.0.for_segment(segment_local_id, reader)?;
Ok(Box::new(SegmentCollectorWrapper(child)))
}
@@ -33,7 +33,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
self.0.requires_scoring()
}
fn merge_fruits(&self, children: Vec<<Self as Collector>::Fruit>) -> Result<Box<Fruit>> {
fn merge_fruits(&self, children: Vec<<Self as Collector>::Fruit>) -> Result<Box<dyn Fruit>> {
let typed_fruit: Vec<TCollector::Fruit> = children
.into_iter()
.map(|untyped_fruit| {
@@ -50,21 +50,21 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
}
}
impl SegmentCollector for Box<BoxableSegmentCollector> {
type Fruit = Box<Fruit>;
impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
type Fruit = Box<dyn Fruit>;
fn collect(&mut self, doc: u32, score: f32) {
self.as_mut().collect(doc, score);
}
fn harvest(self) -> Box<Fruit> {
fn harvest(self) -> Box<dyn Fruit> {
BoxableSegmentCollector::harvest_from_box(self)
}
}
pub trait BoxableSegmentCollector {
fn collect(&mut self, doc: u32, score: f32);
fn harvest_from_box(self: Box<Self>) -> Box<Fruit>;
fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit>;
}
pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegmentCollector);
@@ -76,7 +76,7 @@ impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
self.0.collect(doc, score);
}
fn harvest_from_box(self: Box<Self>) -> Box<Fruit> {
fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit> {
Box::new(self.0.harvest())
}
}
@@ -157,8 +157,9 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
#[allow(clippy::type_complexity)]
#[derive(Default)]
pub struct MultiCollector<'a> {
collector_wrappers:
Vec<Box<Collector<Child = Box<BoxableSegmentCollector>, Fruit = Box<Fruit>> + 'a>>,
collector_wrappers: Vec<
Box<dyn Collector<Child = Box<dyn BoxableSegmentCollector>, Fruit = Box<dyn Fruit>> + 'a>,
>,
}
impl<'a> MultiCollector<'a> {
@@ -207,7 +208,7 @@ impl<'a> Collector for MultiCollector<'a> {
}
fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> Result<MultiFruit> {
let mut segment_fruits_list: Vec<Vec<Box<Fruit>>> = (0..self.collector_wrappers.len())
let mut segment_fruits_list: Vec<Vec<Box<dyn Fruit>>> = (0..self.collector_wrappers.len())
.map(|_| Vec::with_capacity(segments_multifruits.len()))
.collect::<Vec<_>>();
for segment_multifruit in segments_multifruits {
@@ -230,7 +231,7 @@ impl<'a> Collector for MultiCollector<'a> {
}
pub struct MultiCollectorChild {
children: Vec<Box<BoxableSegmentCollector>>,
children: Vec<Box<dyn BoxableSegmentCollector>>,
}
impl SegmentCollector for MultiCollectorChild {
@@ -257,12 +258,12 @@ impl SegmentCollector for MultiCollectorChild {
mod tests {
use super::*;
use collector::{Count, TopDocs};
use query::TermQuery;
use schema::IndexRecordOption;
use schema::{Schema, TEXT};
use Index;
use Term;
use crate::collector::{Count, TopDocs};
use crate::query::TermQuery;
use crate::schema::IndexRecordOption;
use crate::schema::{Schema, TEXT};
use crate::Index;
use crate::Term;
#[test]
fn test_multi_collector() {
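
The dominant change in this file is `Box<Trait>` becoming `Box<dyn Trait>`: the 2018 edition lints against bare trait objects. A minimal stand-in for the type-erased fruit pattern used here (the types below are hypothetical, not tantivy's):

trait Fruit {
    fn describe(&self) -> String;
}

struct CountFruit(usize);

impl Fruit for CountFruit {
    fn describe(&self) -> String {
        format!("{} matching docs", self.0)
    }
}

// Erasing the concrete fruit type behind `dyn Fruit` is what lets
// `MultiFruit` hold results from heterogeneous collectors.
fn harvest() -> Box<dyn Fruit> {
    Box::new(CountFruit(42))
}

fn main() {
    let fruit: Box<dyn Fruit> = harvest();
    println!("{}", fruit.describe());
}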

View File

@@ -1,12 +1,12 @@
use super::*;
use core::SegmentReader;
use fastfield::BytesFastFieldReader;
use fastfield::FastFieldReader;
use schema::Field;
use DocAddress;
use DocId;
use Score;
use SegmentLocalId;
use crate::core::SegmentReader;
use crate::fastfield::BytesFastFieldReader;
use crate::fastfield::FastFieldReader;
use crate::schema::Field;
use crate::DocAddress;
use crate::DocId;
use crate::Score;
use crate::SegmentLocalId;
/// Stores all of the doc ids.
/// This collector is only used for tests.

View File

@@ -1,11 +1,11 @@
use crate::DocAddress;
use crate::DocId;
use crate::Result;
use crate::SegmentLocalId;
use crate::SegmentReader;
use serde::export::PhantomData;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use DocAddress;
use DocId;
use Result;
use SegmentLocalId;
use SegmentReader;
/// Contains a feature (field, score, etc.) of a document along with the document address.
///
@@ -178,8 +178,8 @@ impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
#[cfg(test)]
mod tests {
use super::{TopCollector, TopSegmentCollector};
use DocAddress;
use Score;
use crate::DocAddress;
use crate::Score;
#[test]
fn test_top_collector_not_at_capacity() {

View File

@@ -1,16 +1,16 @@
use super::Collector;
use collector::top_collector::TopCollector;
use collector::top_collector::TopSegmentCollector;
use collector::SegmentCollector;
use fastfield::FastFieldReader;
use fastfield::FastValue;
use schema::Field;
use crate::collector::top_collector::TopCollector;
use crate::collector::top_collector::TopSegmentCollector;
use crate::collector::SegmentCollector;
use crate::fastfield::FastFieldReader;
use crate::fastfield::FastValue;
use crate::schema::Field;
use crate::DocAddress;
use crate::Result;
use crate::SegmentLocalId;
use crate::SegmentReader;
use crate::TantivyError;
use std::marker::PhantomData;
use DocAddress;
use Result;
use SegmentLocalId;
use SegmentReader;
use TantivyError;
/// The Top Field Collector keeps track of the K documents
/// sorted by a fast field in the index
@@ -159,17 +159,17 @@ impl<T: FastValue + PartialOrd + Send + Sync + 'static> SegmentCollector
#[cfg(test)]
mod tests {
use super::TopDocsByField;
use collector::Collector;
use collector::TopDocs;
use query::Query;
use query::QueryParser;
use schema::Field;
use schema::IntOptions;
use schema::{Schema, FAST, TEXT};
use DocAddress;
use Index;
use IndexWriter;
use TantivyError;
use crate::collector::Collector;
use crate::collector::TopDocs;
use crate::query::Query;
use crate::query::QueryParser;
use crate::schema::Field;
use crate::schema::IntOptions;
use crate::schema::{Schema, FAST, TEXT};
use crate::DocAddress;
use crate::Index;
use crate::IndexWriter;
use crate::TantivyError;
const TITLE: &str = "title";
const SIZE: &str = "size";
@@ -258,7 +258,7 @@ mod tests {
query_field: Field,
schema: Schema,
mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
) -> (Index, Box<Query>) {
) -> (Index, Box<dyn Query>) {
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();

View File

@@ -1,16 +1,16 @@
use super::Collector;
use collector::top_collector::TopCollector;
use collector::top_collector::TopSegmentCollector;
use collector::SegmentCollector;
use collector::TopDocsByField;
use fastfield::FastValue;
use schema::Field;
use DocAddress;
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
use crate::collector::top_collector::TopCollector;
use crate::collector::top_collector::TopSegmentCollector;
use crate::collector::SegmentCollector;
use crate::collector::TopDocsByField;
use crate::fastfield::FastValue;
use crate::schema::Field;
use crate::DocAddress;
use crate::DocId;
use crate::Result;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
/// The Top Score Collector keeps track of the K documents
/// sorted by their score.
@@ -128,12 +128,12 @@ impl SegmentCollector for TopScoreSegmentCollector {
#[cfg(test)]
mod tests {
use super::TopDocs;
use query::QueryParser;
use schema::Schema;
use schema::TEXT;
use DocAddress;
use Index;
use Score;
use crate::query::QueryParser;
use crate::schema::Schema;
use crate::schema::TEXT;
use crate::DocAddress;
use crate::Index;
use crate::Score;
fn make_index() -> Index {
let mut schema_builder = Schema::builder();

View File

@@ -5,7 +5,7 @@ use std::u64;
pub(crate) struct TinySet(u64);
impl fmt::Debug for TinySet {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.into_iter().collect::<Vec<u32>>().fmt(f)
}
}
@@ -204,12 +204,12 @@ mod tests {
use super::BitSet;
use super::TinySet;
use docset::DocSet;
use query::BitSetDocSet;
use crate::docset::DocSet;
use crate::query::BitSetDocSet;
use crate::tests;
use crate::tests::generate_nonunique_unsorted;
use std::collections::BTreeSet;
use std::collections::HashSet;
use tests;
use tests::generate_nonunique_unsorted;
#[test]
fn test_tiny_set() {

View File

@@ -1,11 +1,11 @@
use common::BinarySerializable;
use common::CountingWriter;
use common::VInt;
use directory::ReadOnlySource;
use directory::WritePtr;
use schema::Field;
use space_usage::FieldUsage;
use space_usage::PerFieldSpaceUsage;
use crate::common::BinarySerializable;
use crate::common::CountingWriter;
use crate::common::VInt;
use crate::directory::ReadOnlySource;
use crate::directory::WritePtr;
use crate::schema::Field;
use crate::space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage;
use std::collections::HashMap;
use std::io::Write;
use std::io::{self, Read};
@@ -185,10 +185,10 @@ impl CompositeFile {
mod test {
use super::{CompositeFile, CompositeWrite};
use common::BinarySerializable;
use common::VInt;
use directory::{Directory, RAMDirectory};
use schema::Field;
use crate::common::BinarySerializable;
use crate::common::VInt;
use crate::directory::{Directory, RAMDirectory};
use crate::schema::Field;
use std::io::Write;
use std::path::Path;

View File

@@ -1,6 +1,6 @@
use crate::common::Endianness;
use crate::common::VInt;
use byteorder::{ReadBytesExt, WriteBytesExt};
use common::Endianness;
use common::VInt;
use std::fmt;
use std::io;
use std::io::Read;
@@ -136,7 +136,7 @@ impl BinarySerializable for String {
pub mod test {
use super::*;
use common::VInt;
use crate::common::VInt;
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new();

View File

@@ -30,16 +30,16 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
let val = u64::from(val);
const STOP_BIT: u64 = 128u64;
match val {
0...STOP_1 => (val | STOP_BIT, 1),
START_2...STOP_2 => (
0..=STOP_1 => (val | STOP_BIT, 1),
START_2..=STOP_2 => (
(val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
2,
),
START_3...STOP_3 => (
START_3..=STOP_3 => (
(val & MASK_1) | ((val & MASK_2) << 1) | ((val & MASK_3) << 2) | (STOP_BIT << (8 * 2)),
3,
),
START_4...STOP_4 => (
START_4..=STOP_4 => (
(val & MASK_1)
| ((val & MASK_2) << 1)
| ((val & MASK_3) << 2)
@@ -171,8 +171,8 @@ mod tests {
use super::serialize_vint_u32;
use super::VInt;
use crate::common::BinarySerializable;
use byteorder::{ByteOrder, LittleEndian};
use common::BinarySerializable;
fn aux_test_vint(val: u64) {
let mut v = [14u8; 10];
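
The pattern change above is the 2018 replacement of the deprecated `...` inclusive-range syntax with `..=`. A simplified, self-contained sketch of the byte-length dispatch (thresholds match 7-bit payload groups; the masks from the real code are omitted):

fn vint_num_bytes(val: u64) -> usize {
    const STOP_1: u64 = (1 << 7) - 1;
    const START_2: u64 = STOP_1 + 1;
    const STOP_2: u64 = (1 << 14) - 1;
    const START_3: u64 = STOP_2 + 1;
    const STOP_3: u64 = (1 << 21) - 1;
    match val {
        0..=STOP_1 => 1,       // value fits in one 7-bit payload byte
        START_2..=STOP_2 => 2, // two bytes
        START_3..=STOP_3 => 3, // three bytes
        _ => 4,                // larger values (truncated for brevity)
    }
}

fn main() {
    assert_eq!(vint_num_bytes(127), 1);
    assert_eq!(vint_num_bytes(128), 2);
    assert_eq!(vint_num_bytes(1 << 20), 3);
}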

View File

@@ -1,6 +1,6 @@
use crate::Result;
use crossbeam::channel;
use scoped_pool::{Pool, ThreadConfig};
use Result;
/// Search executor, whether search requests are single-threaded or multithreaded.
///

View File

@@ -1,38 +1,38 @@
use super::segment::create_segment;
use super::segment::Segment;
use core::Executor;
use core::IndexMeta;
use core::SegmentId;
use core::SegmentMeta;
use core::META_FILEPATH;
use directory::ManagedDirectory;
use crate::core::Executor;
use crate::core::IndexMeta;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::META_FILEPATH;
use crate::directory::ManagedDirectory;
#[cfg(feature = "mmap")]
use directory::MmapDirectory;
use directory::INDEX_WRITER_LOCK;
use directory::{Directory, RAMDirectory};
use error::DataCorruption;
use error::TantivyError;
use indexer::index_writer::open_index_writer;
use indexer::index_writer::HEAP_SIZE_MIN;
use indexer::segment_updater::save_new_metas;
use crate::directory::MmapDirectory;
use crate::directory::INDEX_WRITER_LOCK;
use crate::directory::{Directory, RAMDirectory};
use crate::error::DataCorruption;
use crate::error::TantivyError;
use crate::indexer::index_writer::open_index_writer;
use crate::indexer::index_writer::HEAP_SIZE_MIN;
use crate::indexer::segment_updater::save_new_metas;
use crate::reader::IndexReader;
use crate::reader::IndexReaderBuilder;
use crate::schema::Field;
use crate::schema::FieldType;
use crate::schema::Schema;
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::TokenizerManager;
use crate::IndexWriter;
use crate::Result;
use num_cpus;
use reader::IndexReader;
use reader::IndexReaderBuilder;
use schema::Field;
use schema::FieldType;
use schema::Schema;
use serde_json;
use std::borrow::BorrowMut;
use std::fmt;
#[cfg(feature = "mmap")]
use std::path::Path;
use std::sync::Arc;
use tokenizer::BoxedTokenizer;
use tokenizer::TokenizerManager;
use IndexWriter;
use Result;
fn load_metas(directory: &Directory) -> Result<IndexMeta> {
fn load_metas(directory: &dyn Directory) -> Result<IndexMeta> {
let meta_data = directory.atomic_read(&META_FILEPATH)?;
let meta_string = String::from_utf8_lossy(&meta_data);
serde_json::from_str(&meta_string)
@@ -169,11 +169,11 @@ impl Index {
}
/// Helper to access the tokenizer associated to a specific field.
pub fn tokenizer_for_field(&self, field: Field) -> Result<Box<BoxedTokenizer>> {
pub fn tokenizer_for_field(&self, field: Field) -> Result<Box<dyn BoxedTokenizer>> {
let field_entry = self.schema.get_field_entry(field);
let field_type = field_entry.field_type();
let tokenizer_manager: &TokenizerManager = self.tokenizers();
let tokenizer_name_opt: Option<Box<BoxedTokenizer>> = match field_type {
let tokenizer_name_opt: Option<Box<dyn BoxedTokenizer>> = match field_type {
FieldType::Str(text_options) => text_options
.get_indexing_options()
.map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
@@ -346,22 +346,22 @@ impl Index {
}
impl fmt::Debug for Index {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Index({:?})", self.directory)
}
}
#[cfg(test)]
mod tests {
use directory::RAMDirectory;
use schema::Field;
use schema::{Schema, INDEXED, TEXT};
use crate::directory::RAMDirectory;
use crate::schema::Field;
use crate::schema::{Schema, INDEXED, TEXT};
use crate::Index;
use crate::IndexReader;
use crate::IndexWriter;
use crate::ReloadPolicy;
use std::thread;
use std::time::Duration;
use Index;
use IndexReader;
use IndexWriter;
use ReloadPolicy;
#[test]
fn test_indexer_for_field() {
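
Beyond the `crate::` rewrites, this file changes trait objects in argument and return position to `dyn` form (`&dyn Directory`, `Box<dyn BoxedTokenizer>`). A stand-in sketch of the `load_metas(&dyn Directory)` shape (trait and types are simplified stand-ins):

trait Directory {
    fn atomic_read(&self, path: &str) -> Vec<u8>;
}

// Taking `&dyn Directory` keeps `load_metas` a single non-generic function
// that works with any directory implementation.
fn load_metas(directory: &dyn Directory) -> String {
    String::from_utf8_lossy(&directory.atomic_read("meta.json")).into_owned()
}

struct RamDirectory;

impl Directory for RamDirectory {
    fn atomic_read(&self, _path: &str) -> Vec<u8> {
        br#"{"segments": []}"#.to_vec()
    }
}

fn main() {
    println!("{}", load_metas(&RamDirectory));
}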

View File

@@ -1,8 +1,8 @@
use core::SegmentMeta;
use schema::Schema;
use crate::core::SegmentMeta;
use crate::schema::Schema;
use crate::Opstamp;
use serde_json;
use std::fmt;
use Opstamp;
/// Meta information about the `Index`.
///
@@ -46,7 +46,7 @@ impl IndexMeta {
}
impl fmt::Debug for IndexMeta {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}",
@@ -60,7 +60,7 @@ impl fmt::Debug for IndexMeta {
mod tests {
use super::IndexMeta;
use schema::{Schema, TEXT};
use crate::schema::{Schema, TEXT};
use serde_json;
#[test]

View File

@@ -1,13 +1,13 @@
use common::BinarySerializable;
use directory::ReadOnlySource;
use crate::common::BinarySerializable;
use crate::directory::ReadOnlySource;
use crate::positions::PositionReader;
use crate::postings::TermInfo;
use crate::postings::{BlockSegmentPostings, SegmentPostings};
use crate::schema::FieldType;
use crate::schema::IndexRecordOption;
use crate::schema::Term;
use crate::termdict::TermDictionary;
use owned_read::OwnedRead;
use positions::PositionReader;
use postings::TermInfo;
use postings::{BlockSegmentPostings, SegmentPostings};
use schema::FieldType;
use schema::IndexRecordOption;
use schema::Term;
use termdict::TermDictionary;
/// The inverted index reader is in charge of accessing
/// the inverted index associated to a specific field.

View File

@@ -1,26 +1,26 @@
use collector::Collector;
use collector::SegmentCollector;
use core::Executor;
use core::InvertedIndexReader;
use core::SegmentReader;
use query::Query;
use query::Scorer;
use query::Weight;
use schema::Document;
use schema::Schema;
use schema::{Field, Term};
use space_usage::SearcherSpaceUsage;
use crate::collector::Collector;
use crate::collector::SegmentCollector;
use crate::core::Executor;
use crate::core::InvertedIndexReader;
use crate::core::SegmentReader;
use crate::query::Query;
use crate::query::Scorer;
use crate::query::Weight;
use crate::schema::Document;
use crate::schema::Schema;
use crate::schema::{Field, Term};
use crate::space_usage::SearcherSpaceUsage;
use crate::store::StoreReader;
use crate::termdict::TermMerger;
use crate::DocAddress;
use crate::Index;
use crate::Result;
use std::fmt;
use std::sync::Arc;
use store::StoreReader;
use termdict::TermMerger;
use DocAddress;
use Index;
use Result;
fn collect_segment<C: Collector>(
collector: &C,
weight: &Weight,
weight: &dyn Weight,
segment_ord: u32,
segment_reader: &SegmentReader,
) -> Result<C::Fruit> {
@@ -132,7 +132,7 @@ impl Searcher {
///
/// Finally, the Collector merges each of the child collectors into itself for result usability
/// by the caller.
pub fn search<C: Collector>(&self, query: &Query, collector: &C) -> Result<C::Fruit> {
pub fn search<C: Collector>(&self, query: &dyn Query, collector: &C) -> Result<C::Fruit> {
let executor = self.index.search_executor();
self.search_with_executor(query, collector, executor)
}
@@ -151,7 +151,7 @@ impl Searcher {
/// hurt it. It will however, decrease the average response time.
pub fn search_with_executor<C: Collector>(
&self,
query: &Query,
query: &dyn Query,
collector: &C,
executor: &Executor,
) -> Result<C::Fruit> {
@@ -203,7 +203,7 @@ impl FieldSearcher {
/// Returns a stream over all of the sorted unique terms
/// for the given field.
pub fn terms(&self) -> TermMerger {
pub fn terms(&self) -> TermMerger<'_> {
let term_streamers: Vec<_> = self
.inv_index_readers
.iter()
@@ -214,7 +214,7 @@ impl FieldSearcher {
}
impl fmt::Debug for Searcher {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let segment_ids = self
.segment_readers
.iter()
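
The collect-then-merge flow this file implements (one `SegmentCollector` per segment, fruits merged at the end) can be sketched with a minimal stand-in (hypothetical trait, far simpler than tantivy's):

trait Collector {
    type Fruit;
    fn collect_segment(&self, docs: &[u32]) -> Self::Fruit;
    fn merge_fruits(&self, fruits: Vec<Self::Fruit>) -> Self::Fruit;
}

struct CountCollector;

impl Collector for CountCollector {
    type Fruit = usize;
    fn collect_segment(&self, docs: &[u32]) -> usize {
        docs.len()
    }
    fn merge_fruits(&self, fruits: Vec<usize>) -> usize {
        fruits.into_iter().sum()
    }
}

fn main() {
    // One doc-id list per segment; each segment is collected independently
    // (possibly on the multithreaded executor), then the fruits are merged.
    let segments: Vec<Vec<u32>> = vec![vec![0, 2, 5], vec![1, 4]];
    let collector = CountCollector;
    let fruits: Vec<usize> = segments
        .iter()
        .map(|docs| collector.collect_segment(docs))
        .collect();
    assert_eq!(collector.merge_fruits(fruits), 5);
}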

View File

@@ -1,17 +1,17 @@
use super::SegmentComponent;
use core::Index;
use core::SegmentId;
use core::SegmentMeta;
use directory::error::{OpenReadError, OpenWriteError};
use directory::Directory;
use directory::{ReadOnlySource, WritePtr};
use indexer::segment_serializer::SegmentSerializer;
use schema::Schema;
use crate::core::Index;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::Directory;
use crate::directory::{ReadOnlySource, WritePtr};
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::schema::Schema;
use crate::Opstamp;
use crate::Result;
use std::fmt;
use std::path::PathBuf;
use std::result;
use Opstamp;
use Result;
/// A segment is a piece of the index.
#[derive(Clone)]
@@ -21,7 +21,7 @@ pub struct Segment {
}
impl fmt::Debug for Segment {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Segment({:?})", self.id().uuid_string())
}
}

View File

@@ -62,7 +62,7 @@ impl SegmentId {
}
impl fmt::Debug for SegmentId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Seg({:?})", self.short_uuid_string())
}
}

View File

@@ -1,11 +1,11 @@
use super::SegmentComponent;
use crate::core::SegmentId;
use crate::Opstamp;
use census::{Inventory, TrackedObject};
use core::SegmentId;
use serde;
use std::collections::HashSet;
use std::fmt;
use std::path::PathBuf;
use Opstamp;
lazy_static! {
static ref INVENTORY: Inventory<InnerSegmentMeta> = { Inventory::new() };
@@ -27,7 +27,7 @@ pub struct SegmentMeta {
}
impl fmt::Debug for SegmentMeta {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
self.tracked.fmt(f)
}
}

View File

@@ -1,26 +1,26 @@
use common::CompositeFile;
use common::HasLen;
use core::InvertedIndexReader;
use core::Segment;
use core::SegmentComponent;
use core::SegmentId;
use directory::ReadOnlySource;
use fastfield::DeleteBitSet;
use fastfield::FacetReader;
use fastfield::FastFieldReaders;
use fieldnorm::FieldNormReader;
use schema::Field;
use schema::FieldType;
use schema::Schema;
use space_usage::SegmentSpaceUsage;
use crate::common::CompositeFile;
use crate::common::HasLen;
use crate::core::InvertedIndexReader;
use crate::core::Segment;
use crate::core::SegmentComponent;
use crate::core::SegmentId;
use crate::directory::ReadOnlySource;
use crate::fastfield::DeleteBitSet;
use crate::fastfield::FacetReader;
use crate::fastfield::FastFieldReaders;
use crate::fieldnorm::FieldNormReader;
use crate::schema::Field;
use crate::schema::FieldType;
use crate::schema::Schema;
use crate::space_usage::SegmentSpaceUsage;
use crate::store::StoreReader;
use crate::termdict::TermDictionary;
use crate::DocId;
use crate::Result;
use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
use std::sync::RwLock;
use store::StoreReader;
use termdict::TermDictionary;
use DocId;
use Result;
/// Entry point to access all of the data structures of the `Segment`
///
@@ -243,10 +243,9 @@ impl SegmentReader {
let postings_source = postings_source_opt.unwrap();
let termdict_source = self
.termdict_composite
.open_read(field)
.expect("Failed to open field term dictionary in composite file. Is the field indexed?");
let termdict_source = self.termdict_composite.open_read(field).expect(
"Failed to open field term dictionary in composite file. Is the field indexed?",
);
let positions_source = self
.positions_composite
@@ -296,7 +295,7 @@ impl SegmentReader {
}
/// Returns an iterator that will iterate over the alive document ids
pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator {
pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator<'_> {
SegmentReaderAliveDocsIterator::new(&self)
}
@@ -320,7 +319,7 @@ impl SegmentReader {
}
impl fmt::Debug for SegmentReader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "SegmentReader({:?})", self.segment_id)
}
}
@@ -373,9 +372,9 @@ impl<'a> Iterator for SegmentReaderAliveDocsIterator<'a> {
#[cfg(test)]
mod test {
use core::Index;
use schema::{Schema, Term, STORED, TEXT};
use DocId;
use crate::core::Index;
use crate::schema::{Schema, Term, STORED, TEXT};
use crate::DocId;
#[test]
fn test_alive_docs_iterator() {

View File

@@ -1,9 +1,9 @@
use directory::directory_lock::Lock;
use directory::error::LockError;
use directory::error::{DeleteError, OpenReadError, OpenWriteError};
use directory::WatchCallback;
use directory::WatchHandle;
use directory::{ReadOnlySource, WritePtr};
use crate::directory::directory_lock::Lock;
use crate::directory::error::LockError;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::WatchCallback;
use crate::directory::WatchHandle;
use crate::directory::{ReadOnlySource, WritePtr};
use std::fmt;
use std::io;
use std::io::Write;
@@ -48,10 +48,10 @@ impl RetryPolicy {
///
/// It is transparently associated with a lock file that gets deleted
/// on `Drop`. The lock is released automatically on `Drop`.
pub struct DirectoryLock(Box<Drop + Send + Sync + 'static>);
pub struct DirectoryLock(Box<dyn Drop + Send + Sync + 'static>);
struct DirectoryLockGuard {
directory: Box<Directory>,
directory: Box<dyn Directory>,
path: PathBuf,
}
@@ -76,7 +76,7 @@ enum TryAcquireLockError {
fn try_acquire_lock(
filepath: &Path,
directory: &mut Directory,
directory: &mut dyn Directory,
) -> Result<DirectoryLock, TryAcquireLockError> {
let mut write = directory.open_write(filepath).map_err(|e| match e {
OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
@@ -210,14 +210,14 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// DirectoryClone
pub trait DirectoryClone {
/// Clones the directory and boxes the clone
fn box_clone(&self) -> Box<Directory>;
fn box_clone(&self) -> Box<dyn Directory>;
}
impl<T> DirectoryClone for T
where
T: 'static + Directory + Clone,
{
fn box_clone(&self) -> Box<Directory> {
fn box_clone(&self) -> Box<dyn Directory> {
Box::new(self.clone())
}
}
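
The `DirectoryClone` hunk above is the standard way to clone through a trait object: `Clone` itself is not object-safe, so cloning is routed through a `box_clone` method with a blanket impl. A self-contained sketch:

trait Directory: DirectoryClone {
    fn name(&self) -> String;
}

trait DirectoryClone {
    fn box_clone(&self) -> Box<dyn Directory>;
}

// Blanket impl: any `'static + Clone` directory gets `box_clone` for free.
impl<T> DirectoryClone for T
where
    T: 'static + Directory + Clone,
{
    fn box_clone(&self) -> Box<dyn Directory> {
        Box::new(self.clone())
    }
}

#[derive(Clone)]
struct RamDirectory;

impl Directory for RamDirectory {
    fn name(&self) -> String {
        "ram".to_string()
    }
}

fn main() {
    let directory: Box<dyn Directory> = Box::new(RamDirectory);
    let clone = directory.box_clone();
    assert_eq!(clone.name(), "ram");
}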

View File

@@ -33,7 +33,7 @@ impl Into<io::Error> for IOError {
}
impl fmt::Display for IOError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.path {
Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err),
None => write!(f, "io error occurred: '{}'", self.err),
@@ -46,7 +46,7 @@ impl StdError for IOError {
"io error occurred"
}
fn cause(&self) -> Option<&StdError> {
fn cause(&self) -> Option<&dyn StdError> {
Some(&self.err)
}
}
@@ -84,7 +84,7 @@ impl From<io::Error> for OpenDirectoryError {
}
impl fmt::Display for OpenDirectoryError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
OpenDirectoryError::DoesNotExist(ref path) => {
write!(f, "the underlying directory '{:?}' does not exist", path)
@@ -106,7 +106,7 @@ impl StdError for OpenDirectoryError {
"error occurred while opening a directory"
}
fn cause(&self) -> Option<&StdError> {
fn cause(&self) -> Option<&dyn StdError> {
None
}
}
@@ -129,7 +129,7 @@ impl From<IOError> for OpenWriteError {
}
impl fmt::Display for OpenWriteError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
OpenWriteError::FileAlreadyExists(ref path) => {
write!(f, "the file '{:?}' already exists", path)
@@ -148,7 +148,7 @@ impl StdError for OpenWriteError {
"error occurred while opening a file for writing"
}
fn cause(&self) -> Option<&StdError> {
fn cause(&self) -> Option<&dyn StdError> {
match *self {
OpenWriteError::FileAlreadyExists(_) => None,
OpenWriteError::IOError(ref err) => Some(err),
@@ -173,7 +173,7 @@ impl From<IOError> for OpenReadError {
}
impl fmt::Display for OpenReadError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
OpenReadError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path)
@@ -192,7 +192,7 @@ impl StdError for OpenReadError {
"error occurred while opening a file for reading"
}
fn cause(&self) -> Option<&StdError> {
fn cause(&self) -> Option<&dyn StdError> {
match *self {
OpenReadError::FileDoesNotExist(_) => None,
OpenReadError::IOError(ref err) => Some(err),
@@ -217,7 +217,7 @@ impl From<IOError> for DeleteError {
}
impl fmt::Display for DeleteError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
DeleteError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path)
@@ -234,7 +234,7 @@ impl StdError for DeleteError {
"error occurred while deleting a file"
}
fn cause(&self) -> Option<&StdError> {
fn cause(&self) -> Option<&dyn StdError> {
match *self {
DeleteError::FileDoesNotExist(_) => None,
DeleteError::IOError(ref err) => Some(err),
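
These hunks only add `dyn` to the (since-deprecated) `Error::cause` signature. A self-contained sketch of the chained-cause pattern, using `source`, its modern replacement:

use std::error::Error;
use std::fmt;
use std::io;

#[derive(Debug)]
struct IOError {
    err: io::Error,
}

impl fmt::Display for IOError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "io error occurred: '{}'", self.err)
    }
}

impl Error for IOError {
    // Exposing the underlying error lets callers walk the cause chain.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Some(&self.err)
    }
}

fn main() {
    let e = IOError {
        err: io::Error::new(io::ErrorKind::NotFound, "meta.json"),
    };
    println!("{} (source: {:?})", e, e.source().map(|s| s.to_string()));
}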

View File

@@ -1,11 +1,13 @@
use core::MANAGED_FILEPATH;
use directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use directory::DirectoryLock;
use directory::Lock;
use directory::META_LOCK;
use directory::{ReadOnlySource, WritePtr};
use directory::{WatchCallback, WatchHandle};
use error::DataCorruption;
use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::DirectoryLock;
use crate::directory::Lock;
use crate::directory::META_LOCK;
use crate::directory::{ReadOnlySource, WritePtr};
use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption;
use crate::Directory;
use crate::Result;
use serde_json;
use std::collections::HashSet;
use std::io;
@@ -14,8 +16,6 @@ use std::path::{Path, PathBuf};
use std::result;
use std::sync::RwLockWriteGuard;
use std::sync::{Arc, RwLock};
use Directory;
use Result;
/// Returns true iff the file is "managed".
/// Non-managed files are not subject to garbage collection.
@@ -39,7 +39,7 @@ fn is_managed(path: &Path) -> bool {
/// useful anymore.
#[derive(Debug)]
pub struct ManagedDirectory {
directory: Box<Directory>,
directory: Box<dyn Directory>,
meta_informations: Arc<RwLock<MetaInformation>>,
}
@@ -51,8 +51,8 @@ struct MetaInformation {
/// Saves the file containing the list of existing files
/// that were created by tantivy.
fn save_managed_paths(
directory: &mut Directory,
wlock: &RwLockWriteGuard<MetaInformation>,
directory: &mut dyn Directory,
wlock: &RwLockWriteGuard<'_, MetaInformation>,
) -> io::Result<()> {
let mut w = serde_json::to_vec(&wlock.managed_paths)?;
writeln!(&mut w)?;
@@ -272,7 +272,7 @@ mod tests {
static ref TEST_PATH2: &'static Path = Path::new("some_path_for_test2");
}
use directory::MmapDirectory;
use crate::directory::MmapDirectory;
use std::io::Write;
#[test]

View File

@@ -1,23 +1,25 @@
extern crate fs2;
extern crate notify;
use fs2;
use notify;
use self::fs2::FileExt;
use self::notify::RawEvent;
use self::notify::RecursiveMode;
use self::notify::Watcher;
use crate::core::META_FILEPATH;
use crate::directory::error::LockError;
use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::read_only_source::BoxedData;
use crate::directory::Directory;
use crate::directory::DirectoryLock;
use crate::directory::Lock;
use crate::directory::ReadOnlySource;
use crate::directory::WatchCallback;
use crate::directory::WatchCallbackList;
use crate::directory::WatchHandle;
use crate::directory::WritePtr;
use atomicwrites;
use core::META_FILEPATH;
use directory::error::LockError;
use directory::error::{DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use directory::read_only_source::BoxedData;
use directory::Directory;
use directory::DirectoryLock;
use directory::Lock;
use directory::ReadOnlySource;
use directory::WatchCallback;
use directory::WatchCallbackList;
use directory::WatchHandle;
use directory::WritePtr;
use memmap::Mmap;
use std::collections::HashMap;
use std::convert::From;
@@ -254,7 +256,7 @@ impl MmapDirectoryInner {
}
impl fmt::Debug for MmapDirectory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "MmapDirectory({:?})", self.inner.root_path)
}
}
@@ -525,13 +527,13 @@ mod tests {
// The following tests are specific to the MmapDirectory
use super::*;
use schema::{Schema, SchemaBuilder, TEXT};
use crate::schema::{Schema, SchemaBuilder, TEXT};
use crate::Index;
use crate::ReloadPolicy;
use std::fs;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use std::time::Duration;
use Index;
use ReloadPolicy;
#[test]
fn test_open_non_existant_path() {

View File

@@ -39,7 +39,7 @@ impl<T: Seek + Write> SeekableWrite for T {}
///
/// A `WritePtr` is required to implement both `Write`
/// and `Seek`.
pub type WritePtr = BufWriter<Box<SeekableWrite>>;
pub type WritePtr = BufWriter<Box<dyn SeekableWrite>>;
#[cfg(test)]
mod tests;

View File

@@ -1,8 +1,8 @@
use core::META_FILEPATH;
use directory::error::{DeleteError, OpenReadError, OpenWriteError};
use directory::WatchCallbackList;
use directory::WritePtr;
use directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use crate::core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::WatchCallbackList;
use crate::directory::WritePtr;
use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use std::collections::HashMap;
use std::fmt;
use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write};
@@ -110,7 +110,7 @@ impl InnerDirectory {
}
impl fmt::Debug for RAMDirectory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "RAMDirectory")
}
}

View File

@@ -1,9 +1,9 @@
use common::HasLen;
use crate::common::HasLen;
use stable_deref_trait::{CloneStableDeref, StableDeref};
use std::ops::Deref;
use std::sync::Arc;
pub type BoxedData = Box<Deref<Target = [u8]> + Send + Sync + 'static>;
pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
/// Read object that represents files in tantivy.
///

View File

@@ -34,7 +34,7 @@ fn ram_directory_panics_if_flush_forgotten() {
assert!(write_file.write_all(&[4]).is_ok());
}
fn test_simple(directory: &mut Directory) {
fn test_simple(directory: &mut dyn Directory) {
{
let mut write_file = directory.open_write(*TEST_PATH).unwrap();
assert!(directory.exists(*TEST_PATH));
@@ -52,7 +52,7 @@ fn test_simple(directory: &mut Directory) {
assert!(!directory.exists(*TEST_PATH));
}
fn test_seek(directory: &mut Directory) {
fn test_seek(directory: &mut dyn Directory) {
{
{
let mut write_file = directory.open_write(*TEST_PATH).unwrap();
@@ -69,7 +69,7 @@ fn test_seek(directory: &mut Directory) {
assert!(directory.delete(*TEST_PATH).is_ok());
}
fn test_rewrite_forbidden(directory: &mut Directory) {
fn test_rewrite_forbidden(directory: &mut dyn Directory) {
{
directory.open_write(*TEST_PATH).unwrap();
assert!(directory.exists(*TEST_PATH));
@@ -80,7 +80,7 @@ fn test_rewrite_forbidden(directory: &mut Directory) {
assert!(directory.delete(*TEST_PATH).is_ok());
}
fn test_write_create_the_file(directory: &mut Directory) {
fn test_write_create_the_file(directory: &mut dyn Directory) {
{
assert!(directory.open_read(*TEST_PATH).is_err());
let _w = directory.open_write(*TEST_PATH).unwrap();
@@ -90,7 +90,7 @@ fn test_write_create_the_file(directory: &mut Directory) {
}
}
fn test_directory_delete(directory: &mut Directory) {
fn test_directory_delete(directory: &mut dyn Directory) {
assert!(directory.open_read(*TEST_PATH).is_err());
let mut write_file = directory.open_write(*TEST_PATH).unwrap();
write_file.write_all(&[1, 2, 3, 4]).unwrap();
@@ -118,7 +118,7 @@ fn test_directory_delete(directory: &mut Directory) {
assert!(directory.delete(*TEST_PATH).is_err());
}
fn test_directory(directory: &mut Directory) {
fn test_directory(directory: &mut dyn Directory) {
test_simple(directory);
test_seek(directory);
test_rewrite_forbidden(directory);
@@ -129,7 +129,7 @@ fn test_directory(directory: &mut Directory) {
test_watch(directory);
}
fn test_watch(directory: &mut Directory) {
fn test_watch(directory: &mut dyn Directory) {
let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone();
let watch_callback = Box::new(move || {
@@ -163,7 +163,7 @@ fn test_watch(directory: &mut Directory) {
assert_eq!(10, counter.load(Ordering::SeqCst));
}
fn test_lock_non_blocking(directory: &mut Directory) {
fn test_lock_non_blocking(directory: &mut dyn Directory) {
{
let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"),
@@ -188,7 +188,7 @@ fn test_lock_non_blocking(directory: &mut Directory) {
assert!(lock_a_res.is_ok());
}
fn test_lock_blocking(directory: &mut Directory) {
fn test_lock_blocking(directory: &mut dyn Directory) {
let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"),
is_blocking: true,

View File

@@ -3,7 +3,7 @@ use std::sync::RwLock;
use std::sync::Weak;
/// Type alias for callbacks registered when watching files of a `Directory`.
pub type WatchCallback = Box<Fn() -> () + Sync + Send>;
pub type WatchCallback = Box<dyn Fn() -> () + Sync + Send>;
/// Helper struct to implement the watch method in `Directory` implementations.
///
@@ -67,7 +67,7 @@ impl WatchCallbackList {
#[cfg(test)]
mod tests {
use directory::WatchCallbackList;
use crate::directory::WatchCallbackList;
use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
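
`WatchCallback` is now a `dyn`-annotated boxed closure. The register-and-broadcast pattern behind `WatchCallbackList` can be sketched as follows (simplified: the real list holds weak references so dropped handles stop firing):

type WatchCallback = Box<dyn Fn() + Sync + Send>;

struct WatchCallbackList {
    callbacks: Vec<WatchCallback>,
}

impl WatchCallbackList {
    fn subscribe(&mut self, callback: WatchCallback) {
        self.callbacks.push(callback);
    }

    // Invoked by the directory when a watched file (e.g. meta.json) changes.
    fn broadcast(&self) {
        for callback in &self.callbacks {
            callback();
        }
    }
}

fn main() {
    let mut watch_list = WatchCallbackList { callbacks: Vec::new() };
    watch_list.subscribe(Box::new(|| println!("meta.json changed")));
    watch_list.broadcast();
}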

View File

@@ -1,9 +1,9 @@
use common::BitSet;
use fastfield::DeleteBitSet;
use crate::common::BitSet;
use crate::fastfield::DeleteBitSet;
use crate::DocId;
use std::borrow::Borrow;
use std::borrow::BorrowMut;
use std::cmp::Ordering;
use DocId;
/// Expresses the outcome of a call to `DocSet`'s `.skip_next(...)`.
#[derive(PartialEq, Eq, Debug)]

View File

@@ -2,11 +2,11 @@
use std::io;
use directory::error::LockError;
use directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use fastfield::FastFieldNotAvailableError;
use query;
use schema;
use crate::directory::error::LockError;
use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use crate::fastfield::FastFieldNotAvailableError;
use crate::query;
use crate::schema;
use serde_json;
use std::fmt;
use std::path::PathBuf;
@@ -34,7 +34,7 @@ impl DataCorruption {
}
impl fmt::Debug for DataCorruption {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "Data corruption: ")?;
if let Some(ref filepath) = &self.filepath {
write!(f, "(in file `{:?}`)", filepath)?;

View File

@@ -6,8 +6,8 @@ pub use self::writer::BytesFastFieldWriter;
#[cfg(test)]
mod tests {
use schema::Schema;
use Index;
use crate::schema::Schema;
use crate::Index;
#[test]
fn test_bytes() {

View File

@@ -1,8 +1,8 @@
use owning_ref::OwningRef;
use directory::ReadOnlySource;
use fastfield::FastFieldReader;
use DocId;
use crate::directory::ReadOnlySource;
use crate::fastfield::FastFieldReader;
use crate::DocId;
/// Reader for byte array fast fields
///

View File

@@ -1,8 +1,8 @@
use std::io;
use fastfield::serializer::FastFieldSerializer;
use schema::{Document, Field, Value};
use DocId;
use crate::fastfield::serializer::FastFieldSerializer;
use crate::schema::{Document, Field, Value};
use crate::DocId;
/// Writer for byte array (as in, any number of bytes per document) fast fields
///

View File

@@ -1,11 +1,11 @@
use crate::common::HasLen;
use crate::directory::ReadOnlySource;
use crate::directory::WritePtr;
use crate::space_usage::ByteCount;
use crate::DocId;
use bit_set::BitSet;
use common::HasLen;
use directory::ReadOnlySource;
use directory::WritePtr;
use space_usage::ByteCount;
use std::io;
use std::io::Write;
use DocId;
/// Write a delete `BitSet`
///
@@ -82,8 +82,8 @@ impl HasLen for DeleteBitSet {
#[cfg(test)]
mod tests {
use super::*;
use crate::directory::*;
use bit_set::BitSet;
use directory::*;
use std::path::PathBuf;
fn test_delete_bitset_helper(bitset: &BitSet) {

View File

@@ -1,4 +1,4 @@
use schema::FieldEntry;
use crate::schema::FieldEntry;
use std::result;
/// `FastFieldNotAvailableError` is returned when the

View File

@@ -1,9 +1,9 @@
use super::MultiValueIntFastFieldReader;
use schema::Facet;
use crate::schema::Facet;
use crate::termdict::TermDictionary;
use crate::termdict::TermOrdinal;
use crate::DocId;
use std::str;
use termdict::TermDictionary;
use termdict::TermOrdinal;
use DocId;
/// The facet reader makes it possible to access the list of
/// facets associated to a given document in a specific

View File

@@ -33,10 +33,10 @@ pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders;
pub use self::serializer::FastFieldSerializer;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use common;
use schema::Cardinality;
use schema::FieldType;
use schema::Value;
use crate::common;
use crate::schema::Cardinality;
use crate::schema::FieldType;
use crate::schema::Value;
mod bytes;
mod delete;
@@ -126,16 +126,16 @@ fn value_to_u64(value: &Value) -> u64 {
mod tests {
use super::*;
use common::CompositeFile;
use directory::{Directory, RAMDirectory, WritePtr};
use fastfield::FastFieldReader;
use crate::common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::FastFieldReader;
use crate::schema::Document;
use crate::schema::Field;
use crate::schema::Schema;
use crate::schema::FAST;
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
use rand::SeedableRng;
use schema::Document;
use schema::Field;
use schema::Schema;
use schema::FAST;
use std::collections::HashMap;
use std::path::Path;

View File

@@ -7,16 +7,16 @@ pub use self::writer::MultiValueIntFastFieldWriter;
#[cfg(test)]
mod tests {
extern crate time;
use time;
use self::time::Duration;
use collector::TopDocs;
use query::QueryParser;
use schema::Cardinality;
use schema::Facet;
use schema::IntOptions;
use schema::Schema;
use Index;
use crate::collector::TopDocs;
use crate::query::QueryParser;
use crate::schema::Cardinality;
use crate::schema::Facet;
use crate::schema::IntOptions;
use crate::schema::Schema;
use crate::Index;
#[test]
fn test_multivalued_u64() {

View File

@@ -1,5 +1,5 @@
use fastfield::{FastFieldReader, FastValue};
use DocId;
use crate::fastfield::{FastFieldReader, FastValue};
use crate::DocId;
/// Reader for a multivalued `u64` fast field.
///
@@ -64,8 +64,8 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
#[cfg(test)]
mod tests {
use core::Index;
use schema::{Facet, Schema};
use crate::core::Index;
use crate::schema::{Facet, Schema};
#[test]
fn test_multifastfield_reader() {

View File

@@ -1,13 +1,13 @@
use fastfield::serializer::FastSingleFieldSerializer;
use fastfield::value_to_u64;
use fastfield::FastFieldSerializer;
use crate::fastfield::serializer::FastSingleFieldSerializer;
use crate::fastfield::value_to_u64;
use crate::fastfield::FastFieldSerializer;
use crate::postings::UnorderedTermId;
use crate::schema::{Document, Field};
use crate::termdict::TermOrdinal;
use crate::DocId;
use itertools::Itertools;
use postings::UnorderedTermId;
use schema::{Document, Field};
use std::collections::HashMap;
use std::io;
use termdict::TermOrdinal;
use DocId;
/// Writer for multi-valued (as in, more than one value per document)
/// int fast field.
@@ -116,7 +116,7 @@ impl MultiValueIntFastFieldWriter {
}
{
// writing the values themselves.
let mut value_serializer: FastSingleFieldSerializer<_>;
let mut value_serializer: FastSingleFieldSerializer<'_, _>;
match mapping_opt {
Some(mapping) => {
value_serializer = serializer.new_u64_fast_field_with_idx(

View File

@@ -1,18 +1,18 @@
use super::FastValue;
use common::bitpacker::BitUnpacker;
use common::compute_num_bits;
use common::BinarySerializable;
use common::CompositeFile;
use directory::ReadOnlySource;
use directory::{Directory, RAMDirectory, WritePtr};
use fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::common::bitpacker::BitUnpacker;
use crate::common::compute_num_bits;
use crate::common::BinarySerializable;
use crate::common::CompositeFile;
use crate::directory::ReadOnlySource;
use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema;
use crate::schema::FAST;
use crate::DocId;
use owning_ref::OwningRef;
use schema::Schema;
use schema::FAST;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::path::Path;
use DocId;
/// Trait for accessing a fastfield.
///

View File

@@ -1,11 +1,11 @@
use common::CompositeFile;
use fastfield::BytesFastFieldReader;
use fastfield::MultiValueIntFastFieldReader;
use fastfield::{FastFieldNotAvailableError, FastFieldReader};
use schema::{Cardinality, Field, FieldType, Schema};
use space_usage::PerFieldSpaceUsage;
use crate::common::CompositeFile;
use crate::fastfield::BytesFastFieldReader;
use crate::fastfield::MultiValueIntFastFieldReader;
use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader};
use crate::schema::{Cardinality, Field, FieldType, Schema};
use crate::space_usage::PerFieldSpaceUsage;
use crate::Result;
use std::collections::HashMap;
use Result;
/// Provides access to all of the `FastFieldReader`s.
///

View File

@@ -1,10 +1,10 @@
use common::bitpacker::BitPacker;
use common::compute_num_bits;
use common::BinarySerializable;
use common::CompositeWrite;
use common::CountingWriter;
use directory::WritePtr;
use schema::Field;
use crate::common::bitpacker::BitPacker;
use crate::common::compute_num_bits;
use crate::common::BinarySerializable;
use crate::common::CompositeWrite;
use crate::common::CountingWriter;
use crate::directory::WritePtr;
use crate::schema::Field;
use std::io::{self, Write};
/// `FastFieldSerializer` is in charge of serializing
@@ -45,7 +45,7 @@ impl FastFieldSerializer {
field: Field,
min_value: u64,
max_value: u64,
) -> io::Result<FastSingleFieldSerializer<CountingWriter<WritePtr>>> {
) -> io::Result<FastSingleFieldSerializer<'_, CountingWriter<WritePtr>>> {
self.new_u64_fast_field_with_idx(field, min_value, max_value, 0)
}
@@ -56,7 +56,7 @@ impl FastFieldSerializer {
min_value: u64,
max_value: u64,
idx: usize,
) -> io::Result<FastSingleFieldSerializer<CountingWriter<WritePtr>>> {
) -> io::Result<FastSingleFieldSerializer<'_, CountingWriter<WritePtr>>> {
let field_write = self.composite_write.for_field_with_idx(field, idx);
FastSingleFieldSerializer::open(field_write, min_value, max_value)
}
@@ -66,7 +66,7 @@ impl FastFieldSerializer {
&mut self,
field: Field,
idx: usize,
) -> io::Result<FastBytesFieldSerializer<CountingWriter<WritePtr>>> {
) -> io::Result<FastBytesFieldSerializer<'_, CountingWriter<WritePtr>>> {
let field_write = self.composite_write.for_field_with_idx(field, idx);
FastBytesFieldSerializer::open(field_write)
}
@@ -79,7 +79,7 @@ impl FastFieldSerializer {
}
}
pub struct FastSingleFieldSerializer<'a, W: Write + 'a> {
pub struct FastSingleFieldSerializer<'a, W: Write> {
bit_packer: BitPacker,
write: &'a mut W,
min_value: u64,
@@ -127,7 +127,7 @@ impl<'a, W: Write> FastSingleFieldSerializer<'a, W> {
}
}
pub struct FastBytesFieldSerializer<'a, W: Write + 'a> {
pub struct FastBytesFieldSerializer<'a, W: Write> {
write: &'a mut W,
}

View File

@@ -1,13 +1,13 @@
use super::multivalued::MultiValueIntFastFieldWriter;
use common;
use common::BinarySerializable;
use common::VInt;
use fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use postings::UnorderedTermId;
use schema::{Cardinality, Document, Field, FieldType, Schema};
use crate::common;
use crate::common::BinarySerializable;
use crate::common::VInt;
use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use crate::postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldType, Schema};
use crate::termdict::TermOrdinal;
use std::collections::HashMap;
use std::io;
use termdict::TermOrdinal;
/// The `FastFieldsWriter` groups all of the fast field writers.
pub struct FastFieldsWriter {

View File

@@ -1,6 +1,6 @@
use super::{fieldnorm_to_id, id_to_fieldnorm};
use directory::ReadOnlySource;
use DocId;
use crate::directory::ReadOnlySource;
use crate::DocId;
/// Reads the fieldnorm associated to a document.
/// The fieldnorm represents the length associated to

View File

@@ -1,6 +1,6 @@
use common::CompositeWrite;
use directory::WritePtr;
use schema::Field;
use crate::common::CompositeWrite;
use crate::directory::WritePtr;
use crate::schema::Field;
use std::io;
use std::io::Write;

View File

@@ -1,9 +1,9 @@
use DocId;
use crate::DocId;
use super::fieldnorm_to_id;
use super::FieldNormsSerializer;
use schema::Field;
use schema::Schema;
use crate::schema::Field;
use crate::schema::Schema;
use std::io;
/// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte

View File

@@ -1,10 +1,10 @@
use rand::thread_rng;
use std::collections::HashSet;
use crate::schema::*;
use crate::Index;
use crate::Searcher;
use rand::Rng;
use schema::*;
use Index;
use Searcher;
fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {
assert!(searcher.segment_readers().len() < 20);

View File

@@ -1,8 +1,8 @@
use super::operation::DeleteOperation;
use crate::Opstamp;
use std::mem;
use std::ops::DerefMut;
use std::sync::{Arc, RwLock};
use Opstamp;
// The DeleteQueue is conceptually similar to a multiple-consumer,
// single-producer broadcast channel.
@@ -250,7 +250,7 @@ impl DeleteCursor {
mod tests {
use super::{DeleteOperation, DeleteQueue};
use schema::{Field, Term};
use crate::schema::{Field, Term};
#[test]
fn test_deletequeue() {

View File

@@ -1,6 +1,6 @@
use crate::DocId;
use crate::Opstamp;
use std::sync::Arc;
use DocId;
use Opstamp;
// The doc-to-opstamp mapping is used to identify which
// documents should be deleted.

View File

@@ -1,37 +1,37 @@
use super::operation::{AddOperation, UserOperation};
use super::segment_updater::SegmentUpdater;
use super::PreparedCommit;
use crate::core::Index;
use crate::core::Segment;
use crate::core::SegmentComponent;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::SegmentReader;
use crate::directory::DirectoryLock;
use crate::docset::DocSet;
use crate::error::TantivyError;
use crate::fastfield::write_delete_bitset;
use crate::indexer::delete_queue::{DeleteCursor, DeleteQueue};
use crate::indexer::doc_opstamp_mapping::DocToOpstampMapping;
use crate::indexer::operation::DeleteOperation;
use crate::indexer::stamper::Stamper;
use crate::indexer::MergePolicy;
use crate::indexer::SegmentEntry;
use crate::indexer::SegmentWriter;
use crate::postings::compute_table_size;
use crate::schema::Document;
use crate::schema::IndexRecordOption;
use crate::schema::Term;
use crate::Opstamp;
use crate::Result;
use bit_set::BitSet;
use core::Index;
use core::Segment;
use core::SegmentComponent;
use core::SegmentId;
use core::SegmentMeta;
use core::SegmentReader;
use crossbeam::channel;
use directory::DirectoryLock;
use docset::DocSet;
use error::TantivyError;
use fastfield::write_delete_bitset;
use futures::{Canceled, Future};
use indexer::delete_queue::{DeleteCursor, DeleteQueue};
use indexer::doc_opstamp_mapping::DocToOpstampMapping;
use indexer::operation::DeleteOperation;
use indexer::stamper::Stamper;
use indexer::MergePolicy;
use indexer::SegmentEntry;
use indexer::SegmentWriter;
use postings::compute_table_size;
use schema::Document;
use schema::IndexRecordOption;
use schema::Term;
use std::mem;
use std::ops::Range;
use std::sync::Arc;
use std::thread;
use std::thread::JoinHandle;
use Opstamp;
use Result;
// Size of the margin for the heap. A segment is closed when the remaining memory
// in the heap goes below MARGIN_IN_BYTES.
@@ -268,7 +268,7 @@ fn index_documents(
memory_budget: usize,
segment: &Segment,
generation: usize,
document_iterator: &mut Iterator<Item = Vec<AddOperation>>,
document_iterator: &mut dyn Iterator<Item = Vec<AddOperation>>,
segment_updater: &mut SegmentUpdater,
mut delete_cursor: DeleteCursor,
) -> Result<bool> {
@@ -440,12 +440,12 @@ impl IndexWriter {
}
/// Accessor to the merge policy.
pub fn get_merge_policy(&self) -> Arc<Box<MergePolicy>> {
pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
self.segment_updater.get_merge_policy()
}
/// Set the merge policy.
pub fn set_merge_policy(&self, merge_policy: Box<MergePolicy>) {
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
self.segment_updater.set_merge_policy(merge_policy);
}
@@ -603,7 +603,7 @@ impl IndexWriter {
/// It is also possible to add a payload to the `commit`
/// using this API.
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
pub fn prepare_commit(&mut self) -> Result<PreparedCommit> {
pub fn prepare_commit(&mut self) -> Result<PreparedCommit<'_>> {
// Here, because we join all of the worker threads,
// all of the segment updates for this commit have been
// sent.
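
The `PreparedCommit<'_>` return type just above illustrates the third recurring pattern: the anonymous lifetime. The struct still borrows the `IndexWriter`, but under `rust_2018_idioms` the borrow must be visible in the signature as `'_` instead of being silently elided; the many `Formatter<'_>` and `RwLockReadGuard<'_, _>` edits further down are the same fix. A rough sketch with a hypothetical writer type:

struct Writer {
    opstamp: u64,
}

struct Prepared<'a> {
    writer: &'a mut Writer,
}

impl Writer {
    // Writing just `-> Prepared` still compiles, but trips the
    // `elided_lifetimes_in_paths` lint; `'_` marks the borrow explicitly.
    fn prepare(&mut self) -> Prepared<'_> {
        Prepared { writer: self }
    }
}

fn main() {
    let mut writer = Writer { opstamp: 0 };
    let prepared = writer.prepare();
    prepared.writer.opstamp += 1;
}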
@@ -773,15 +773,15 @@ mod tests {
use super::super::operation::UserOperation;
use super::initial_table_size;
use collector::TopDocs;
use directory::error::LockError;
use error::*;
use indexer::NoMergePolicy;
use query::TermQuery;
use schema::{self, IndexRecordOption};
use Index;
use ReloadPolicy;
use Term;
use crate::collector::TopDocs;
use crate::directory::error::LockError;
use crate::error::*;
use crate::indexer::NoMergePolicy;
use crate::query::TermQuery;
use crate::schema::{self, IndexRecordOption};
use crate::Index;
use crate::ReloadPolicy;
use crate::Term;
#[test]
fn test_operations_group() {

View File

@@ -1,5 +1,5 @@
use super::merge_policy::{MergeCandidate, MergePolicy};
use core::SegmentMeta;
use crate::core::SegmentMeta;
use std::cmp;
use std::f64;
@@ -95,8 +95,8 @@ impl Default for LogMergePolicy {
#[cfg(test)]
mod tests {
use super::*;
use core::{SegmentId, SegmentMeta};
use indexer::merge_policy::MergePolicy;
use crate::core::{SegmentId, SegmentMeta};
use crate::indexer::merge_policy::MergePolicy;
fn test_merge_policy() -> LogMergePolicy {
let mut log_merge_policy = LogMergePolicy::default();

View File

@@ -1,7 +1,7 @@
use crate::Opstamp;
use crate::SegmentId;
use census::{Inventory, TrackedObject};
use std::collections::HashSet;
use Opstamp;
use SegmentId;
#[derive(Default)]
pub struct MergeOperationInventory(Inventory<InnerMergeOperation>);

View File

@@ -1,5 +1,5 @@
use core::SegmentId;
use core::SegmentMeta;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use std::fmt::Debug;
use std::marker;
@@ -39,8 +39,8 @@ impl MergePolicy for NoMergePolicy {
pub mod tests {
use super::*;
use core::SegmentId;
use core::SegmentMeta;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
/// A `MergePolicy` useful for test purposes.
///

View File

@@ -1,31 +1,31 @@
use common::MAX_DOC_LIMIT;
use core::Segment;
use core::SegmentReader;
use core::SerializableSegment;
use docset::DocSet;
use fastfield::BytesFastFieldReader;
use fastfield::DeleteBitSet;
use fastfield::FastFieldReader;
use fastfield::FastFieldSerializer;
use fastfield::MultiValueIntFastFieldReader;
use fieldnorm::FieldNormReader;
use fieldnorm::FieldNormsSerializer;
use fieldnorm::FieldNormsWriter;
use indexer::SegmentSerializer;
use crate::common::MAX_DOC_LIMIT;
use crate::core::Segment;
use crate::core::SegmentReader;
use crate::core::SerializableSegment;
use crate::docset::DocSet;
use crate::fastfield::BytesFastFieldReader;
use crate::fastfield::DeleteBitSet;
use crate::fastfield::FastFieldReader;
use crate::fastfield::FastFieldSerializer;
use crate::fastfield::MultiValueIntFastFieldReader;
use crate::fieldnorm::FieldNormReader;
use crate::fieldnorm::FieldNormsSerializer;
use crate::fieldnorm::FieldNormsWriter;
use crate::indexer::SegmentSerializer;
use crate::postings::InvertedIndexSerializer;
use crate::postings::Postings;
use crate::schema::Cardinality;
use crate::schema::FieldType;
use crate::schema::{Field, Schema};
use crate::store::StoreWriter;
use crate::termdict::TermMerger;
use crate::termdict::TermOrdinal;
use crate::DocId;
use crate::Result;
use crate::TantivyError;
use itertools::Itertools;
use postings::InvertedIndexSerializer;
use postings::Postings;
use schema::Cardinality;
use schema::FieldType;
use schema::{Field, Schema};
use std::cmp;
use std::collections::HashMap;
use store::StoreWriter;
use termdict::TermMerger;
use termdict::TermOrdinal;
use DocId;
use Result;
use TantivyError;
fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 {
let mut total_tokens = 0u64;
@@ -692,28 +692,28 @@ impl SerializableSegment for IndexMerger {
#[cfg(test)]
mod tests {
use crate::collector::tests::TestCollector;
use crate::collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector};
use crate::collector::{Count, FacetCollector};
use crate::core::Index;
use crate::query::AllQuery;
use crate::query::BooleanQuery;
use crate::query::TermQuery;
use crate::schema;
use crate::schema::Cardinality;
use crate::schema::Document;
use crate::schema::Facet;
use crate::schema::IndexRecordOption;
use crate::schema::IntOptions;
use crate::schema::Term;
use crate::schema::TextFieldIndexing;
use crate::schema::INDEXED;
use crate::DocAddress;
use crate::IndexWriter;
use crate::Searcher;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use collector::tests::TestCollector;
use collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector};
use collector::{Count, FacetCollector};
use core::Index;
use futures::Future;
use query::AllQuery;
use query::BooleanQuery;
use query::TermQuery;
use schema;
use schema::Cardinality;
use schema::Document;
use schema::Facet;
use schema::IndexRecordOption;
use schema::IntOptions;
use schema::Term;
use schema::TextFieldIndexing;
use schema::INDEXED;
use std::io::Cursor;
use DocAddress;
use IndexWriter;
use Searcher;
#[test]
fn test_index_merger_no_deletes() {

View File

@@ -1,6 +1,6 @@
use schema::Document;
use schema::Term;
use Opstamp;
use crate::schema::Document;
use crate::schema::Term;
use crate::Opstamp;
/// Timestamped Delete operation.
#[derive(Clone, Eq, PartialEq, Debug)]

View File

@@ -1,6 +1,6 @@
use super::IndexWriter;
use Opstamp;
use Result;
use crate::Opstamp;
use crate::Result;
/// A prepared commit
pub struct PreparedCommit<'a> {
@@ -10,7 +10,7 @@ pub struct PreparedCommit<'a> {
}
impl<'a> PreparedCommit<'a> {
pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: Opstamp) -> PreparedCommit {
pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: Opstamp) -> PreparedCommit<'_> {
PreparedCommit {
index_writer,
payload: None,

View File

@@ -1,7 +1,7 @@
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::indexer::delete_queue::DeleteCursor;
use bit_set::BitSet;
use core::SegmentId;
use core::SegmentMeta;
use indexer::delete_queue::DeleteCursor;
use std::fmt;
/// A segment entry describes the state of
@@ -67,7 +67,7 @@ impl SegmentEntry {
}
impl fmt::Debug for SegmentEntry {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "SegmentEntry({:?})", self.meta)
}
}

View File

@@ -1,16 +1,16 @@
use super::segment_register::SegmentRegister;
use core::SegmentId;
use core::SegmentMeta;
use core::META_FILEPATH;
use error::TantivyError;
use indexer::delete_queue::DeleteCursor;
use indexer::SegmentEntry;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::META_FILEPATH;
use crate::error::TantivyError;
use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::SegmentEntry;
use crate::Result as TantivyResult;
use std::collections::hash_set::HashSet;
use std::fmt::{self, Debug, Formatter};
use std::path::PathBuf;
use std::sync::RwLock;
use std::sync::{RwLockReadGuard, RwLockWriteGuard};
use Result as TantivyResult;
#[derive(Default)]
struct SegmentRegisters {
@@ -29,7 +29,7 @@ pub struct SegmentManager {
}
impl Debug for SegmentManager {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
let lock = self.read();
write!(
f,
@@ -91,13 +91,13 @@ impl SegmentManager {
// Lock poisoning should never happen:
// The lock is acquired and released within this class,
// and the operations cannot panic.
fn read(&self) -> RwLockReadGuard<SegmentRegisters> {
fn read(&self) -> RwLockReadGuard<'_, SegmentRegisters> {
self.registers
.read()
.expect("Failed to acquire read lock on SegmentManager.")
}
fn write(&self) -> RwLockWriteGuard<SegmentRegisters> {
fn write(&self) -> RwLockWriteGuard<'_, SegmentRegisters> {
self.registers
.write()
.expect("Failed to acquire write lock on SegmentManager.")

View File

@@ -1,7 +1,7 @@
use core::SegmentId;
use core::SegmentMeta;
use indexer::delete_queue::DeleteCursor;
use indexer::segment_entry::SegmentEntry;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::segment_entry::SegmentEntry;
use std::collections::HashMap;
use std::collections::HashSet;
use std::fmt::{self, Debug, Formatter};
@@ -20,7 +20,7 @@ pub struct SegmentRegister {
}
impl Debug for SegmentRegister {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "SegmentRegister(")?;
for k in self.segment_states.keys() {
write!(f, "{}, ", k.short_uuid_string())?;
@@ -93,9 +93,9 @@ impl SegmentRegister {
#[cfg(test)]
mod tests {
use super::*;
use core::SegmentId;
use core::SegmentMeta;
use indexer::delete_queue::*;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::indexer::delete_queue::*;
fn segment_ids(segment_register: &SegmentRegister) -> Vec<SegmentId> {
segment_register

View File

@@ -1,11 +1,11 @@
use Result;
use crate::Result;
use core::Segment;
use core::SegmentComponent;
use fastfield::FastFieldSerializer;
use fieldnorm::FieldNormsSerializer;
use postings::InvertedIndexSerializer;
use store::StoreWriter;
use crate::core::Segment;
use crate::core::SegmentComponent;
use crate::fastfield::FastFieldSerializer;
use crate::fieldnorm::FieldNormsSerializer;
use crate::postings::InvertedIndexSerializer;
use crate::store::StoreWriter;
/// Segment serializer is in charge of laying out on disk
/// the data accumulated and sorted by the `SegmentWriter`.

View File

@@ -1,29 +1,31 @@
use super::segment_manager::{get_mergeable_segments, SegmentManager};
use core::Index;
use core::IndexMeta;
use core::Segment;
use core::SegmentId;
use core::SegmentMeta;
use core::SerializableSegment;
use core::META_FILEPATH;
use directory::{Directory, DirectoryClone};
use error::TantivyError;
use crate::core::Index;
use crate::core::IndexMeta;
use crate::core::Segment;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::SerializableSegment;
use crate::core::META_FILEPATH;
use crate::directory::{Directory, DirectoryClone};
use crate::error::TantivyError;
use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::index_writer::advance_deletes;
use crate::indexer::merge_operation::MergeOperationInventory;
use crate::indexer::merger::IndexMerger;
use crate::indexer::stamper::Stamper;
use crate::indexer::MergeOperation;
use crate::indexer::SegmentEntry;
use crate::indexer::SegmentSerializer;
use crate::indexer::{DefaultMergePolicy, MergePolicy};
use crate::schema::Schema;
use crate::Opstamp;
use crate::Result;
use futures::oneshot;
use futures::sync::oneshot::Receiver;
use futures::Future;
use futures_cpupool::Builder as CpuPoolBuilder;
use futures_cpupool::CpuFuture;
use futures_cpupool::CpuPool;
use indexer::delete_queue::DeleteCursor;
use indexer::index_writer::advance_deletes;
use indexer::merge_operation::MergeOperationInventory;
use indexer::merger::IndexMerger;
use indexer::stamper::Stamper;
use indexer::MergeOperation;
use indexer::SegmentEntry;
use indexer::SegmentSerializer;
use indexer::{DefaultMergePolicy, MergePolicy};
use schema::Schema;
use serde_json;
use std::borrow::BorrowMut;
use std::collections::HashMap;
@@ -36,8 +38,6 @@ use std::sync::Arc;
use std::sync::RwLock;
use std::thread;
use std::thread::JoinHandle;
use Opstamp;
use Result;
/// Save the index meta file.
/// This operation is atomic:
@@ -48,7 +48,7 @@ use Result;
/// and flushed.
///
/// This method is not part of tantivy's public API
pub fn save_new_metas(schema: Schema, directory: &mut Directory) -> Result<()> {
pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> Result<()> {
save_metas(
&IndexMeta {
segments: Vec::new(),
@@ -69,7 +69,7 @@ pub fn save_new_metas(schema: Schema, directory: &mut Directory) -> Result<()> {
/// and flushed.
///
/// This method is not part of tantivy's public API
fn save_metas(metas: &IndexMeta, directory: &mut Directory) -> Result<()> {
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> Result<()> {
info!("save metas");
let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer.
@@ -142,7 +142,7 @@ struct InnerSegmentUpdater {
pool: CpuPool,
index: Index,
segment_manager: SegmentManager,
merge_policy: RwLock<Arc<Box<MergePolicy>>>,
merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>,
merging_thread_id: AtomicUsize,
merging_threads: RwLock<HashMap<usize, JoinHandle<Result<()>>>>,
generation: AtomicUsize,
@@ -179,11 +179,11 @@ impl SegmentUpdater {
})))
}
pub fn get_merge_policy(&self) -> Arc<Box<MergePolicy>> {
pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
self.0.merge_policy.read().unwrap().clone()
}
pub fn set_merge_policy(&self, merge_policy: Box<MergePolicy>) {
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
let arc_merge_policy = Arc::new(merge_policy);
*self.0.merge_policy.write().unwrap() = arc_merge_policy;
}
@@ -533,9 +533,9 @@ impl SegmentUpdater {
#[cfg(test)]
mod tests {
use indexer::merge_policy::tests::MergeWheneverPossible;
use schema::*;
use Index;
use crate::indexer::merge_policy::tests::MergeWheneverPossible;
use crate::schema::*;
use crate::Index;
#[test]
fn test_delete_during_merge() {

View File

@@ -1,23 +1,23 @@
use super::operation::AddOperation;
use core::Segment;
use core::SerializableSegment;
use fastfield::FastFieldsWriter;
use fieldnorm::FieldNormsWriter;
use indexer::segment_serializer::SegmentSerializer;
use postings::MultiFieldPostingsWriter;
use schema::FieldEntry;
use schema::FieldType;
use schema::Schema;
use schema::Term;
use schema::Value;
use crate::core::Segment;
use crate::core::SerializableSegment;
use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::FieldNormsWriter;
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::postings::MultiFieldPostingsWriter;
use crate::schema::FieldEntry;
use crate::schema::FieldType;
use crate::schema::Schema;
use crate::schema::Term;
use crate::schema::Value;
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::FacetTokenizer;
use crate::tokenizer::{TokenStream, Tokenizer};
use crate::DocId;
use crate::Opstamp;
use crate::Result;
use std::io;
use std::str;
use tokenizer::BoxedTokenizer;
use tokenizer::FacetTokenizer;
use tokenizer::{TokenStream, Tokenizer};
use DocId;
use Opstamp;
use Result;
/// A `SegmentWriter` is in charge of creating a segment index from a
/// set of documents.
@@ -31,7 +31,7 @@ pub struct SegmentWriter {
fast_field_writers: FastFieldsWriter,
fieldnorms_writer: FieldNormsWriter,
doc_opstamps: Vec<Opstamp>,
tokenizers: Vec<Option<Box<BoxedTokenizer>>>,
tokenizers: Vec<Option<Box<dyn BoxedTokenizer>>>,
}
impl SegmentWriter {

View File

@@ -1,7 +1,7 @@
use crate::Opstamp;
use std::ops::Range;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use Opstamp;
/// Stamper provides Opstamps, which are just auto-increment ids used to label
/// an operation.

View File

@@ -125,31 +125,8 @@ extern crate failure;
#[cfg(feature = "mmap")]
extern crate atomicwrites;
extern crate base64;
extern crate bit_set;
extern crate bitpacking;
extern crate byteorder;
extern crate combine;
extern crate crossbeam;
extern crate fnv;
extern crate futures;
extern crate futures_cpupool;
extern crate htmlescape;
extern crate itertools;
extern crate levenshtein_automata;
#[cfg(feature = "mmap")]
extern crate memmap;
extern crate num_cpus;
extern crate owning_ref;
extern crate regex;
extern crate rust_stemmers;
extern crate scoped_pool;
extern crate serde;
extern crate stable_deref_trait;
extern crate tantivy_fst;
extern crate tempdir;
extern crate tempfile;
extern crate uuid;
#[cfg(test)]
#[macro_use]
@@ -181,14 +158,11 @@ mod functional_test;
#[macro_use]
mod macros;
pub use error::TantivyError;
pub use crate::error::TantivyError;
#[deprecated(since = "0.7.0", note = "please use `tantivy::TantivyError` instead")]
pub use error::TantivyError as Error;
extern crate census;
pub extern crate chrono;
extern crate owned_read;
pub use crate::error::TantivyError as Error;
pub use chrono;
/// Tantivy result.
pub type Result<T> = std::result::Result<T, error::TantivyError>;
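
The long block of deleted `extern crate` lines is pure 2018-edition housekeeping: once Cargo.toml declares `edition = "2018"`, every crate listed under `[dependencies]` is in scope automatically, and `pub extern crate chrono;` collapses into the plain re-export `pub use chrono;`. Roughly, assuming `byteorder` and `log` as the dependencies:

// Rust 2015 needed a declaration per dependency, plus #[macro_use]
// for macros:
//     extern crate byteorder;
//     #[macro_use]
//     extern crate log;

// Rust 2018: the crates resolve from Cargo.toml, and macros are
// imported like any other item.
use byteorder::{ByteOrder, LittleEndian};
use log::info;

// Re-exporting a whole dependency is now an ordinary `use`.
pub use byteorder;

fn main() {
    let mut buf = [0u8; 4];
    LittleEndian::write_u32(&mut buf, 42);
    info!("wrote {:?}", buf);
}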
@@ -225,15 +199,15 @@ pub use self::snippet::{Snippet, SnippetGenerator};
mod docset;
pub use self::docset::{DocSet, SkipResult};
pub use core::SegmentComponent;
pub use core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
pub use core::{InvertedIndexReader, SegmentReader};
pub use directory::Directory;
pub use indexer::IndexWriter;
pub use postings::Postings;
pub use schema::{Document, Term};
pub use crate::core::SegmentComponent;
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
pub use crate::core::{InvertedIndexReader, SegmentReader};
pub use crate::directory::Directory;
pub use crate::indexer::IndexWriter;
pub use crate::postings::Postings;
pub use crate::schema::{Document, Term};
pub use common::{i64_to_u64, u64_to_i64};
pub use crate::common::{i64_to_u64, u64_to_i64};
/// Expose the current version of tantivy, as well
/// whether it was compiled with the simd compression.
@@ -243,10 +217,10 @@ pub fn version() -> &'static str {
/// Defines tantivy's merging strategy
pub mod merge_policy {
pub use indexer::DefaultMergePolicy;
pub use indexer::LogMergePolicy;
pub use indexer::MergePolicy;
pub use indexer::NoMergePolicy;
pub use crate::indexer::DefaultMergePolicy;
pub use crate::indexer::LogMergePolicy;
pub use crate::indexer::MergePolicy;
pub use crate::indexer::NoMergePolicy;
}
/// A `u32` identifying a document within a segment.
@@ -304,20 +278,20 @@ pub struct DocAddress(pub SegmentLocalId, pub DocId);
#[cfg(test)]
mod tests {
use collector::tests::TestCollector;
use core::SegmentReader;
use docset::DocSet;
use query::BooleanQuery;
use crate::collector::tests::TestCollector;
use crate::core::SegmentReader;
use crate::docset::DocSet;
use crate::query::BooleanQuery;
use crate::schema::*;
use crate::DocAddress;
use crate::Index;
use crate::IndexWriter;
use crate::Postings;
use crate::ReloadPolicy;
use rand::distributions::Bernoulli;
use rand::distributions::Uniform;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use schema::*;
use DocAddress;
use Index;
use IndexWriter;
use Postings;
use ReloadPolicy;
pub fn assert_nearly_equals(expected: f32, val: f32) {
assert!(
@@ -480,7 +454,7 @@ mod tests {
}
}
fn advance_undeleted(docset: &mut DocSet, reader: &SegmentReader) -> bool {
fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool {
while docset.advance() {
if !reader.is_deleted(docset.doc()) {
return true;

View File

@@ -67,7 +67,7 @@ macro_rules! doc(
#[cfg(test)]
mod test {
use schema::{Schema, FAST, TEXT};
use crate::schema::{Schema, FAST, TEXT};
#[test]
fn test_doc_basic() {

View File

@@ -38,8 +38,8 @@ const LONG_SKIP_INTERVAL: u64 = (LONG_SKIP_IN_BLOCKS * COMPRESSION_BLOCK_SIZE) a
pub mod tests {
use super::{PositionReader, PositionSerializer};
use directory::ReadOnlySource;
use positions::COMPRESSION_BLOCK_SIZE;
use crate::directory::ReadOnlySource;
use crate::positions::COMPRESSION_BLOCK_SIZE;
use std::iter;
fn create_stream_buffer(vals: &[u32]) -> (ReadOnlySource, ReadOnlySource) {

View File

@@ -1,3 +1,9 @@
use crate::common::{BinarySerializable, FixedSize};
use crate::directory::ReadOnlySource;
use crate::positions::COMPRESSION_BLOCK_SIZE;
use crate::positions::LONG_SKIP_INTERVAL;
use crate::positions::LONG_SKIP_IN_BLOCKS;
use crate::postings::compression::compressed_block_size;
/// Positions work as a long sequence of compressed blocks.
/// All terms are chained one after the other.
///
@@ -19,13 +25,7 @@
/// so skipping a block without decompressing it is just a matter of advancing that many
/// bytes.
use bitpacking::{BitPacker, BitPacker4x};
use common::{BinarySerializable, FixedSize};
use directory::ReadOnlySource;
use owned_read::OwnedRead;
use positions::COMPRESSION_BLOCK_SIZE;
use positions::LONG_SKIP_INTERVAL;
use positions::LONG_SKIP_IN_BLOCKS;
use postings::compression::compressed_block_size;
struct Positions {
bit_packer: BitPacker4x,

View File

@@ -1,8 +1,8 @@
use crate::common::BinarySerializable;
use crate::common::CountingWriter;
use crate::positions::{COMPRESSION_BLOCK_SIZE, LONG_SKIP_INTERVAL};
use bitpacking::BitPacker;
use bitpacking::BitPacker4x;
use common::BinarySerializable;
use common::CountingWriter;
use positions::{COMPRESSION_BLOCK_SIZE, LONG_SKIP_INTERVAL};
use std::io::{self, Write};
pub struct PositionSerializer<W: io::Write> {

View File

@@ -1,4 +1,4 @@
use postings::compression::AlignedBuffer;
use crate::postings::compression::AlignedBuffer;
/// This module defines the logic used to search for a doc in a given
/// block (at most 128 docs).
@@ -8,7 +8,7 @@ use postings::compression::AlignedBuffer;
#[cfg(target_arch = "x86_64")]
mod sse2 {
use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
use crate::postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
use std::arch::x86_64::__m128i as DataType;
use std::arch::x86_64::_mm_add_epi32 as op_add;
use std::arch::x86_64::_mm_cmplt_epi32 as op_lt;
@@ -49,7 +49,7 @@ mod sse2 {
#[cfg(test)]
mod test {
use super::linear_search_sse2_128;
use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
use crate::postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
#[test]
fn test_linear_search_sse2_128_u32() {
@@ -140,7 +140,7 @@ impl BlockSearcher {
) -> usize {
#[cfg(target_arch = "x86_64")]
{
use postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
if self == BlockSearcher::SSE2 && len == COMPRESSION_BLOCK_SIZE {
return sse2::linear_search_sse2_128(block_docs, target);
}
@@ -166,7 +166,7 @@ mod tests {
use super::exponential_search;
use super::linear_search;
use super::BlockSearcher;
use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
use crate::postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
#[test]
fn test_linear_search() {

View File

@@ -1,5 +1,5 @@
use crate::common::FixedSize;
use bitpacking::{BitPacker, BitPacker4x};
use common::FixedSize;
pub const COMPRESSION_BLOCK_SIZE: usize = BitPacker4x::BLOCK_LEN;
const COMPRESSED_BLOCK_MAX_SIZE: usize = COMPRESSION_BLOCK_SIZE * u32::SIZE_IN_BYTES;

View File

@@ -31,7 +31,7 @@ pub use self::segment_postings::{BlockSegmentPostings, SegmentPostings};
pub(crate) use self::stacker::compute_table_size;
pub use common::HasLen;
pub use crate::common::HasLen;
pub(crate) const USE_SKIP_INFO_LIMIT: u32 = COMPRESSION_BLOCK_SIZE as u32;
pub(crate) type UnorderedTermId = u64;
@@ -48,24 +48,24 @@ pub(crate) enum FreqReadingOption {
pub mod tests {
use super::*;
use core::Index;
use core::SegmentComponent;
use core::SegmentReader;
use docset::{DocSet, SkipResult};
use fieldnorm::FieldNormReader;
use indexer::operation::AddOperation;
use indexer::SegmentWriter;
use merge_policy::NoMergePolicy;
use query::Scorer;
use crate::core::Index;
use crate::core::SegmentComponent;
use crate::core::SegmentReader;
use crate::docset::{DocSet, SkipResult};
use crate::fieldnorm::FieldNormReader;
use crate::indexer::operation::AddOperation;
use crate::indexer::SegmentWriter;
use crate::merge_policy::NoMergePolicy;
use crate::query::Scorer;
use crate::schema::{Document, Schema, Term, INDEXED, STRING, TEXT};
use crate::schema::{Field, TextOptions};
use crate::schema::{IndexRecordOption, TextFieldIndexing};
use crate::tokenizer::{SimpleTokenizer, MAX_TOKEN_LEN};
use crate::DocId;
use crate::Score;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use schema::{Document, Schema, Term, INDEXED, STRING, TEXT};
use schema::{Field, TextOptions};
use schema::{IndexRecordOption, TextFieldIndexing};
use std::iter;
use tokenizer::{SimpleTokenizer, MAX_TOKEN_LEN};
use DocId;
use Score;
#[test]
pub fn test_position_write() {
@@ -589,7 +589,7 @@ pub mod tests {
}
}
pub fn test_skip_against_unoptimized<F: Fn() -> Box<DocSet>>(
pub fn test_skip_against_unoptimized<F: Fn() -> Box<dyn DocSet>>(
postings_factory: F,
targets: Vec<u32>,
) {

View File

@@ -1,4 +1,4 @@
use docset::DocSet;
use crate::docset::DocSet;
/// Postings (also called inverted list)
///

View File

@@ -1,23 +1,23 @@
use super::stacker::{Addr, MemoryArena, TermHashMap};
use postings::recorder::{
use crate::postings::recorder::{
BufferLender, NothingRecorder, Recorder, TFAndPositionRecorder, TermFrequencyRecorder,
};
use postings::UnorderedTermId;
use postings::{FieldSerializer, InvertedIndexSerializer};
use schema::IndexRecordOption;
use schema::{Field, FieldEntry, FieldType, Schema, Term};
use crate::postings::UnorderedTermId;
use crate::postings::{FieldSerializer, InvertedIndexSerializer};
use crate::schema::IndexRecordOption;
use crate::schema::{Field, FieldEntry, FieldType, Schema, Term};
use crate::termdict::TermOrdinal;
use crate::tokenizer::TokenStream;
use crate::tokenizer::{Token, MAX_TOKEN_LEN};
use crate::DocId;
use crate::Result;
use std::collections::HashMap;
use std::io;
use std::marker::PhantomData;
use std::ops::DerefMut;
use termdict::TermOrdinal;
use tokenizer::TokenStream;
use tokenizer::{Token, MAX_TOKEN_LEN};
use DocId;
use Result;
fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<PostingsWriter> {
fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<dyn PostingsWriter> {
match *field_entry.field_type() {
FieldType::Str(ref text_options) => text_options
.get_indexing_options()
@@ -49,7 +49,7 @@ pub struct MultiFieldPostingsWriter {
heap: MemoryArena,
schema: Schema,
term_index: TermHashMap,
per_field_postings_writers: Vec<Box<PostingsWriter>>,
per_field_postings_writers: Vec<Box<dyn PostingsWriter>>,
}
fn make_field_partition(
@@ -99,7 +99,12 @@ impl MultiFieldPostingsWriter {
self.term_index.mem_usage() + self.heap.mem_usage()
}
pub fn index_text(&mut self, doc: DocId, field: Field, token_stream: &mut TokenStream) -> u32 {
pub fn index_text(
&mut self,
doc: DocId,
field: Field,
token_stream: &mut dyn TokenStream,
) -> u32 {
let postings_writer = self.per_field_postings_writers[field.0 as usize].deref_mut();
postings_writer.index_text(
&mut self.term_index,
@@ -138,10 +143,10 @@ impl MultiFieldPostingsWriter {
FieldType::Str(_) | FieldType::HierarchicalFacet => {
// populating the (unordered term ord) -> (ordered term ord) mapping
// for the field.
let mut unordered_term_ids = term_offsets[start..stop]
let unordered_term_ids = term_offsets[start..stop]
.iter()
.map(|&(_, _, bucket)| bucket);
let mut mapping: HashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
let mapping: HashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
.enumerate()
.map(|(term_ord, unord_term_id)| {
(unord_term_id as UnorderedTermId, term_ord as TermOrdinal)
@@ -194,7 +199,7 @@ pub trait PostingsWriter {
fn serialize(
&self,
term_addrs: &[(&[u8], Addr, UnorderedTermId)],
serializer: &mut FieldSerializer,
serializer: &mut FieldSerializer<'_>,
term_heap: &MemoryArena,
heap: &MemoryArena,
) -> io::Result<()>;
@@ -205,7 +210,7 @@ pub trait PostingsWriter {
term_index: &mut TermHashMap,
doc_id: DocId,
field: Field,
token_stream: &mut TokenStream,
token_stream: &mut dyn TokenStream,
heap: &mut MemoryArena,
) -> u32 {
let mut term = Term::for_field(field);
@@ -246,7 +251,7 @@ impl<Rec: Recorder + 'static> SpecializedPostingsWriter<Rec> {
}
/// Builds a `SpecializedPostingsWriter` storing its data in a heap.
pub fn new_boxed() -> Box<PostingsWriter> {
pub fn new_boxed() -> Box<dyn PostingsWriter> {
Box::new(SpecializedPostingsWriter::<Rec>::new())
}
}
@@ -283,7 +288,7 @@ impl<Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<Rec>
fn serialize(
&self,
term_addrs: &[(&[u8], Addr, UnorderedTermId)],
serializer: &mut FieldSerializer,
serializer: &mut FieldSerializer<'_>,
termdict_heap: &MemoryArena,
heap: &MemoryArena,
) -> io::Result<()> {

View File

@@ -1,8 +1,8 @@
use super::stacker::{ExpUnrolledLinkedList, MemoryArena};
use common::{read_u32_vint, write_u32_vint};
use postings::FieldSerializer;
use crate::common::{read_u32_vint, write_u32_vint};
use crate::postings::FieldSerializer;
use crate::DocId;
use std::io;
use DocId;
const POSITION_END: u32 = 0;
@@ -72,7 +72,7 @@ pub(crate) trait Recorder: Copy + 'static {
fn serialize(
&self,
buffer_lender: &mut BufferLender,
serializer: &mut FieldSerializer,
serializer: &mut FieldSerializer<'_>,
heap: &MemoryArena,
) -> io::Result<()>;
}
@@ -108,7 +108,7 @@ impl Recorder for NothingRecorder {
fn serialize(
&self,
buffer_lender: &mut BufferLender,
serializer: &mut FieldSerializer,
serializer: &mut FieldSerializer<'_>,
heap: &MemoryArena,
) -> io::Result<()> {
let buffer = buffer_lender.lend_u8();
@@ -159,7 +159,7 @@ impl Recorder for TermFrequencyRecorder {
fn serialize(
&self,
buffer_lender: &mut BufferLender,
serializer: &mut FieldSerializer,
serializer: &mut FieldSerializer<'_>,
heap: &MemoryArena,
) -> io::Result<()> {
let buffer = buffer_lender.lend_u8();
@@ -208,7 +208,7 @@ impl Recorder for TFAndPositionRecorder {
fn serialize(
&self,
buffer_lender: &mut BufferLender,
serializer: &mut FieldSerializer,
serializer: &mut FieldSerializer<'_>,
heap: &MemoryArena,
) -> io::Result<()> {
let (buffer_u8, buffer_positions) = buffer_lender.lend_all();

View File

@@ -1,21 +1,21 @@
use common::BitSet;
use common::HasLen;
use common::{BinarySerializable, VInt};
use docset::{DocSet, SkipResult};
use crate::common::BitSet;
use crate::common::HasLen;
use crate::common::{BinarySerializable, VInt};
use crate::docset::{DocSet, SkipResult};
use crate::positions::PositionReader;
use crate::postings::compression::{compressed_block_size, AlignedBuffer};
use crate::postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE};
use crate::postings::serializer::PostingsSerializer;
use crate::postings::BlockSearcher;
use crate::postings::FreqReadingOption;
use crate::postings::Postings;
use crate::postings::SkipReader;
use crate::postings::USE_SKIP_INFO_LIMIT;
use crate::schema::IndexRecordOption;
use crate::DocId;
use owned_read::OwnedRead;
use positions::PositionReader;
use postings::compression::{compressed_block_size, AlignedBuffer};
use postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE};
use postings::serializer::PostingsSerializer;
use postings::BlockSearcher;
use postings::FreqReadingOption;
use postings::Postings;
use postings::SkipReader;
use postings::USE_SKIP_INFO_LIMIT;
use schema::IndexRecordOption;
use std::cmp::Ordering;
use tantivy_fst::Streamer;
use DocId;
struct PositionComputer {
// stores the number of position ints
@@ -611,17 +611,17 @@ mod tests {
use super::BlockSegmentPostings;
use super::BlockSegmentPostingsSkipResult;
use super::SegmentPostings;
use common::HasLen;
use core::Index;
use docset::DocSet;
use postings::postings::Postings;
use schema::IndexRecordOption;
use schema::Schema;
use schema::Term;
use schema::INDEXED;
use crate::common::HasLen;
use crate::core::Index;
use crate::docset::DocSet;
use crate::postings::postings::Postings;
use crate::schema::IndexRecordOption;
use crate::schema::Schema;
use crate::schema::Term;
use crate::schema::INDEXED;
use crate::DocId;
use crate::SkipResult;
use tantivy_fst::Streamer;
use DocId;
use SkipResult;
#[test]
fn test_empty_segment_postings() {

View File

@@ -1,18 +1,18 @@
use super::TermInfo;
use common::{BinarySerializable, VInt};
use common::{CompositeWrite, CountingWriter};
use core::Segment;
use directory::WritePtr;
use positions::PositionSerializer;
use postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
use postings::skip::SkipSerializer;
use postings::USE_SKIP_INFO_LIMIT;
use schema::Schema;
use schema::{Field, FieldEntry, FieldType};
use crate::common::{BinarySerializable, VInt};
use crate::common::{CompositeWrite, CountingWriter};
use crate::core::Segment;
use crate::directory::WritePtr;
use crate::positions::PositionSerializer;
use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
use crate::postings::skip::SkipSerializer;
use crate::postings::USE_SKIP_INFO_LIMIT;
use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::DocId;
use crate::Result;
use std::io::{self, Write};
use termdict::{TermDictionaryBuilder, TermOrdinal};
use DocId;
use Result;
/// `InvertedIndexSerializer` is in charge of serializing
/// postings on disk, in the
@@ -73,7 +73,7 @@ impl InvertedIndexSerializer {
/// Open a new `PostingsSerializer` for the given segment
pub fn open(segment: &mut Segment) -> Result<InvertedIndexSerializer> {
use SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS};
use crate::SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS};
InvertedIndexSerializer::create(
CompositeWrite::wrap(segment.open_write(TERMS)?),
CompositeWrite::wrap(segment.open_write(POSTINGS)?),
@@ -91,7 +91,7 @@ impl InvertedIndexSerializer {
&mut self,
field: Field,
total_num_tokens: u64,
) -> io::Result<FieldSerializer> {
) -> io::Result<FieldSerializer<'_>> {
let field_entry: &FieldEntry = self.schema.get_field_entry(field);
let term_dictionary_write = self.terms_write.for_field(field);
let postings_write = self.postings_write.for_field(field);

View File

@@ -1,8 +1,8 @@
use common::BinarySerializable;
use crate::common::BinarySerializable;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::schema::IndexRecordOption;
use crate::DocId;
use owned_read::OwnedRead;
use postings::compression::COMPRESSION_BLOCK_SIZE;
use schema::IndexRecordOption;
use DocId;
pub struct SkipSerializer {
buffer: Vec<u8>,

View File

@@ -1,7 +1,7 @@
use super::{Addr, MemoryArena};
use postings::stacker::memory_arena::load;
use postings::stacker::memory_arena::store;
use crate::postings::stacker::memory_arena::load;
use crate::postings::stacker::memory_arena::store;
use std::io;
use std::mem;
@@ -16,8 +16,8 @@ enum CapacityResult {
fn len_to_capacity(len: u32) -> CapacityResult {
match len {
0...15 => CapacityResult::Available(FIRST_BLOCK as u32 - len),
16...MAX_BLOCK_LEN => {
0..=15 => CapacityResult::Available(FIRST_BLOCK as u32 - len),
16..=MAX_BLOCK_LEN => {
let cap = 1 << (32u32 - (len - 1u32).leading_zeros());
let available = cap - len;
if available == 0 {
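
This hunk is the one place the migration touches pattern syntax: the deprecated 2015 inclusive-range form `0...15` becomes `0..=15`. A self-contained sketch of the same match shape, with made-up bucket names:

// `...` in patterns warns on the 2018 edition; `..=` is the
// replacement and denotes the same inclusive range.
fn bucket(len: u32) -> &'static str {
    match len {
        0..=15 => "first block",
        16..=255 => "exponential growth",
        _ => "capped",
    }
}

fn main() {
    assert_eq!(bucket(7), "first block");
    assert_eq!(bucket(100), "exponential growth");
    assert_eq!(bucket(1_000), "capped");
}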

View File

@@ -1,11 +1,11 @@
extern crate murmurhash32;
use murmurhash32;
use self::murmurhash32::murmurhash2;
use super::{Addr, MemoryArena};
use crate::postings::stacker::memory_arena::store;
use crate::postings::UnorderedTermId;
use byteorder::{ByteOrder, NativeEndian};
use postings::stacker::memory_arena::store;
use postings::UnorderedTermId;
use std::iter;
use std::mem;
use std::slice;
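
One subtlety above: the module-local `extern crate murmurhash32;` cannot simply be deleted, because the next line refers to `self::murmurhash32::murmurhash2`. Replacing it with `use murmurhash32;` re-binds the crate name inside the module so the `self::` path keeps resolving. A sketch, assuming the `murmurhash32` crate as a dependency:

mod hashing {
    // In 2015 this line read `extern crate murmurhash32;`.
    // The 2018 `use` pulls the crate name into this module's namespace,
    // so `self::murmurhash32::...` paths still work unchanged.
    use murmurhash32;

    pub fn hash(key: &[u8]) -> u32 {
        self::murmurhash32::murmurhash2(key)
    }
}

fn main() {
    println!("{}", hashing::hash(b"tantivy"));
}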
@@ -154,7 +154,7 @@ impl TermHashMap {
unordered_term_id
}
pub fn iter(&self) -> Iter {
pub fn iter(&self) -> Iter<'_> {
Iter {
inner: self.occupied.iter(),
hashmap: &self,

View File

@@ -1,4 +1,4 @@
use common::{BinarySerializable, FixedSize};
use crate::common::{BinarySerializable, FixedSize};
use std::io;
/// `TermInfo` wraps the metadata associated with a Term.
@@ -45,7 +45,7 @@ impl BinarySerializable for TermInfo {
mod tests {
use super::TermInfo;
use common::test::fixed_size_test;
use crate::common::test::fixed_size_test;
#[test]
fn test_fixed_size() {

View File

@@ -1,11 +1,11 @@
use core::Searcher;
use core::SegmentReader;
use docset::DocSet;
use query::explanation::does_not_match;
use query::{Explanation, Query, Scorer, Weight};
use DocId;
use Result;
use Score;
use crate::core::Searcher;
use crate::core::SegmentReader;
use crate::docset::DocSet;
use crate::query::explanation::does_not_match;
use crate::query::{Explanation, Query, Scorer, Weight};
use crate::DocId;
use crate::Result;
use crate::Score;
/// Query that matches all of the documents.
///
@@ -14,7 +14,7 @@ use Score;
pub struct AllQuery;
impl Query for AllQuery {
fn weight(&self, _: &Searcher, _: bool) -> Result<Box<Weight>> {
fn weight(&self, _: &Searcher, _: bool) -> Result<Box<dyn Weight>> {
Ok(Box::new(AllWeight))
}
}
@@ -23,7 +23,7 @@ impl Query for AllQuery {
pub struct AllWeight;
impl Weight for AllWeight {
fn scorer(&self, reader: &SegmentReader) -> Result<Box<Scorer>> {
fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
Ok(Box::new(AllScorer {
state: State::NotStarted,
doc: 0u32,
@@ -93,9 +93,9 @@ impl Scorer for AllScorer {
mod tests {
use super::AllQuery;
use query::Query;
use schema::{Schema, TEXT};
use Index;
use crate::query::Query;
use crate::schema::{Schema, TEXT};
use crate::Index;
#[test]
fn test_all_query() {

View File

@@ -1,14 +1,14 @@
use common::BitSet;
use core::SegmentReader;
use query::ConstScorer;
use query::{BitSetDocSet, Explanation};
use query::{Scorer, Weight};
use schema::{Field, IndexRecordOption};
use crate::common::BitSet;
use crate::core::SegmentReader;
use crate::query::ConstScorer;
use crate::query::{BitSetDocSet, Explanation};
use crate::query::{Scorer, Weight};
use crate::schema::{Field, IndexRecordOption};
use crate::termdict::{TermDictionary, TermStreamer};
use crate::DocId;
use crate::TantivyError;
use crate::{Result, SkipResult};
use tantivy_fst::Automaton;
use termdict::{TermDictionary, TermStreamer};
use DocId;
use TantivyError;
use {Result, SkipResult};
/// A weight struct for Fuzzy Term and Regex Queries
pub struct AutomatonWeight<A>
@@ -38,7 +38,7 @@ impl<A> Weight for AutomatonWeight<A>
where
A: Automaton + Send + Sync + 'static,
{
fn scorer(&self, reader: &SegmentReader) -> Result<Box<Scorer>> {
fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);

View File

@@ -1,7 +1,7 @@
use common::{BitSet, TinySet};
use docset::{DocSet, SkipResult};
use crate::common::{BitSet, TinySet};
use crate::docset::{DocSet, SkipResult};
use crate::DocId;
use std::cmp::Ordering;
use DocId;
/// A `BitSetDocSet` makes it possible to iterate through a bitset as if it were a `DocSet`.
///
@@ -121,9 +121,9 @@ impl DocSet for BitSetDocSet {
#[cfg(test)]
mod tests {
use super::BitSetDocSet;
use common::BitSet;
use docset::{DocSet, SkipResult};
use DocId;
use crate::common::BitSet;
use crate::docset::{DocSet, SkipResult};
use crate::DocId;
fn create_docbitset(docs: &[DocId], max_doc: DocId) -> BitSetDocSet {
let mut docset = BitSet::with_max_value(max_doc);

View File

@@ -1,8 +1,8 @@
use fieldnorm::FieldNormReader;
use query::Explanation;
use Score;
use Searcher;
use Term;
use crate::fieldnorm::FieldNormReader;
use crate::query::Explanation;
use crate::Score;
use crate::Searcher;
use crate::Term;
const K1: f32 = 1.2;
const B: f32 = 0.75;
@@ -131,7 +131,7 @@ impl BM25Weight {
mod tests {
use super::idf;
use tests::assert_nearly_equals;
use crate::tests::assert_nearly_equals;
#[test]
fn test_idf() {

View File

@@ -1,13 +1,13 @@
use super::boolean_weight::BooleanWeight;
use query::Occur;
use query::Query;
use query::TermQuery;
use query::Weight;
use schema::IndexRecordOption;
use schema::Term;
use crate::query::Occur;
use crate::query::Query;
use crate::query::TermQuery;
use crate::query::Weight;
use crate::schema::IndexRecordOption;
use crate::schema::Term;
use crate::Result;
use crate::Searcher;
use std::collections::BTreeSet;
use Result;
use Searcher;
/// The boolean query combines a set of queries
///
@@ -21,7 +21,7 @@ use Searcher;
/// a `MustNot` occurrence.
#[derive(Debug)]
pub struct BooleanQuery {
subqueries: Vec<(Occur, Box<Query>)>,
subqueries: Vec<(Occur, Box<dyn Query>)>,
}
impl Clone for BooleanQuery {
@@ -34,14 +34,14 @@ impl Clone for BooleanQuery {
}
}
impl From<Vec<(Occur, Box<Query>)>> for BooleanQuery {
fn from(subqueries: Vec<(Occur, Box<Query>)>) -> BooleanQuery {
impl From<Vec<(Occur, Box<dyn Query>)>> for BooleanQuery {
fn from(subqueries: Vec<(Occur, Box<dyn Query>)>) -> BooleanQuery {
BooleanQuery { subqueries }
}
}
impl Query for BooleanQuery {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result<Box<Weight>> {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result<Box<dyn Weight>> {
let sub_weights = self
.subqueries
.iter()
@@ -63,10 +63,10 @@ impl BooleanQuery {
/// Helper method to create a boolean query matching a given list of terms.
/// The resulting query is a disjunction of the terms.
pub fn new_multiterms_query(terms: Vec<Term>) -> BooleanQuery {
let occur_term_queries: Vec<(Occur, Box<Query>)> = terms
let occur_term_queries: Vec<(Occur, Box<dyn Query>)> = terms
.into_iter()
.map(|term| {
let term_query: Box<Query> =
let term_query: Box<dyn Query> =
Box::new(TermQuery::new(term, IndexRecordOption::WithFreqs));
(Occur::Should, term_query)
})
@@ -75,7 +75,7 @@ impl BooleanQuery {
}
/// Deconstructed view of the clauses making up this query.
pub fn clauses(&self) -> &[(Occur, Box<Query>)] {
pub fn clauses(&self) -> &[(Occur, Box<dyn Query>)] {
&self.subqueries[..]
}
}

View File

@@ -1,20 +1,20 @@
use core::SegmentReader;
use query::explanation::does_not_match;
use query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner};
use query::term_query::TermScorer;
use query::EmptyScorer;
use query::Exclude;
use query::Occur;
use query::RequiredOptionalScorer;
use query::Scorer;
use query::Union;
use query::Weight;
use query::{intersect_scorers, Explanation};
use crate::core::SegmentReader;
use crate::query::explanation::does_not_match;
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner};
use crate::query::term_query::TermScorer;
use crate::query::EmptyScorer;
use crate::query::Exclude;
use crate::query::Occur;
use crate::query::RequiredOptionalScorer;
use crate::query::Scorer;
use crate::query::Union;
use crate::query::Weight;
use crate::query::{intersect_scorers, Explanation};
use crate::Result;
use crate::{DocId, SkipResult};
use std::collections::HashMap;
use Result;
use {DocId, SkipResult};
fn scorer_union<TScoreCombiner>(scorers: Vec<Box<Scorer>>) -> Box<Scorer>
fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> Box<dyn Scorer>
where
TScoreCombiner: ScoreCombiner,
{
@@ -30,22 +30,23 @@ where
.into_iter()
.map(|scorer| *(scorer.downcast::<TermScorer>().map_err(|_| ()).unwrap()))
.collect();
let scorer: Box<Scorer> = Box::new(Union::<TermScorer, TScoreCombiner>::from(scorers));
let scorer: Box<dyn Scorer> =
Box::new(Union::<TermScorer, TScoreCombiner>::from(scorers));
return scorer;
}
}
let scorer: Box<Scorer> = Box::new(Union::<_, TScoreCombiner>::from(scorers));
let scorer: Box<dyn Scorer> = Box::new(Union::<_, TScoreCombiner>::from(scorers));
scorer
}
pub struct BooleanWeight {
weights: Vec<(Occur, Box<Weight>)>,
weights: Vec<(Occur, Box<dyn Weight>)>,
scoring_enabled: bool,
}
impl BooleanWeight {
pub fn new(weights: Vec<(Occur, Box<Weight>)>, scoring_enabled: bool) -> BooleanWeight {
pub fn new(weights: Vec<(Occur, Box<dyn Weight>)>, scoring_enabled: bool) -> BooleanWeight {
BooleanWeight {
weights,
scoring_enabled,
@@ -55,10 +56,10 @@ impl BooleanWeight {
fn per_occur_scorers(
&self,
reader: &SegmentReader,
) -> Result<HashMap<Occur, Vec<Box<Scorer>>>> {
let mut per_occur_scorers: HashMap<Occur, Vec<Box<Scorer>>> = HashMap::new();
) -> Result<HashMap<Occur, Vec<Box<dyn Scorer>>>> {
let mut per_occur_scorers: HashMap<Occur, Vec<Box<dyn Scorer>>> = HashMap::new();
for &(ref occur, ref subweight) in &self.weights {
let sub_scorer: Box<Scorer> = subweight.scorer(reader)?;
let sub_scorer: Box<dyn Scorer> = subweight.scorer(reader)?;
per_occur_scorers
.entry(*occur)
.or_insert_with(Vec::new)
@@ -70,22 +71,22 @@ impl BooleanWeight {
fn complex_scorer<TScoreCombiner: ScoreCombiner>(
&self,
reader: &SegmentReader,
) -> Result<Box<Scorer>> {
) -> Result<Box<dyn Scorer>> {
let mut per_occur_scorers = self.per_occur_scorers(reader)?;
let should_scorer_opt: Option<Box<Scorer>> = per_occur_scorers
let should_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::Should)
.map(scorer_union::<TScoreCombiner>);
let exclude_scorer_opt: Option<Box<Scorer>> = per_occur_scorers
let exclude_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::MustNot)
.map(scorer_union::<TScoreCombiner>);
let must_scorer_opt: Option<Box<Scorer>> = per_occur_scorers
let must_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::Must)
.map(intersect_scorers);
let positive_scorer: Box<Scorer> = match (should_scorer_opt, must_scorer_opt) {
let positive_scorer: Box<dyn Scorer> = match (should_scorer_opt, must_scorer_opt) {
(Some(should_scorer), Some(must_scorer)) => {
if self.scoring_enabled {
Box::new(RequiredOptionalScorer::<_, _, TScoreCombiner>::new(
@@ -112,7 +113,7 @@ impl BooleanWeight {
}
impl Weight for BooleanWeight {
fn scorer(&self, reader: &SegmentReader) -> Result<Box<Scorer>> {
fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
if self.weights.is_empty() {
Ok(Box::new(EmptyScorer))
} else if self.weights.len() == 1 {

View File

@@ -7,19 +7,19 @@ pub use self::boolean_query::BooleanQuery;
mod tests {
use super::*;
use collector::tests::TestCollector;
use query::score_combiner::SumWithCoordsCombiner;
use query::term_query::TermScorer;
use query::Intersection;
use query::Occur;
use query::Query;
use query::QueryParser;
use query::RequiredOptionalScorer;
use query::Scorer;
use query::TermQuery;
use schema::*;
use Index;
use {DocAddress, DocId};
use crate::collector::tests::TestCollector;
use crate::query::score_combiner::SumWithCoordsCombiner;
use crate::query::term_query::TermScorer;
use crate::query::Intersection;
use crate::query::Occur;
use crate::query::Query;
use crate::query::QueryParser;
use crate::query::RequiredOptionalScorer;
use crate::query::Scorer;
use crate::query::TermQuery;
use crate::schema::*;
use crate::Index;
use crate::{DocAddress, DocId};
fn aux_test_helper() -> (Index, Field) {
let mut schema_builder = Schema::builder();
@@ -89,7 +89,7 @@ mod tests {
let query = query_parser.parse_query("+a +(b c)").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
assert!(scorer.is::<Intersection<Box<Scorer>>>());
assert!(scorer.is::<Intersection<Box<dyn Scorer>>>());
}
}
@@ -102,8 +102,11 @@ mod tests {
let query = query_parser.parse_query("+a b").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
assert!(scorer
.is::<RequiredOptionalScorer<Box<Scorer>, Box<Scorer>, SumWithCoordsCombiner>>());
assert!(scorer.is::<RequiredOptionalScorer<
Box<dyn Scorer>,
Box<dyn Scorer>,
SumWithCoordsCombiner,
>>());
}
{
let query = query_parser.parse_query("+a b").unwrap();
@@ -122,13 +125,13 @@ mod tests {
Term::from_field_text(text_field, text),
IndexRecordOption::Basic,
);
let query: Box<Query> = Box::new(term_query);
let query: Box<dyn Query> = Box::new(term_query);
query
};
let reader = index.reader().unwrap();
let matching_docs = |boolean_query: &Query| {
let matching_docs = |boolean_query: &dyn Query| {
reader
.searcher()
.search(boolean_query, &TestCollector)
@@ -185,11 +188,11 @@ mod tests {
Term::from_field_text(text_field, text),
IndexRecordOption::Basic,
);
let query: Box<Query> = Box::new(term_query);
let query: Box<dyn Query> = Box::new(term_query);
query
};
let reader = index.reader().unwrap();
let score_docs = |boolean_query: &Query| {
let score_docs = |boolean_query: &dyn Query| {
let fruit = reader
.searcher()
.search(boolean_query, &TestCollector)

View File

@@ -1,13 +1,13 @@
use super::Scorer;
use query::explanation::does_not_match;
use query::Weight;
use query::{Explanation, Query};
use DocId;
use DocSet;
use Result;
use Score;
use Searcher;
use SegmentReader;
use crate::query::explanation::does_not_match;
use crate::query::Weight;
use crate::query::{Explanation, Query};
use crate::DocId;
use crate::DocSet;
use crate::Result;
use crate::Score;
use crate::Searcher;
use crate::SegmentReader;
/// `EmptyQuery` is a dummy `Query` that matches no documents.
///
@@ -16,7 +16,7 @@ use SegmentReader;
pub struct EmptyQuery;
impl Query for EmptyQuery {
fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<Weight>> {
fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<dyn Weight>> {
Ok(Box::new(EmptyWeight))
}
@@ -30,7 +30,7 @@ impl Query for EmptyQuery {
/// It is useful for tests and handling edge cases.
pub struct EmptyWeight;
impl Weight for EmptyWeight {
fn scorer(&self, _reader: &SegmentReader) -> Result<Box<Scorer>> {
fn scorer(&self, _reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
Ok(Box::new(EmptyScorer))
}
@@ -69,8 +69,8 @@ impl Scorer for EmptyScorer {
#[cfg(test)]
mod tests {
use query::EmptyScorer;
use DocSet;
use crate::query::EmptyScorer;
use crate::DocSet;
#[test]
fn test_empty_scorer() {

Some files were not shown because too many files have changed in this diff.