Compare commits


5 Commits

Author SHA1 Message Date
Paul Masurel
52c24e1c06 first stab 2019-11-01 09:05:35 +09:00
Alberto Piai
3a65dc84c8 TopDocs: ensure stable sorting on equal score (#675)
* TopDocs: ensure stable sorting on equal score

When selecting the top K documents by score, we need to ensure stable
sorting. Until now, for documents with the same score, we were relying
on the (arbitrary) order returned by the BinaryHeap used to implement
the collectors.

This patch fixes the problem by explicitly using the doc address when
harvesting the `TopSegmentCollector` and when merging the results in
`TopCollector::merge_fruits()`.

This is important (for example) to implement pagination correctly using
the TopDocs collector. If sorting isn't stable, documents that have the
same score might be ranked in different positions depending on the
specific K that was used, thus appearing in two different pages, or in
none at all.

Fixes gh-671

* TMP: alternative solution (see previous commit)

If we add the constraint that D is also PartialOrd in ComparableDoc<T,
D>, then we can move the comparison by doc address directly in the cmp
implementation of ComparableDoc.

* TMP rebase as first commit: add benchmarks for TopSegmentCollector

* fixup! TMP: alternative solution (see previous commit)

* TMP add changelog entry

* TMP run cargo fmt
2019-10-26 15:27:25 +09:00
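
Note: the pagination hazard described in this commit message is easy to reproduce in isolation. Below is a minimal, self-contained sketch (plain std Rust, not tantivy code; integer scores stand in for tantivy's f32 scores to sidestep float ordering) of top-k selection that breaks ties on ascending doc id, so the top 2 are always a prefix of the top 3. The Reverse wrappers make the max-heap behave as a min-heap over (score, doc), the same trick ComparableDoc uses in the diff below.

use std::cmp::Reverse;
use std::collections::BinaryHeap;

/// Top-k by score with ascending doc id as the tie-breaker.
fn top_k(docs: &[(u32, u64)], k: usize) -> Vec<(u32, u64)> {
    // Min-heap over (score, Reverse(doc)): among equal scores, the entry
    // with the largest doc id compares smallest and is evicted first.
    let mut heap: BinaryHeap<Reverse<(u64, Reverse<u32>)>> = BinaryHeap::new();
    for &(doc, score) in docs {
        heap.push(Reverse((score, Reverse(doc))));
        if heap.len() > k {
            heap.pop();
        }
    }
    let mut out: Vec<(u32, u64)> = heap
        .into_iter()
        .map(|Reverse((score, Reverse(doc)))| (doc, score))
        .collect();
    // Descending score, then ascending doc id: the same order for every k.
    out.sort_by(|a, b| b.1.cmp(&a.1).then(a.0.cmp(&b.0)));
    out
}

fn main() {
    let docs = [(4u32, 3u64), (5, 3), (6, 3)]; // three documents, equal score
    assert_eq!(top_k(&docs, 2), top_k(&docs, 3)[..2]);
}
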
dependabot-preview[bot]
ce42bbf5c9 Update base64 requirement from 0.10.0 to 0.11.0 (#676)
Updates the requirements on [base64](https://github.com/marshallpierce/rust-base64) to permit the latest version.
- [Release notes](https://github.com/marshallpierce/rust-base64/releases)
- [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md)
- [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.10.0...v0.11.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-10-26 15:24:47 +09:00
Paul Masurel
7b21b3f25a Refactoring around Field (#673)
* Refactoring around Field

Removing the contract about the order of the fields, and the
field id allocation.

* Update delete_queue.rs

* Update field.rs
2019-10-25 09:06:44 +09:00
Paul Masurel
46caec1040 Updating uuid to 0.8 (#674) 2019-10-25 09:02:00 +09:00
25 changed files with 273 additions and 88 deletions

View File

@@ -9,6 +9,7 @@ Tantivy 0.11.0
- API change around `Box<BoxableTokenizer>`. See detail in #629
- Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
- Add footer with some metadata to index files. #605 (@fdb-hiroshima)
- TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
## How to update?

View File

@@ -13,7 +13,7 @@ keywords = ["search", "information", "retrieval"]
edition = "2018"
[dependencies]
base64 = "0.10.0"
base64 = "0.11.0"
byteorder = "1.0"
crc32fast = "1.2.0"
once_cell = "1.0"
@@ -34,7 +34,7 @@ itertools = "0.8"
levenshtein_automata = {version="0.1", features=["fst_automaton"]}
notify = {version="4", optional=true}
bit-set = "0.5"
uuid = { version = "0.7.2", features = ["v4", "serde"] }
uuid = { version = "0.8", features = ["v4", "serde"] }
crossbeam = "0.7"
futures = "0.1"
futures-cpupool = "0.1"

View File

@@ -515,7 +515,7 @@ mod tests {
#[should_panic(expected = "Tried to add a facet which is a descendant of \
an already added facet.")]
fn test_misused_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field(0));
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));
facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/country/europe"));
}
@@ -546,7 +546,7 @@ mod tests {
#[test]
fn test_non_used_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field(0));
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));
facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/countryeurope"));
}

View File

@@ -12,6 +12,9 @@ use std::collections::BinaryHeap;
/// It has a custom implementation of `PartialOrd` that reverses the order. This is because the
/// default Rust heap is a max heap, whereas a min heap is needed.
///
/// Additionally, it guarantees stable sorting: in case of a tie on the feature, the document
/// address is used.
///
/// WARNING: equality is not what you would expect here.
/// Two elements are equal if their feature is equal, and regardless of whether `doc`
/// is equal. This should be perfectly fine for this usage, but let's make sure this
@@ -21,29 +24,37 @@ struct ComparableDoc<T, D> {
doc: D,
}
impl<T: PartialOrd, D> PartialOrd for ComparableDoc<T, D> {
impl<T: PartialOrd, D: PartialOrd> PartialOrd for ComparableDoc<T, D> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<T: PartialOrd, D> Ord for ComparableDoc<T, D> {
impl<T: PartialOrd, D: PartialOrd> Ord for ComparableDoc<T, D> {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
other
// Reversed to make BinaryHeap work as a min-heap
let by_feature = other
.feature
.partial_cmp(&self.feature)
.unwrap_or_else(|| Ordering::Equal)
.unwrap_or(Ordering::Equal);
let lazy_by_doc_address = || self.doc.partial_cmp(&other.doc).unwrap_or(Ordering::Equal);
// In case of a tie on the feature, we sort by ascending
// `DocAddress` in order to ensure a stable sorting of the
// documents.
by_feature.then_with(lazy_by_doc_address)
}
}
impl<T: PartialOrd, D> PartialEq for ComparableDoc<T, D> {
impl<T: PartialOrd, D: PartialOrd> PartialEq for ComparableDoc<T, D> {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl<T: PartialOrd, D> Eq for ComparableDoc<T, D> {}
impl<T: PartialOrd, D: PartialOrd> Eq for ComparableDoc<T, D> {}
pub(crate) struct TopCollector<T> {
limit: usize,
@@ -214,4 +225,94 @@ mod tests {
]
);
}
#[test]
fn test_top_segment_collector_stable_ordering_for_equal_feature() {
// given that the documents are collected in ascending doc id order,
// when harvesting we have to guarantee stable sorting in case of a tie
// on the score
let doc_ids_collection = [4, 5, 6];
let score = 3.14;
let mut top_collector_limit_2 = TopSegmentCollector::new(0, 2);
for id in &doc_ids_collection {
top_collector_limit_2.collect(*id, score);
}
let mut top_collector_limit_3 = TopSegmentCollector::new(0, 3);
for id in &doc_ids_collection {
top_collector_limit_3.collect(*id, score);
}
assert_eq!(
top_collector_limit_2.harvest(),
top_collector_limit_3.harvest()[..2].to_vec(),
);
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::TopSegmentCollector;
use test::Bencher;
#[bench]
fn bench_top_segment_collector_collect_not_at_capacity(b: &mut Bencher) {
let mut top_collector = TopSegmentCollector::new(0, 400);
b.iter(|| {
for i in 0..100 {
top_collector.collect(i, 0.8);
}
});
}
#[bench]
fn bench_top_segment_collector_collect_at_capacity(b: &mut Bencher) {
let mut top_collector = TopSegmentCollector::new(0, 100);
for i in 0..100 {
top_collector.collect(i, 0.8);
}
b.iter(|| {
for i in 0..100 {
top_collector.collect(i, 0.8);
}
});
}
#[bench]
fn bench_top_segment_collector_collect_and_harvest_many_ties(b: &mut Bencher) {
b.iter(|| {
let mut top_collector = TopSegmentCollector::new(0, 100);
for i in 0..100 {
top_collector.collect(i, 0.8);
}
// it would be nice to be able to do the setup N times but still
// measure only harvest(). We can't since harvest() consumes
// the top_collector.
top_collector.harvest()
});
}
#[bench]
fn bench_top_segment_collector_collect_and_harvest_no_tie(b: &mut Bencher) {
b.iter(|| {
let mut top_collector = TopSegmentCollector::new(0, 100);
let mut score = 1.0;
for i in 0..100 {
score += 1.0;
top_collector.collect(i, score);
}
// it would be nice to be able to do the setup N times but still
// measure only harvest(). We can't since harvest() consumes
// the top_collector.
top_collector.harvest()
});
}
}
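
The comments in the harvest benchmarks above note that the setup cannot be repeated without also timing it, because harvest() consumes the collector. Criterion's iter_batched separates the two; the following is a sketch under the assumption that criterion were added as a dev-dependency (it is not part of this diff) and that the code lives inside the crate, since TopSegmentCollector is crate-private.

use criterion::{criterion_group, criterion_main, BatchSize, Criterion};

fn bench_harvest(c: &mut Criterion) {
    c.bench_function("top_segment_collector_harvest_many_ties", |b| {
        b.iter_batched(
            // Setup runs outside the timed region, once per batch.
            || {
                let mut top_collector = TopSegmentCollector::new(0, 100);
                for i in 0..100 {
                    top_collector.collect(i, 0.8);
                }
                top_collector
            },
            // Only this closure is timed; it may freely consume the collector.
            |top_collector| top_collector.harvest(),
            BatchSize::SmallInput,
        )
    });
}

criterion_group!(benches, bench_harvest);
criterion_main!(benches);
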

View File

@@ -15,13 +15,16 @@ use crate::SegmentLocalId;
use crate::SegmentReader;
use std::fmt;
/// The Top Score Collector keeps track of the K documents
/// The `TopDocs` collector keeps track of the top `K` documents
/// sorted by their score.
///
/// The implementation is based on a `BinaryHeap`.
/// The theoretical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
///
/// This collector guarantees a stable sorting in case of a tie on the
/// document score. As such, it is suitable to implement pagination.
///
/// ```rust
/// use tantivy::collector::TopDocs;
/// use tantivy::query::QueryParser;
@@ -428,12 +431,13 @@ impl SegmentCollector for TopScoreSegmentCollector {
mod tests {
use super::TopDocs;
use crate::collector::Collector;
use crate::query::{Query, QueryParser};
use crate::query::{AllQuery, Query, QueryParser};
use crate::schema::{Field, Schema, FAST, STORED, TEXT};
use crate::DocAddress;
use crate::Index;
use crate::IndexWriter;
use crate::Score;
use itertools::Itertools;
fn make_index() -> Index {
let mut schema_builder = Schema::builder();
@@ -494,6 +498,29 @@ mod tests {
);
}
#[test]
fn test_top_collector_stable_sorting() {
let index = make_index();
// using AllQuery to get a constant score
let searcher = index.reader().unwrap().searcher();
let page_1 = searcher.search(&AllQuery, &TopDocs::with_limit(2)).unwrap();
let page_2 = searcher.search(&AllQuery, &TopDocs::with_limit(3)).unwrap();
// precondition for the test to be meaningful: we did get documents
// with the same score
assert!(page_1.iter().map(|result| result.0).all_equal());
assert!(page_2.iter().map(|result| result.0).all_equal());
// sanity check since we're relying on make_index()
assert_eq!(page_1.len(), 2);
assert_eq!(page_2.len(), 3);
assert_eq!(page_1, &page_2[..page_1.len()]);
}
#[test]
#[should_panic]
fn test_top_0() {
@@ -551,7 +578,7 @@ mod tests {
));
});
let searcher = index.reader().unwrap().searcher();
let top_collector = TopDocs::with_limit(4).order_by_u64_field(Field(2));
let top_collector = TopDocs::with_limit(4).order_by_u64_field(Field::from_field_id(2));
let segment_reader = searcher.segment_reader(0u32);
top_collector
.for_segment(0, segment_reader)

View File

@@ -199,13 +199,13 @@ mod test {
let w = directory.open_write(path).unwrap();
let mut composite_write = CompositeWrite::wrap(w);
{
let mut write_0 = composite_write.for_field(Field(0u32));
let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
VInt(32431123u64).serialize(&mut write_0).unwrap();
write_0.flush().unwrap();
}
{
let mut write_4 = composite_write.for_field(Field(4u32));
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
VInt(2).serialize(&mut write_4).unwrap();
write_4.flush().unwrap();
}
@@ -215,14 +215,18 @@ mod test {
let r = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&r).unwrap();
{
let file0 = composite_file.open_read(Field(0u32)).unwrap();
let file0 = composite_file
.open_read(Field::from_field_id(0u32))
.unwrap();
let mut file0_buf = file0.as_slice();
let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
assert_eq!(file0_buf.len(), 0);
assert_eq!(payload_0, 32431123u64);
}
{
let file4 = composite_file.open_read(Field(4u32)).unwrap();
let file4 = composite_file
.open_read(Field::from_field_id(4u32))
.unwrap();
let mut file4_buf = file4.as_slice();
let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
assert_eq!(file4_buf.len(), 0);

View File

@@ -76,7 +76,7 @@ impl SegmentId {
}
/// Error type used when parsing a `SegmentId` from a string fails.
pub struct SegmentIdParseError(uuid::parser::ParseError);
pub struct SegmentIdParseError(uuid::Error);
impl Error for SegmentIdParseError {}
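
For context: uuid 0.8 unifies its parser error types under uuid::Error, which is what forces the one-line change above. A minimal sketch of the parse path against the 0.8 API (the helper name is hypothetical):

use uuid::Uuid;

// uuid 0.8: Uuid::parse_str returns Result<Uuid, uuid::Error>,
// replacing 0.7's uuid::parser::ParseError.
fn parse_segment_uuid(s: &str) -> Result<Uuid, uuid::Error> {
    Uuid::parse_str(s)
}
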

View File

@@ -59,8 +59,7 @@ impl FastFieldReaders {
fast_bytes: Default::default(),
fast_fields_composite: fast_fields_composite.clone(),
};
for (field_id, field_entry) in schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
for (field, field_entry) in schema.fields() {
let field_type = field_entry.field_type();
if field_type == &FieldType::Bytes {
let idx_reader = fast_fields_composite

View File

@@ -24,8 +24,7 @@ impl FastFieldsWriter {
let mut multi_values_writers = Vec::new();
let mut bytes_value_writers = Vec::new();
for (field_id, field_entry) in schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
for (field, field_entry) in schema.fields() {
let default_value = match *field_entry.field_type() {
FieldType::I64(_) => common::i64_to_u64(0i64),
FieldType::F64(_) => common::f64_to_u64(0.0f64),

View File

@@ -22,11 +22,14 @@ impl FieldNormsWriter {
pub(crate) fn fields_with_fieldnorm(schema: &Schema) -> Vec<Field> {
schema
.fields()
.iter()
.enumerate()
.filter(|&(_, field_entry)| field_entry.is_indexed())
.map(|(field, _)| Field(field as u32))
.collect::<Vec<Field>>()
.filter_map(|(field, field_entry)| {
if field_entry.is_indexed() {
Some(field)
} else {
None
}
})
.collect::<Vec<_>>()
}
/// Initialize with state for tracking the field norm fields
@@ -35,7 +38,7 @@ impl FieldNormsWriter {
let fields = FieldNormsWriter::fields_with_fieldnorm(schema);
let max_field = fields
.iter()
.map(|field| field.0)
.map(Field::field_id)
.max()
.map(|max_field_id| max_field_id as usize + 1)
.unwrap_or(0);
@@ -50,8 +53,8 @@ impl FieldNormsWriter {
///
/// Will extend with 0-bytes for documents that have not been seen.
pub fn fill_up_to_max_doc(&mut self, max_doc: DocId) {
for &field in self.fields.iter() {
self.fieldnorms_buffer[field.0 as usize].resize(max_doc as usize, 0u8);
for field in self.fields.iter() {
self.fieldnorms_buffer[field.field_id() as usize].resize(max_doc as usize, 0u8);
}
}
@@ -64,7 +67,7 @@ impl FieldNormsWriter {
/// * field - the field being set
/// * fieldnorm - the number of terms present in document `doc` in field `field`
pub fn record(&mut self, doc: DocId, field: Field, fieldnorm: u32) {
let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.0 as usize];
let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.field_id() as usize];
assert!(
fieldnorm_buffer.len() <= doc as usize,
"Cannot register a given fieldnorm twice"
@@ -77,7 +80,7 @@ impl FieldNormsWriter {
/// Serialize the seen fieldnorm values to the serializer for all fields.
pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> {
for &field in self.fields.iter() {
let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.0 as usize][..];
let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..];
fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
}
Ok(())

View File

@@ -258,7 +258,7 @@ mod tests {
let delete_queue = DeleteQueue::new();
let make_op = |i: usize| {
let field = Field(1u32);
let field = Field::from_field_id(1u32);
DeleteOperation {
opstamp: i as u64,
term: Term::from_field_u64(field, i as u64),

View File

@@ -732,7 +732,7 @@ impl IndexWriter {
}
UserOperation::Add(document) => {
let add_operation = AddOperation { opstamp, document };
adds.push(add_operation);
}
}
}

View File

@@ -7,6 +7,10 @@ use std::collections::HashSet;
pub struct MergeOperationInventory(Inventory<InnerMergeOperation>);
impl MergeOperationInventory {
pub fn num_merge_operations(&self) -> usize {
self.0.list().len()
}
pub fn segment_in_merge(&self) -> HashSet<SegmentId> {
let mut segment_in_merge = HashSet::default();
for merge_op in self.0.list() {

View File

@@ -12,6 +12,10 @@ pub struct MergeCandidate(pub Vec<SegmentId>);
/// Every time the list of segments changes, the segment updater
/// asks the merge policy if some segments should be merged.
pub trait MergePolicy: marker::Send + marker::Sync + Debug {
fn maximum_num_threads(&self) -> Option<usize> {
None
}
/// Given the list of segment metas, returns the list of merge candidates.
///
/// This call happens on the segment updater thread, and will block
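
A sketch of a policy opting into the new hook, capping concurrent merges at 4. The trait's required method is not shown in the hunk above, so its exact signature here is an assumption:

#[derive(Debug)]
struct CappedMergePolicy;

impl MergePolicy for CappedMergePolicy {
    // New default method from this diff: Some(n) caps concurrent merges.
    fn maximum_num_threads(&self) -> Option<usize> {
        Some(4)
    }

    // Assumed signature of the trait's required method; this sketch
    // never proposes merge candidates of its own.
    fn compute_merge_candidates(&self, _segments: &[SegmentMeta]) -> Vec<MergeCandidate> {
        Vec::new()
    }
}
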

View File

@@ -190,8 +190,7 @@ impl IndexMerger {
fast_field_serializer: &mut FastFieldSerializer,
mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>,
) -> Result<()> {
for (field_id, field_entry) in self.schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
for (field, field_entry) in self.schema.fields() {
let field_type = field_entry.field_type();
match *field_type {
FieldType::HierarchicalFacet => {
@@ -649,15 +648,12 @@ impl IndexMerger {
serializer: &mut InvertedIndexSerializer,
) -> Result<HashMap<Field, TermOrdinalMapping>> {
let mut term_ordinal_mappings = HashMap::new();
for (field_ord, field_entry) in self.schema.fields().iter().enumerate() {
for (field, field_entry) in self.schema.fields() {
if field_entry.is_indexed() {
let indexed_field = Field(field_ord as u32);
if let Some(term_ordinal_mapping) = self.write_postings_for_field(
indexed_field,
field_entry.field_type(),
serializer,
)? {
term_ordinal_mappings.insert(indexed_field, term_ordinal_mapping);
if let Some(term_ordinal_mapping) =
self.write_postings_for_field(field, field_entry.field_type(), serializer)?
{
term_ordinal_mappings.insert(field, term_ordinal_mapping);
}
}
}

View File

@@ -39,6 +39,7 @@ use std::sync::Arc;
use std::sync::RwLock;
use std::thread;
use std::thread::JoinHandle;
use std::time::Duration;
/// Save the index meta file.
/// This operation is atomic :
@@ -200,6 +201,12 @@ impl SegmentUpdater {
}
pub fn add_segment(&self, segment_entry: SegmentEntry) -> bool {
let max_num_threads_opt = self.0.merge_policy.read().unwrap().maximum_num_threads();
if let Some(max_num_threads) = max_num_threads_opt {
while self.0.merge_operations.num_merge_operations() >= max_num_threads {
std::thread::sleep(Duration::from_secs(1u64));
}
}
self.run_async(|segment_updater| {
segment_updater.0.segment_manager.add_segment(segment_entry);
segment_updater.consider_merge_options();

View File

@@ -6,11 +6,11 @@ use crate::fieldnorm::FieldNormsWriter;
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::postings::compute_table_size;
use crate::postings::MultiFieldPostingsWriter;
use crate::schema::FieldEntry;
use crate::schema::FieldType;
use crate::schema::Schema;
use crate::schema::Term;
use crate::schema::Value;
use crate::schema::{Field, FieldEntry};
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::FacetTokenizer;
use crate::tokenizer::{TokenStream, Tokenizer};
@@ -70,12 +70,10 @@ impl SegmentWriter {
let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
let tokenizers =
schema
.fields()
.iter()
.map(FieldEntry::field_type)
.map(|field_type| match *field_type {
let tokenizers = schema
.fields()
.map(
|(_, field_entry): (Field, &FieldEntry)| match field_entry.field_type() {
FieldType::Str(ref text_options) => text_options
.get_indexing_options()
.and_then(|text_index_option| {
@@ -83,8 +81,9 @@ impl SegmentWriter {
segment.index().tokenizers().get(tokenizer_name)
}),
_ => None,
})
.collect();
},
)
.collect();
Ok(SegmentWriter {
max_doc: 0,
multifield_postings,
@@ -160,7 +159,7 @@ impl SegmentWriter {
}
FieldType::Str(_) => {
let num_tokens = if let Some(ref mut tokenizer) =
self.tokenizers[field.0 as usize]
self.tokenizers[field.field_id() as usize]
{
let texts: Vec<&str> = field_values
.iter()

View File

@@ -356,9 +356,9 @@ pub mod tests {
#[test]
fn test_skip_next() {
let term_0 = Term::from_field_u64(Field(0), 0);
let term_1 = Term::from_field_u64(Field(0), 1);
let term_2 = Term::from_field_u64(Field(0), 2);
let term_0 = Term::from_field_u64(Field::from_field_id(0), 0);
let term_1 = Term::from_field_u64(Field::from_field_id(0), 1);
let term_2 = Term::from_field_u64(Field::from_field_id(0), 2);
let num_docs = 300u32;
@@ -511,19 +511,19 @@ pub mod tests {
}
pub static TERM_A: Lazy<Term> = Lazy::new(|| {
let field = Field(0);
let field = Field::from_field_id(0);
Term::from_field_text(field, "a")
});
pub static TERM_B: Lazy<Term> = Lazy::new(|| {
let field = Field(0);
let field = Field::from_field_id(0);
Term::from_field_text(field, "b")
});
pub static TERM_C: Lazy<Term> = Lazy::new(|| {
let field = Field(0);
let field = Field::from_field_id(0);
Term::from_field_text(field, "c")
});
pub static TERM_D: Lazy<Term> = Lazy::new(|| {
let field = Field(0);
let field = Field::from_field_id(0);
Term::from_field_text(field, "d")
});

View File

@@ -61,12 +61,12 @@ fn make_field_partition(
.iter()
.map(|(key, _, _)| Term::wrap(key).field())
.enumerate();
let mut prev_field = Field(u32::max_value());
let mut prev_field_opt = None;
let mut fields = vec![];
let mut offsets = vec![];
for (offset, field) in term_offsets_it {
if field != prev_field {
prev_field = field;
if Some(field) != prev_field_opt {
prev_field_opt = Some(field);
fields.push(field);
offsets.push(offset);
}
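
The change above replaces a Field(u32::max_value()) sentinel with an Option, making the first-iteration case explicit. The same grouping idiom in isolation, over plain integer keys:

// Record the offset at which each run of consecutive equal keys starts,
// without a sentinel "impossible" key.
// partition_offsets(&[7, 7, 9, 9, 9, 2]) == vec![0, 2, 5]
fn partition_offsets(keys: &[u32]) -> Vec<usize> {
    let mut prev: Option<u32> = None;
    let mut offsets = Vec::new();
    for (offset, &key) in keys.iter().enumerate() {
        if Some(key) != prev {
            prev = Some(key);
            offsets.push(offset);
        }
    }
    offsets
}
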
@@ -86,8 +86,7 @@ impl MultiFieldPostingsWriter {
let term_index = TermHashMap::new(table_bits);
let per_field_postings_writers: Vec<_> = schema
.fields()
.iter()
.map(|field_entry| posting_from_field_entry(field_entry))
.map(|(_, field_entry)| posting_from_field_entry(field_entry))
.collect();
MultiFieldPostingsWriter {
heap: MemoryArena::new(),
@@ -107,7 +106,8 @@ impl MultiFieldPostingsWriter {
field: Field,
token_stream: &mut dyn TokenStream,
) -> u32 {
let postings_writer = self.per_field_postings_writers[field.0 as usize].deref_mut();
let postings_writer =
self.per_field_postings_writers[field.field_id() as usize].deref_mut();
postings_writer.index_text(
&mut self.term_index,
doc,
@@ -118,7 +118,8 @@ impl MultiFieldPostingsWriter {
}
pub fn subscribe(&mut self, doc: DocId, term: &Term) -> UnorderedTermId {
let postings_writer = self.per_field_postings_writers[term.field().0 as usize].deref_mut();
let postings_writer =
self.per_field_postings_writers[term.field().field_id() as usize].deref_mut();
postings_writer.subscribe(&mut self.term_index, doc, 0u32, term, &mut self.heap)
}
@@ -160,7 +161,7 @@ impl MultiFieldPostingsWriter {
FieldType::Bytes => {}
}
let postings_writer = &self.per_field_postings_writers[field.0 as usize];
let postings_writer = &self.per_field_postings_writers[field.field_id() as usize];
let mut field_serializer =
serializer.new_field(field, postings_writer.total_num_tokens())?;
postings_writer.serialize(

View File

@@ -674,13 +674,19 @@ mod test {
test_parse_query_to_logical_ast_helper(
"signed:-2324",
&format!("{:?}", Term::from_field_i64(Field(2u32), -2324)),
&format!(
"{:?}",
Term::from_field_i64(Field::from_field_id(2u32), -2324)
),
false,
);
test_parse_query_to_logical_ast_helper(
"float:2.5",
&format!("{:?}", Term::from_field_f64(Field(10u32), 2.5)),
&format!(
"{:?}",
Term::from_field_f64(Field::from_field_id(10u32), 2.5)
),
false,
);
}

View File

@@ -118,7 +118,7 @@ mod tests {
#[test]
fn test_term_query_debug() {
let term_query = TermQuery::new(
Term::from_field_text(Field(1), "hello"),
Term::from_field_text(Field::from_field_id(1), "hello"),
IndexRecordOption::WithFreqs,
);
assert_eq!(

View File

@@ -6,7 +6,19 @@ use std::io::Write;
/// `Field` is represented by an unsigned 32-bit integer type
/// The schema holds the mapping between field names and `Field` objects.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub struct Field(pub u32);
pub struct Field(u32);
impl Field {
/// Create a new field object for the given FieldId.
pub fn from_field_id(field_id: u32) -> Field {
Field(field_id)
}
/// Returns a u32 uniquely identifying a field within a schema.
pub fn field_id(&self) -> u32 {
self.0
}
}
impl BinarySerializable for Field {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
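
With the tuple field now private, every Field(n) call site in this diff migrates to the two accessors above. A before/after sketch:

use tantivy::schema::Field;

fn main() {
    // Before: let field = Field(2u32);  (the tuple field was public)
    // After: construction and inspection go through the accessors.
    let field = Field::from_field_id(2u32);
    assert_eq!(field.field_id(), 2u32);
}
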

View File

@@ -167,7 +167,7 @@ impl SchemaBuilder {
/// Adds a field entry to the schema in build.
fn add_field(&mut self, field_entry: FieldEntry) -> Field {
let field = Field(self.fields.len() as u32);
let field = Field::from_field_id(self.fields.len() as u32);
let field_name = field_entry.name().to_string();
self.fields.push(field_entry);
self.fields_map.insert(field_name, field);
@@ -223,7 +223,7 @@ pub struct Schema(Arc<InnerSchema>);
impl Schema {
/// Return the `FieldEntry` associated to a `Field`.
pub fn get_field_entry(&self, field: Field) -> &FieldEntry {
&self.0.fields[field.0 as usize]
&self.0.fields[field.field_id() as usize]
}
/// Return the field name for a given `Field`.
@@ -232,8 +232,12 @@ impl Schema {
}
/// Return the list of all the `Field`s.
pub fn fields(&self) -> &[FieldEntry] {
&self.0.fields
pub fn fields(&self) -> impl Iterator<Item = (Field, &FieldEntry)> {
self.0
.fields
.iter()
.enumerate()
.map(|(field_id, field_entry)| (Field::from_field_id(field_id as u32), field_entry))
}
/// Creates a new builder.
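
The new fields() iterator yields the Field handle alongside each entry, removing the .iter().enumerate() plus Field(field_id as u32) boilerplate seen at call sites throughout this diff. A usage sketch (function name hypothetical):

use tantivy::schema::Schema;

fn list_fields(schema: &Schema) {
    // Old: for (field_id, field_entry) in schema.fields().iter().enumerate() {
    //          let field = Field(field_id as u32); ... }
    for (field, field_entry) in schema.fields() {
        println!("{}: {}", field.field_id(), field_entry.name());
    }
}
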
@@ -485,13 +489,32 @@ mod tests {
let schema: Schema = serde_json::from_str(expected).unwrap();
let mut fields = schema.fields().iter();
assert_eq!("title", fields.next().unwrap().name());
assert_eq!("author", fields.next().unwrap().name());
assert_eq!("count", fields.next().unwrap().name());
assert_eq!("popularity", fields.next().unwrap().name());
assert_eq!("score", fields.next().unwrap().name());
let mut fields = schema.fields();
{
let (field, field_entry) = fields.next().unwrap();
assert_eq!("title", field_entry.name());
assert_eq!(0, field.field_id());
}
{
let (field, field_entry) = fields.next().unwrap();
assert_eq!("author", field_entry.name());
assert_eq!(1, field.field_id());
}
{
let (field, field_entry) = fields.next().unwrap();
assert_eq!("count", field_entry.name());
assert_eq!(2, field.field_id());
}
{
let (field, field_entry) = fields.next().unwrap();
assert_eq!("popularity", field_entry.name());
assert_eq!(3, field.field_id());
}
{
let (field, field_entry) = fields.next().unwrap();
assert_eq!("score", field_entry.name());
assert_eq!(4, field.field_id());
}
assert!(fields.next().is_none());
}

View File

@@ -105,7 +105,7 @@ impl Term {
if self.0.len() < 4 {
self.0.resize(4, 0u8);
}
BigEndian::write_u32(&mut self.0[0..4], field.0);
BigEndian::write_u32(&mut self.0[0..4], field.field_id());
}
/// Sets a u64 value in the term.
@@ -157,7 +157,7 @@ where
/// Returns the field.
pub fn field(&self) -> Field {
Field(BigEndian::read_u32(&self.0.as_ref()[..4]))
Field::from_field_id(BigEndian::read_u32(&self.0.as_ref()[..4]))
}
/// Returns the `u64` value stored in a term.
@@ -227,7 +227,7 @@ impl fmt::Debug for Term {
write!(
f,
"Term(field={},bytes={:?})",
self.field().0,
self.field().field_id(),
self.value_bytes()
)
}
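
As the hunks above show, a Term's backing buffer begins with the field id encoded big-endian in the first four bytes, followed by the value bytes. A sketch of decoding that prefix (helper name hypothetical; byteorder as used in the diff):

use byteorder::{BigEndian, ByteOrder};

// The first four bytes of a Term's buffer hold the big-endian field id.
fn field_id_of(term_bytes: &[u8]) -> u32 {
    BigEndian::read_u32(&term_bytes[..4])
}
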

View File

@@ -1,5 +1,4 @@
use fail;
use std::io::Write;
use std::path::Path;
use tantivy::directory::{Directory, ManagedDirectory, RAMDirectory, TerminatingWrite};
use tantivy::doc;