Compare commits

..

2 Commits

Author SHA1 Message Date
Paul Masurel
063ed30f66 Added field norm readers 2020-07-20 11:59:43 +09:00
Paul Masurel
6db8bb49d6 Assert nearly equals macro (#853)
* Assert nearly equals macro

* Renamed specialized_scorer in TermScorer
2020-07-17 16:40:41 +09:00
29 changed files with 195 additions and 989 deletions

View File

@@ -58,7 +58,6 @@ winapi = "0.3"
rand = "0.7"
maplit = "1"
matches = "0.1.8"
proptest = "0.10"
[dev-dependencies.fail]
version = "0.4"

View File

@@ -519,7 +519,7 @@ impl Collector for TopDocs {
})?;
}
let fruit: Vec<(Score, DocAddress)> = heap
let fruit = heap
.into_sorted_vec()
.into_iter()
.map(|cid| (cid.feature, DocAddress(segment_ord, cid.doc)))

View File

@@ -89,19 +89,6 @@ impl FixedSize for u64 {
const SIZE_IN_BYTES: usize = 8;
}
/// Binary serialization for `f32`: the value is written/read as its raw
/// 4-byte representation using the crate-wide `Endianness` byte order.
impl BinarySerializable for f32 {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        // Delegates to byteorder's `write_f32`; errors bubble up as `io::Error`.
        writer.write_f32::<Endianness>(*self)
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
        // Must use the same `Endianness` as `serialize` for round-tripping.
        reader.read_f32::<Endianness>()
    }
}
/// An `f32` always serializes to exactly 4 bytes (see the
/// `BinarySerializable` impl above, which writes the raw IEEE-754 bits).
impl FixedSize for f32 {
    const SIZE_IN_BYTES: usize = 4;
}
impl BinarySerializable for i64 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_i64::<Endianness>(*self)

View File

@@ -5,7 +5,7 @@ use std::io::Read;
use std::io::Write;
/// Wrapper over a `u64` that serializes as a variable int.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[derive(Debug, Eq, PartialEq)]
pub struct VInt(pub u64);
const STOP_BIT: u8 = 128;

View File

@@ -126,8 +126,8 @@ impl SegmentReader {
/// They are simply stored as a fast field, serialized in
/// the `.fieldnorm` file of the segment.
pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader {
if let Some(fieldnorm_source) = self.fieldnorm_readers.get_field(field) {
fieldnorm_source
if let Some(fieldnorm_reader) = self.fieldnorm_readers.get_field(field) {
fieldnorm_reader
} else {
let field_name = self.schema.get_field_name(field);
let err_msg = format!(

View File

@@ -94,24 +94,12 @@ impl Footer {
match &self.versioned_footer {
VersionedFooter::V1 {
crc32: _crc,
store_compression,
store_compression: compression,
} => {
if &library_version.store_compression != store_compression {
if &library_version.store_compression != compression {
return Err(Incompatibility::CompressionMismatch {
library_compression_format: library_version.store_compression.to_string(),
index_compression_format: store_compression.to_string(),
});
}
Ok(())
}
VersionedFooter::V2 {
crc32: _crc,
store_compression,
} => {
if &library_version.store_compression != store_compression {
return Err(Incompatibility::CompressionMismatch {
library_compression_format: library_version.store_compression.to_string(),
index_compression_format: store_compression.to_string(),
index_compression_format: compression.to_string(),
});
}
Ok(())
@@ -132,29 +120,24 @@ pub enum VersionedFooter {
crc32: CrcHashU32,
store_compression: String,
},
// Introduction of the Block WAND information.
V2 {
crc32: CrcHashU32,
store_compression: String,
},
}
impl BinarySerializable for VersionedFooter {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
let mut buf = Vec::new();
match self {
VersionedFooter::V2 {
VersionedFooter::V1 {
crc32,
store_compression: compression,
} => {
// Serializes a valid `VersionedFooter` or panics if the version is unknown
// [ version | crc_hash | compression_mode ]
// [ 0..4 | 4..8 | variable ]
BinarySerializable::serialize(&2u32, &mut buf)?;
BinarySerializable::serialize(&1u32, &mut buf)?;
BinarySerializable::serialize(crc32, &mut buf)?;
BinarySerializable::serialize(compression, &mut buf)?;
}
VersionedFooter::V1 { .. } | VersionedFooter::UnknownVersion => {
VersionedFooter::UnknownVersion => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Cannot serialize an unknown versioned footer ",
@@ -185,17 +168,10 @@ impl BinarySerializable for VersionedFooter {
let version = u32::deserialize(&mut cursor)?;
if version == 1 {
let crc32 = u32::deserialize(&mut cursor)?;
let store_compression = String::deserialize(&mut cursor)?;
let compression = String::deserialize(&mut cursor)?;
Ok(VersionedFooter::V1 {
crc32,
store_compression,
})
} else if version == 2 {
let crc32 = u32::deserialize(&mut cursor)?;
let store_compression = String::deserialize(&mut cursor)?;
Ok(VersionedFooter::V2 {
crc32,
store_compression,
store_compression: compression,
})
} else {
Ok(VersionedFooter::UnknownVersion)
@@ -206,7 +182,6 @@ impl BinarySerializable for VersionedFooter {
impl VersionedFooter {
pub fn crc(&self) -> Option<CrcHashU32> {
match self {
VersionedFooter::V2 { crc32, .. } => Some(*crc32),
VersionedFooter::V1 { crc32, .. } => Some(*crc32),
VersionedFooter::UnknownVersion { .. } => None,
}
@@ -244,7 +219,7 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
let crc32 = self.hasher.take().unwrap().finalize();
let footer = Footer::new(VersionedFooter::V2 {
let footer = Footer::new(VersionedFooter::V1 {
crc32,
store_compression: crate::store::COMPRESSION.to_string(),
});
@@ -273,11 +248,15 @@ mod tests {
assert!(footer_proxy.terminate().is_ok());
assert_eq!(vec.len(), 167);
let footer = Footer::deserialize(&mut &vec[..]).unwrap();
assert!(matches!(
footer.versioned_footer,
VersionedFooter::V2 { store_compression, .. }
if store_compression == crate::store::COMPRESSION
));
if let VersionedFooter::V1 {
crc32: _,
store_compression,
} = footer.versioned_footer
{
assert_eq!(store_compression, crate::store::COMPRESSION);
} else {
panic!("Versioned footer should be V1.");
}
assert_eq!(&footer.version, crate::version());
}
@@ -285,7 +264,7 @@ mod tests {
fn test_serialize_deserialize_footer() {
let mut buffer = Vec::new();
let crc32 = 123456u32;
let footer: Footer = Footer::new(VersionedFooter::V2 {
let footer: Footer = Footer::new(VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
});
@@ -297,7 +276,7 @@ mod tests {
#[test]
fn footer_length() {
let crc32 = 1111111u32;
let versioned_footer = VersionedFooter::V2 {
let versioned_footer = VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
};
@@ -318,7 +297,7 @@ mod tests {
// versioned footer length
12 | 128,
// index format version
2,
1,
0,
0,
0,
@@ -337,7 +316,7 @@ mod tests {
let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
assert!(cursor.is_empty());
let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
let expected_versioned_footer: VersionedFooter = VersionedFooter::V2 {
let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 {
crc32: expected_crc,
store_compression: "lz4".to_string(),
};

View File

@@ -8,9 +8,9 @@ use crate::fastfield::DeleteBitSet;
use crate::fastfield::FastFieldReader;
use crate::fastfield::FastFieldSerializer;
use crate::fastfield::MultiValueIntFastFieldReader;
use crate::fieldnorm::FieldNormReader;
use crate::fieldnorm::FieldNormsSerializer;
use crate::fieldnorm::FieldNormsWriter;
use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::indexer::SegmentSerializer;
use crate::postings::InvertedIndexSerializer;
use crate::postings::Postings;
@@ -20,7 +20,7 @@ use crate::schema::{Field, Schema};
use crate::store::StoreWriter;
use crate::termdict::TermMerger;
use crate::termdict::TermOrdinal;
use crate::{DocId, SegmentComponent};
use crate::DocId;
use std::cmp;
use std::collections::HashMap;
@@ -493,7 +493,6 @@ impl IndexMerger {
indexed_field: Field,
field_type: &FieldType,
serializer: &mut InvertedIndexSerializer,
fieldnorm_reader: Option<FieldNormReader>,
) -> crate::Result<Option<TermOrdinalMapping>> {
let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
let mut delta_computer = DeltaComputer::new();
@@ -552,8 +551,7 @@ impl IndexMerger {
// - Segment 2's doc ids become [seg0.max_doc + seg1.max_doc,
// seg0.max_doc + seg1.max_doc + seg2.max_doc]
// ...
let mut field_serializer =
serializer.new_field(indexed_field, total_num_tokens, fieldnorm_reader)?;
let mut field_serializer = serializer.new_field(indexed_field, total_num_tokens)?;
let field_entry = self.schema.get_field_entry(indexed_field);
@@ -599,11 +597,7 @@ impl IndexMerger {
// We know that there is at least one document containing
// the term, so we add it.
let term_doc_freq = segment_postings
.iter()
.map(|(_, segment_posting)| segment_posting.doc_freq())
.sum();
let to_term_ord = field_serializer.new_term(term_bytes, term_doc_freq)?;
let to_term_ord = field_serializer.new_term(term_bytes)?;
if let Some(ref mut term_ord_mapping) = term_ord_mapping_opt {
for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
@@ -643,18 +637,13 @@ impl IndexMerger {
fn write_postings(
&self,
serializer: &mut InvertedIndexSerializer,
fieldnorm_readers: FieldNormReaders,
) -> crate::Result<HashMap<Field, TermOrdinalMapping>> {
let mut term_ordinal_mappings = HashMap::new();
for (field, field_entry) in self.schema.fields() {
let fieldnorm_reader = fieldnorm_readers.get_field(field);
if field_entry.is_indexed() {
if let Some(term_ordinal_mapping) = self.write_postings_for_field(
field,
field_entry.field_type(),
serializer,
fieldnorm_reader,
)? {
if let Some(term_ordinal_mapping) =
self.write_postings_for_field(field, field_entry.field_type(), serializer)?
{
term_ordinal_mappings.insert(field, term_ordinal_mapping);
}
}
@@ -683,12 +672,7 @@ impl SerializableSegment for IndexMerger {
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
self.write_fieldnorms(fieldnorms_serializer)?;
}
let fieldnorm_data = serializer
.segment()
.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorm_readers = FieldNormReaders::new(fieldnorm_data)?;
let term_ord_mappings =
self.write_postings(serializer.get_postings_serializer(), fieldnorm_readers)?;
let term_ord_mappings = self.write_postings(serializer.get_postings_serializer())?;
self.write_fast_fields(serializer.get_fast_field_serializer(), term_ord_mappings)?;
self.write_storable_fields(serializer.get_store_writer())?;
serializer.close()?;
@@ -698,15 +682,15 @@ impl SerializableSegment for IndexMerger {
#[cfg(test)]
mod tests {
use crate::assert_nearly_equals;
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
use crate::collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector};
use crate::collector::{Count, FacetCollector};
use crate::core::Index;
use crate::query::AllQuery;
use crate::query::BooleanQuery;
use crate::query::Scorer;
use crate::query::TermQuery;
use crate::schema;
use crate::schema::Cardinality;
use crate::schema::Document;
use crate::schema::Facet;
use crate::schema::IndexRecordOption;
@@ -714,11 +698,9 @@ mod tests {
use crate::schema::Term;
use crate::schema::TextFieldIndexing;
use crate::schema::INDEXED;
use crate::schema::{Cardinality, TEXT};
use crate::DocAddress;
use crate::IndexWriter;
use crate::Searcher;
use crate::{schema, DocSet, SegmentId};
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use futures::executor::block_on;
use std::io::Cursor;
@@ -1541,71 +1523,4 @@ mod tests {
assert_eq!(1, index.searchable_segments()?.len());
Ok(())
}
#[test]
// Verifies that the Block-WAND metadata (per-block max scores) survives a
// segment merge: scores and block-max scores must be queryable both before
// and after merging the segments into one.
fn test_merged_index_has_blockwand() -> crate::Result<()> {
    let mut builder = schema::SchemaBuilder::new();
    let text = builder.add_text_field("text", TEXT);
    let index = Index::create_in_ram(builder.build());
    let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
    let happy_term = Term::from_field_text(text, "happy");
    let term_query = TermQuery::new(happy_term, IndexRecordOption::WithFreqs);
    // First commit: 62 identical docs -> a single segment.
    for _ in 0..62 {
        writer.add_document(doc!(text=>"hello happy tax payer"));
    }
    writer.commit()?;
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let mut term_scorer = term_query
        .specialized_weight(&searcher, true)
        .specialized_scorer(searcher.segment_reader(0u32), 1.0f32)?;
    assert_eq!(term_scorer.doc(), 0);
    // All docs are identical, so score == block max score.
    assert_nearly_equals!(term_scorer.block_max_score(), 0.0079681855);
    assert_nearly_equals!(term_scorer.score(), 0.0079681855);
    // Second commit: 81 more docs -> a second segment.
    for _ in 0..81 {
        writer.add_document(doc!(text=>"hello happy tax payer"));
    }
    writer.commit()?;
    reader.reload()?;
    let searcher = reader.searcher();
    assert_eq!(searcher.segment_readers().len(), 2);
    for segment_reader in searcher.segment_readers() {
        let mut term_scorer = term_query
            .specialized_weight(&searcher, true)
            .specialized_scorer(segment_reader, 1.0f32)?;
        // the difference compared to before is intrinsic to the bm25 formula. no worries there.
        for doc in segment_reader.doc_ids_alive() {
            assert_eq!(term_scorer.doc(), doc);
            assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
            assert_nearly_equals!(term_scorer.score(), 0.003478312);
            term_scorer.advance();
        }
    }
    // Merge both segments into one and check the same invariants again.
    let segment_ids: Vec<SegmentId> = searcher
        .segment_readers()
        .iter()
        .map(|reader| reader.segment_id())
        .collect();
    block_on(writer.merge(&segment_ids[..]))?;
    reader.reload()?;
    let searcher = reader.searcher();
    assert_eq!(searcher.segment_readers().len(), 1);
    let segment_reader = searcher.segment_reader(0u32);
    let mut term_scorer = term_query
        .specialized_weight(&searcher, true)
        .specialized_scorer(segment_reader, 1.0f32)?;
    // the difference compared to before is intrinsic to the bm25 formula. no worries there.
    for doc in segment_reader.doc_ids_alive() {
        assert_eq!(term_scorer.doc(), doc);
        assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
        assert_nearly_equals!(term_scorer.score(), 0.003478312);
        term_scorer.advance();
    }
    Ok(())
}
}

View File

@@ -36,6 +36,7 @@ impl SegmentSerializer {
})
}
#[allow(dead_code)]
pub fn segment(&self) -> &Segment {
&self.segment
}

View File

@@ -130,15 +130,14 @@ fn merge(
// An IndexMerger is like a "view" of our merged segments.
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
let merged_segment_id = merged_segment.id();
// ... we just serialize this index merger in our new segment to merge the two segments.
let segment_serializer = SegmentSerializer::for_segment(merged_segment)?;
let segment_serializer = SegmentSerializer::for_segment(merged_segment.clone())?;
let num_docs = merger.write(segment_serializer)?;
let segment_meta = index.new_segment_meta(merged_segment_id, num_docs);
let merged_segment_id = merged_segment.id();
let segment_meta = index.new_segment_meta(merged_segment_id, num_docs);
Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
}

View File

@@ -2,7 +2,7 @@ use super::operation::AddOperation;
use crate::core::Segment;
use crate::core::SerializableSegment;
use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
use crate::fieldnorm::FieldNormsWriter;
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::postings::compute_table_size;
use crate::postings::MultiFieldPostingsWriter;
@@ -14,8 +14,8 @@ use crate::schema::{Field, FieldEntry};
use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
use crate::tokenizer::{TokenStreamChain, Tokenizer};
use crate::DocId;
use crate::Opstamp;
use crate::{DocId, SegmentComponent};
use std::io;
use std::str;
@@ -284,12 +284,7 @@ fn write(
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
fieldnorms_writer.serialize(fieldnorms_serializer)?;
}
let fieldnorm_data = serializer
.segment()
.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorm_readers = FieldNormReaders::new(fieldnorm_data)?;
let term_ord_map =
multifield_postings.serialize(serializer.get_postings_serializer(), fieldnorm_readers)?;
let term_ord_map = multifield_postings.serialize(serializer.get_postings_serializer())?;
fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;
serializer.close()?;
Ok(())

View File

@@ -173,7 +173,7 @@ use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
/// Index format version.
const INDEX_FORMAT_VERSION: u32 = 2;
const INDEX_FORMAT_VERSION: u32 = 1;
/// Structure version for the index.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]

View File

@@ -1,21 +1,11 @@
use crate::common::{BinarySerializable, VInt};
use crate::directory::ReadOnlySource;
use crate::fieldnorm::FieldNormReader;
use crate::postings::compression::{
AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE,
};
use crate::postings::{BlockInfo, FreqReadingOption, SkipReader};
use crate::query::BM25Weight;
use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TERMINATED};
/// Returns the maximum of the `f32` values yielded by `it`, or `None` if the
/// iterator is empty.
///
/// Uses `f32::max` to combine values, matching the original fold behavior.
fn max_f32<I: Iterator<Item = f32>>(mut it: I) -> Option<f32> {
    // Idiomatic replacement for the manual `if let Some(..) { Some(..) } else
    // { None }` pattern (clippy::manual_map): map over the first element and
    // fold the remainder with `f32::max`.
    it.next().map(|first| it.fold(first, f32::max))
}
use crate::{DocId, TERMINATED};
/// `BlockSegmentPostings` is a cursor iterating over blocks
/// of documents.
@@ -29,12 +19,11 @@ pub struct BlockSegmentPostings {
loaded_offset: usize,
freq_decoder: BlockDecoder,
freq_reading_option: FreqReadingOption,
block_max_score_cache: Option<Score>,
doc_freq: u32,
doc_freq: usize,
data: ReadOnlySource,
pub(crate) skip_reader: SkipReader,
skip_reader: SkipReader,
}
fn decode_bitpacked_block(
@@ -58,7 +47,6 @@ fn decode_vint_block(
doc_offset: DocId,
num_vint_docs: usize,
) {
doc_decoder.fill(TERMINATED);
let num_consumed_bytes = doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs);
if let Some(freq_decoder) = freq_decoder_opt {
freq_decoder.uncompress_vint_unsorted(&data[num_consumed_bytes..], num_vint_docs);
@@ -100,12 +88,12 @@ impl BlockSegmentPostings {
None => SkipReader::new(ReadOnlySource::empty(), doc_freq, record_option),
};
let doc_freq = doc_freq as usize;
let mut block_segment_postings = BlockSegmentPostings {
doc_decoder: BlockDecoder::with_val(TERMINATED),
loaded_offset: std::usize::MAX,
freq_decoder: BlockDecoder::with_val(1),
freq_reading_option,
block_max_score_cache: None,
doc_freq,
data: postings_data,
skip_reader,
@@ -114,43 +102,6 @@ impl BlockSegmentPostings {
block_segment_postings
}
/// Returns the block_max_score for the current block.
/// It does not require the block to be loaded. For instance, it is ok to call this method
/// after having called `.shallow_advance(..)`.
///
/// See `TermScorer::block_max_score(..)` for more information.
pub fn block_max_score(
&mut self,
fieldnorm_reader: &FieldNormReader,
bm25_weight: &BM25Weight,
) -> Score {
let (block_max_score_cache, skip_reader, doc_decoder, freq_decoder) = (
&mut self.block_max_score_cache,
&self.skip_reader,
&self.doc_decoder,
&self.freq_decoder,
);
*block_max_score_cache.get_or_insert_with(|| {
skip_reader
.block_max_score(bm25_weight)
.or_else(|| {
let docs = doc_decoder.output_array();
let freqs = freq_decoder.output_array();
max_f32(docs.iter().cloned().zip(freqs.iter().cloned()).map(
|(doc, term_freq)| {
let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
bm25_weight.score(fieldnorm_id, term_freq)
},
))
})
.unwrap_or(0f32)
})
}
pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
self.freq_reading_option
}
// Resets the block segment postings on another position
// in the postings file.
//
@@ -170,16 +121,15 @@ impl BlockSegmentPostings {
} else {
self.skip_reader.reset(ReadOnlySource::empty(), doc_freq);
}
self.doc_freq = doc_freq;
self.doc_freq = doc_freq as usize;
self.load_block();
}
/// Returns the overall number of documents in the block postings.
/// It does not take into account whether documents are deleted or not.
/// Returns the document frequency associated to this block postings.
///
/// This `doc_freq` is simply the sum of the length of all of the blocks
/// length, and it does not take in account deleted documents.
pub fn doc_freq(&self) -> u32 {
pub fn doc_freq(&self) -> usize {
self.doc_freq
}
@@ -189,20 +139,11 @@ impl BlockSegmentPostings {
/// returned by `.docs()` is empty.
#[inline]
pub fn docs(&self) -> &[DocId] {
debug_assert!(self.block_is_loaded());
self.doc_decoder.output_array()
}
/// Returns a full block, regardless of whether the block is complete or incomplete (
/// as it happens for the last block of the posting list).
///
/// In the latter case, the block is guaranteed to be padded with the sentinel value:
/// `TERMINATED`. The array is also guaranteed to be aligned on 16 bytes = 128 bits.
///
/// This method is useful to run SSE2 linear search.
#[inline(always)]
pub(crate) fn docs_aligned(&self) -> &AlignedBuffer {
debug_assert!(self.block_is_loaded());
self.doc_decoder.output_aligned()
}
@@ -215,14 +156,12 @@ impl BlockSegmentPostings {
/// Return the array of `term freq` in the block.
#[inline]
pub fn freqs(&self) -> &[u32] {
debug_assert!(self.block_is_loaded());
self.freq_decoder.output_array()
}
/// Return the frequency at index `idx` of the block.
#[inline]
pub fn freq(&self, idx: usize) -> u32 {
debug_assert!(self.block_is_loaded());
self.freq_decoder.output(idx)
}
@@ -233,10 +172,13 @@ impl BlockSegmentPostings {
/// of any number between 1 and `NUM_DOCS_PER_BLOCK - 1`
#[inline]
pub fn block_len(&self) -> usize {
debug_assert!(self.block_is_loaded());
self.doc_decoder.output_len
}
pub(crate) fn position_offset(&self) -> u64 {
self.skip_reader.position_offset()
}
/// Position on a block that may contains `target_doc`.
///
/// If all docs are smaller than target, the block loaded may be empty,
@@ -246,26 +188,7 @@ impl BlockSegmentPostings {
self.load_block();
}
pub(crate) fn position_offset(&self) -> u64 {
self.skip_reader.position_offset()
}
/// Dangerous API! This calls seek on the skip list,
/// but does not `.load_block()` afterwards.
///
/// `.load_block()` needs to be called manually afterwards.
/// If all docs are smaller than target, the block loaded may be empty,
/// or be the last an incomplete VInt block.
pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
self.skip_reader.seek(target_doc);
}
pub(crate) fn block_is_loaded(&self) -> bool {
self.loaded_offset == self.skip_reader.byte_offset()
}
pub(crate) fn load_block(&mut self) {
self.block_max_score_cache = None;
fn load_block(&mut self) {
let offset = self.skip_reader.byte_offset();
if self.loaded_offset == offset {
return;
@@ -290,14 +213,11 @@ impl BlockSegmentPostings {
tf_num_bits,
);
}
BlockInfo::VInt { num_docs } => {
let data = {
if num_docs == 0 {
&[]
} else {
&self.data.as_slice()[offset..]
}
};
BlockInfo::VInt(num_vint_docs) => {
self.doc_decoder.clear();
if num_vint_docs == 0 {
return;
}
decode_vint_block(
&mut self.doc_decoder,
if let FreqReadingOption::ReadFreq = self.freq_reading_option {
@@ -305,9 +225,9 @@ impl BlockSegmentPostings {
} else {
None
},
data,
&self.data.as_slice()[offset..],
self.skip_reader.last_doc_in_previous_block,
num_docs as usize,
num_vint_docs as usize,
);
}
}
@@ -325,10 +245,9 @@ impl BlockSegmentPostings {
pub fn empty() -> BlockSegmentPostings {
BlockSegmentPostings {
doc_decoder: BlockDecoder::with_val(TERMINATED),
loaded_offset: 0,
loaded_offset: std::usize::MAX,
freq_decoder: BlockDecoder::with_val(1),
freq_reading_option: FreqReadingOption::NoFreq,
block_max_score_cache: None,
doc_freq: 0,
data: ReadOnlySource::new(vec![]),
skip_reader: SkipReader::new(ReadOnlySource::new(vec![]), 0, IndexRecordOption::Basic),
@@ -354,10 +273,8 @@ mod tests {
#[test]
fn test_empty_segment_postings() {
let mut postings = SegmentPostings::empty();
assert_eq!(postings.doc(), TERMINATED);
assert_eq!(postings.advance(), TERMINATED);
assert_eq!(postings.advance(), TERMINATED);
assert_eq!(postings.doc_freq(), 0);
assert_eq!(postings.len(), 0);
}
@@ -377,8 +294,6 @@ mod tests {
#[test]
fn test_empty_block_segment_postings() {
let mut postings = BlockSegmentPostings::empty();
assert!(postings.docs().is_empty());
assert_eq!(postings.doc_freq(), 0);
postings.advance();
assert!(postings.docs().is_empty());
assert_eq!(postings.doc_freq(), 0);
@@ -460,21 +375,19 @@ mod tests {
}
#[test]
fn test_block_segment_postings_seek2() {
fn test_block_segment_postings_skip2() {
let mut docs = vec![0];
for i in 0..1300 {
docs.push((i * i / 100) + i);
}
let mut block_postings = build_block_postings(&docs[..]);
for i in vec![0, 424, 10000] {
block_postings.shallow_seek(i);
block_postings.load_block();
block_postings.seek(i);
let docs = block_postings.docs();
assert!(docs[0] <= i);
assert!(docs.last().cloned().unwrap_or(0u32) >= i);
}
block_postings.shallow_seek(100_000);
block_postings.load_block();
block_postings.seek(100_000);
assert_eq!(block_postings.doc(COMPRESSION_BLOCK_SIZE - 1), TERMINATED);
}

View File

@@ -108,8 +108,9 @@ impl BlockDecoder {
self.output.0[idx]
}
pub fn fill(&mut self, val: u32) {
self.output.0.iter_mut().for_each(|el| *el = val);
pub fn clear(&mut self) {
self.output_len = 0;
self.output.0.iter_mut().for_each(|el| *el = TERMINATED);
}
}
@@ -244,6 +245,19 @@ pub mod tests {
}
}
#[test]
// Verifies that `BlockDecoder::clear` empties the decoder's output after a
// successful block decompression round-trip.
fn test_clearing() {
    let mut encoder = BlockEncoder::new();
    // A full block: 128 sorted, strictly increasing values.
    let vals = (0u32..128u32).map(|i| i * 3).collect::<Vec<_>>();
    let (num_bits, compressed) = encoder.compress_block_sorted(&vals[..], 0u32);
    let mut decoder = BlockDecoder::default();
    decoder.uncompress_block_sorted(compressed, 0u32, num_bits);
    // Round trip: decoder must expose exactly the encoded values.
    assert_eq!(decoder.output_len, 128);
    assert_eq!(decoder.output_array(), &vals[..]);
    // After `clear`, the output array must be empty (output_len reset to 0).
    decoder.clear();
    assert!(decoder.output_array().is_empty());
}
#[test]
fn test_encode_unsorted_block_with_junk() {
let mut compressed: Vec<u8> = Vec::new();

View File

@@ -73,10 +73,8 @@ pub mod tests {
let mut segment = index.new_segment();
let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap();
{
let mut field_serializer = posting_serializer
.new_field(text_field, 120 * 4, None)
.unwrap();
field_serializer.new_term("abc".as_bytes(), 12u32).unwrap();
let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap();
field_serializer.new_term("abc".as_bytes()).unwrap();
for doc_id in 0u32..120u32 {
let delta_positions = vec![1, 2, 3, 2];
field_serializer

View File

@@ -1,6 +1,5 @@
use super::stacker::{Addr, MemoryArena, TermHashMap};
use crate::fieldnorm::FieldNormReaders;
use crate::postings::recorder::{
BufferLender, NothingRecorder, Recorder, TFAndPositionRecorder, TermFrequencyRecorder,
};
@@ -129,7 +128,6 @@ impl MultiFieldPostingsWriter {
pub fn serialize(
&self,
serializer: &mut InvertedIndexSerializer,
fieldnorm_readers: FieldNormReaders,
) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
self.term_index.iter().collect();
@@ -163,12 +161,8 @@ impl MultiFieldPostingsWriter {
}
let postings_writer = &self.per_field_postings_writers[field.field_id() as usize];
let fieldnorm_reader = fieldnorm_readers.get_field(field);
let mut field_serializer = serializer.new_field(
field,
postings_writer.total_num_tokens(),
fieldnorm_reader,
)?;
let mut field_serializer =
serializer.new_field(field, postings_writer.total_num_tokens())?;
postings_writer.serialize(
&term_offsets[start..stop],
&mut field_serializer,
@@ -303,8 +297,7 @@ impl<Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<Rec>
let mut buffer_lender = BufferLender::default();
for &(term_bytes, addr, _) in term_addrs {
let recorder: Rec = termdict_heap.read(addr);
let term_doc_freq = recorder.term_doc_freq().unwrap_or(0u32);
serializer.new_term(&term_bytes[4..], term_doc_freq)?;
serializer.new_term(&term_bytes[4..])?;
recorder.serialize(&mut buffer_lender, serializer, heap)?;
serializer.close_term()?;
}

View File

@@ -75,8 +75,6 @@ pub(crate) trait Recorder: Copy + 'static {
serializer: &mut FieldSerializer<'_>,
heap: &MemoryArena,
) -> io::Result<()>;
/// Returns the number of documents containing this term.
fn term_doc_freq(&self) -> Option<u32>;
}
/// Only records the doc ids
@@ -115,16 +113,11 @@ impl Recorder for NothingRecorder {
) -> io::Result<()> {
let buffer = buffer_lender.lend_u8();
self.stack.read_to_end(heap, buffer);
// TODO avoid reading twice.
for doc in VInt32Reader::new(&buffer[..]) {
serializer.write_doc(doc as u32, 0u32, &[][..])?;
}
Ok(())
}
fn term_doc_freq(&self) -> Option<u32> {
None
}
}
/// Recorder encoding document ids, and term frequencies
@@ -133,7 +126,6 @@ pub struct TermFrequencyRecorder {
stack: ExpUnrolledLinkedList,
current_doc: DocId,
current_tf: u32,
term_doc_freq: u32,
}
impl Recorder for TermFrequencyRecorder {
@@ -142,7 +134,6 @@ impl Recorder for TermFrequencyRecorder {
stack: ExpUnrolledLinkedList::new(),
current_doc: u32::max_value(),
current_tf: 0u32,
term_doc_freq: 0u32,
}
}
@@ -151,7 +142,6 @@ impl Recorder for TermFrequencyRecorder {
}
fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
self.term_doc_freq += 1;
self.current_doc = doc;
let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
}
@@ -182,10 +172,6 @@ impl Recorder for TermFrequencyRecorder {
Ok(())
}
fn term_doc_freq(&self) -> Option<u32> {
Some(self.term_doc_freq)
}
}
/// Recorder encoding term frequencies as well as positions.
@@ -193,14 +179,12 @@ impl Recorder for TermFrequencyRecorder {
pub struct TFAndPositionRecorder {
stack: ExpUnrolledLinkedList,
current_doc: DocId,
term_doc_freq: u32,
}
impl Recorder for TFAndPositionRecorder {
fn new() -> Self {
TFAndPositionRecorder {
stack: ExpUnrolledLinkedList::new(),
current_doc: u32::max_value(),
term_doc_freq: 0u32,
}
}
@@ -210,7 +194,6 @@ impl Recorder for TFAndPositionRecorder {
fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
self.current_doc = doc;
self.term_doc_freq += 1u32;
let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
}
@@ -250,10 +233,6 @@ impl Recorder for TFAndPositionRecorder {
}
Ok(())
}
fn term_doc_freq(&self) -> Option<u32> {
Some(self.term_doc_freq)
}
}
#[cfg(test)]

View File

@@ -13,7 +13,6 @@ use crate::schema::IndexRecordOption;
use crate::DocId;
use crate::directory::ReadOnlySource;
use crate::fieldnorm::FieldNormReader;
use crate::postings::BlockSegmentPostings;
/// `SegmentPostings` represents the inverted list or postings associated to
@@ -22,7 +21,7 @@ use crate::postings::BlockSegmentPostings;
/// As we iterate through the `SegmentPostings`, the frequencies are optionally decoded.
/// Positions on the other hand, are optionally entirely decoded upfront.
pub struct SegmentPostings {
pub(crate) block_cursor: BlockSegmentPostings,
block_cursor: BlockSegmentPostings,
cur: usize,
position_reader: Option<PositionReader>,
block_searcher: BlockSearcher,
@@ -39,12 +38,6 @@ impl SegmentPostings {
}
}
/// Returns the overall number of documents in the block postings.
/// It does not take into account whether documents are deleted or not.
pub fn doc_freq(&self) -> u32 {
self.block_cursor.doc_freq()
}
/// Creates a segment postings object with the given documents
/// and no frequency encoded.
///
@@ -56,8 +49,7 @@ impl SegmentPostings {
pub fn create_from_docs(docs: &[u32]) -> SegmentPostings {
let mut buffer = Vec::new();
{
let mut postings_serializer = PostingsSerializer::new(&mut buffer, false, false, None);
postings_serializer.new_term(docs.len() as u32);
let mut postings_serializer = PostingsSerializer::new(&mut buffer, false, false);
for &doc in docs {
postings_serializer.write_doc(doc, 1u32);
}
@@ -74,29 +66,6 @@ impl SegmentPostings {
SegmentPostings::from_block_postings(block_segment_postings, None)
}
/// Helper functions to create `SegmentPostings` for tests.
pub fn create_from_docs_and_tfs(
doc_and_tfs: &[(u32, u32)],
fieldnorm_reader: Option<FieldNormReader>,
) -> crate::Result<SegmentPostings> {
let mut buffer = Vec::new();
let mut postings_serializer =
PostingsSerializer::new(&mut buffer, true, false, fieldnorm_reader);
postings_serializer.new_term(doc_and_tfs.len() as u32);
for &(doc, tf) in doc_and_tfs {
postings_serializer.write_doc(doc, tf);
}
postings_serializer
.close_term(doc_and_tfs.len() as u32)?;
let block_segment_postings = BlockSegmentPostings::from_data(
doc_and_tfs.len() as u32,
ReadOnlySource::from(buffer),
IndexRecordOption::WithFreqs,
IndexRecordOption::WithFreqs,
);
Ok(SegmentPostings::from_block_postings(block_segment_postings, None))
}
/// Reads a Segment postings from an &[u8]
///
/// * `len` - number of document in the posting lists.
@@ -121,7 +90,6 @@ impl DocSet for SegmentPostings {
// next needs to be called a first time to point to the correct element.
#[inline]
fn advance(&mut self) -> DocId {
assert!(self.block_cursor.block_is_loaded());
if self.cur == COMPRESSION_BLOCK_SIZE - 1 {
self.cur = 0;
self.block_cursor.advance();
@@ -173,7 +141,7 @@ impl DocSet for SegmentPostings {
impl HasLen for SegmentPostings {
fn len(&self) -> usize {
self.block_cursor.doc_freq() as usize
self.block_cursor.doc_freq()
}
}

View File

@@ -3,16 +3,13 @@ use crate::common::{BinarySerializable, VInt};
use crate::common::{CompositeWrite, CountingWriter};
use crate::core::Segment;
use crate::directory::WritePtr;
use crate::fieldnorm::FieldNormReader;
use crate::positions::PositionSerializer;
use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
use crate::postings::skip::SkipSerializer;
use crate::query::BM25Weight;
use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::DocId;
use std::cmp::Ordering;
use std::io::{self, Write};
/// `InvertedIndexSerializer` is in charge of serializing
@@ -92,7 +89,6 @@ impl InvertedIndexSerializer {
&mut self,
field: Field,
total_num_tokens: u64,
fieldnorm_reader: Option<FieldNormReader>,
) -> io::Result<FieldSerializer<'_>> {
let field_entry: &FieldEntry = self.schema.get_field_entry(field);
let term_dictionary_write = self.terms_write.for_field(field);
@@ -107,7 +103,6 @@ impl InvertedIndexSerializer {
postings_write,
positions_write,
positionsidx_write,
fieldnorm_reader,
)
}
@@ -139,7 +134,6 @@ impl<'a> FieldSerializer<'a> {
postings_write: &'a mut CountingWriter<WritePtr>,
positions_write: &'a mut CountingWriter<WritePtr>,
positionsidx_write: &'a mut CountingWriter<WritePtr>,
fieldnorm_reader: Option<FieldNormReader>,
) -> io::Result<FieldSerializer<'a>> {
let (term_freq_enabled, position_enabled): (bool, bool) = match field_type {
FieldType::Str(ref text_options) => {
@@ -153,12 +147,8 @@ impl<'a> FieldSerializer<'a> {
_ => (false, false),
};
let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
let postings_serializer = PostingsSerializer::new(
postings_write,
term_freq_enabled,
position_enabled,
fieldnorm_reader,
);
let postings_serializer =
PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled);
let positions_serializer_opt = if position_enabled {
Some(PositionSerializer::new(positions_write, positionsidx_write))
} else {
@@ -191,8 +181,8 @@ impl<'a> FieldSerializer<'a> {
/// Starts the postings for a new term.
/// * term - the term. It needs to come after the previous term according
/// to the lexicographical order.
/// * term_doc_freq - return the number of document containing the term.
pub fn new_term(&mut self, term: &[u8], term_doc_freq: u32) -> io::Result<TermOrdinal> {
/// * doc_freq - return the number of document containing the term.
pub fn new_term(&mut self, term: &[u8]) -> io::Result<TermOrdinal> {
assert!(
!self.term_open,
"Called new_term, while the previous term was not closed."
@@ -203,7 +193,6 @@ impl<'a> FieldSerializer<'a> {
self.term_dictionary_builder.insert_key(term)?;
let term_ordinal = self.num_terms;
self.num_terms += 1;
self.postings_serializer.new_term(term_doc_freq);
Ok(term_ordinal)
}
@@ -317,21 +306,6 @@ pub struct PostingsSerializer<W: Write> {
termfreq_enabled: bool,
termfreq_sum_enabled: bool,
fieldnorm_reader: Option<FieldNormReader>,
bm25_weight: Option<BM25Weight>,
num_docs: u32, // Number of docs in the segment
avg_fieldnorm: f32, // Average number of term in the field for that segment.
// this value is used to compute the block wand information.
}
fn get_avg_fieldnorm(fieldnorm_reader: &FieldNormReader) -> f32 {
let num_docs = fieldnorm_reader.num_docs();
let sum_fieldnorm: f32 = (0u32..num_docs)
.map(|doc| fieldnorm_reader.fieldnorm(doc) as f32)
.sum();
sum_fieldnorm / (num_docs as f32)
}
impl<W: Write> PostingsSerializer<W> {
@@ -339,16 +313,7 @@ impl<W: Write> PostingsSerializer<W> {
write: W,
termfreq_enabled: bool,
termfreq_sum_enabled: bool,
fieldnorm_reader: Option<FieldNormReader>,
) -> PostingsSerializer<W> {
let avg_fieldnorm: f32 = fieldnorm_reader
.as_ref()
.map(get_avg_fieldnorm)
.unwrap_or(0f32);
let num_docs = fieldnorm_reader
.as_ref()
.map(|fieldnorm_reader| fieldnorm_reader.num_docs())
.unwrap_or(0u32);
PostingsSerializer {
output_write: CountingWriter::wrap(write),
@@ -361,23 +326,6 @@ impl<W: Write> PostingsSerializer<W> {
last_doc_id_encoded: 0u32,
termfreq_enabled,
termfreq_sum_enabled,
fieldnorm_reader,
bm25_weight: None,
num_docs,
avg_fieldnorm,
}
}
pub fn new_term(&mut self, term_doc_freq: u32) {
if self.termfreq_enabled && self.num_docs > 0 {
let bm25_weight = BM25Weight::for_one_term(
term_doc_freq as u64,
self.num_docs as u64,
self.avg_fieldnorm,
);
self.bm25_weight = Some(bm25_weight);
}
}
@@ -394,6 +342,7 @@ impl<W: Write> PostingsSerializer<W> {
self.postings_write.extend(block_encoded);
}
if self.termfreq_enabled {
// encode the term_freqs
let (num_bits, block_encoded): (u8, &[u8]) = self
.block_encoder
.compress_block_unsorted(&self.block.term_freqs());
@@ -403,32 +352,6 @@ impl<W: Write> PostingsSerializer<W> {
let sum_freq = self.block.term_freqs().iter().cloned().sum();
self.skip_write.write_total_term_freq(sum_freq);
}
let mut blockwand_params_opt = None;
if let Some(bm25_weight) = self.bm25_weight.as_ref() {
if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
let docs = self.block.doc_ids();
let term_freqs = self.block.term_freqs();
blockwand_params_opt = docs
.iter()
.cloned()
.map(|doc| fieldnorm_reader.fieldnorm_id(doc))
.zip(term_freqs.iter().cloned())
.max_by(
|(left_fieldnorm_id, left_term_freq),
(right_fieldnorm_id, right_term_freq)| {
let left_score =
bm25_weight.tf_factor(*left_fieldnorm_id, *left_term_freq);
let right_score =
bm25_weight.tf_factor(*right_fieldnorm_id, *right_term_freq);
left_score
.partial_cmp(&right_score)
.unwrap_or(Ordering::Equal)
},
);
}
}
let (fieldnorm_id, term_freq) = blockwand_params_opt.unwrap_or((0u8, 0u32));
self.skip_write.write_blockwand_max(fieldnorm_id, term_freq);
}
self.block.clear();
}
@@ -477,7 +400,6 @@ impl<W: Write> PostingsSerializer<W> {
}
self.skip_write.clear();
self.postings_write.clear();
self.bm25_weight = None;
Ok(())
}

View File

@@ -1,9 +1,8 @@
use crate::common::{BinarySerializable, VInt};
use crate::common::BinarySerializable;
use crate::directory::ReadOnlySource;
use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
use crate::query::BM25Weight;
use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TERMINATED};
use crate::{DocId, TERMINATED};
use owned_read::OwnedRead;
pub struct SkipSerializer {
@@ -41,11 +40,6 @@ impl SkipSerializer {
.expect("Should never fail");
}
pub fn write_blockwand_max(&mut self, fieldnorm_id: u8, term_freq: u32) {
self.buffer.push(fieldnorm_id);
VInt(term_freq as u64).serialize_into_vec(&mut self.buffer);
}
pub fn data(&self) -> &[u8] {
&self.buffer[..]
}
@@ -69,23 +63,19 @@ pub(crate) struct SkipReader {
position_offset: u64,
}
#[derive(Clone, Copy, Debug)]
#[derive(Clone, Eq, PartialEq, Copy, Debug)]
pub(crate) enum BlockInfo {
BitPacked {
doc_num_bits: u8,
tf_num_bits: u8,
tf_sum: u32,
block_wand_fieldnorm_id: u8,
block_wand_term_freq: u32,
},
VInt {
num_docs: u32,
},
VInt(u32),
}
impl Default for BlockInfo {
fn default() -> Self {
BlockInfo::VInt { num_docs: 0u32 }
BlockInfo::VInt(0)
}
}
@@ -100,7 +90,7 @@ impl SkipReader {
last_doc_in_previous_block: 0u32,
owned_read: OwnedRead::new(data),
skip_info,
block_info: BlockInfo::VInt { num_docs: doc_freq },
block_info: BlockInfo::VInt(doc_freq),
byte_offset: 0,
remaining_docs: doc_freq,
position_offset: 0u64,
@@ -119,7 +109,7 @@ impl SkipReader {
};
self.last_doc_in_previous_block = 0u32;
self.owned_read = OwnedRead::new(data);
self.block_info = BlockInfo::VInt { num_docs: doc_freq };
self.block_info = BlockInfo::VInt(doc_freq);
self.byte_offset = 0;
self.remaining_docs = doc_freq;
self.position_offset = 0u64;
@@ -128,17 +118,8 @@ impl SkipReader {
}
}
pub fn block_max_score(&self, bm25_weight: &BM25Weight) -> Option<Score> {
match self.block_info {
BlockInfo::BitPacked {
block_wand_fieldnorm_id,
block_wand_term_freq,
..
} => Some(bm25_weight.score(block_wand_fieldnorm_id, block_wand_term_freq)),
BlockInfo::VInt { .. } => None,
}
}
#[cfg(test)]
#[inline(always)]
pub(crate) fn last_doc_in_block(&self) -> DocId {
self.last_doc_in_block
}
@@ -162,38 +143,25 @@ impl SkipReader {
doc_num_bits,
tf_num_bits: 0,
tf_sum: 0,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0,
};
}
IndexRecordOption::WithFreqs => {
let tf_num_bits = self.owned_read.get(1);
let block_wand_fieldnorm_id = self.owned_read.get(2);
self.owned_read.advance(3);
let block_wand_term_freq =
VInt::deserialize_u64(&mut self.owned_read).unwrap() as u32;
self.block_info = BlockInfo::BitPacked {
doc_num_bits,
tf_num_bits,
tf_sum: 0,
block_wand_fieldnorm_id,
block_wand_term_freq,
};
self.owned_read.advance(2);
}
IndexRecordOption::WithFreqsAndPositions => {
let tf_num_bits = self.owned_read.get(1);
self.owned_read.advance(2);
let tf_sum = u32::deserialize(&mut self.owned_read).expect("Failed reading tf_sum");
let block_wand_fieldnorm_id = self.owned_read.get(0);
self.owned_read.advance(1);
let block_wand_term_freq =
VInt::deserialize_u64(&mut self.owned_read).unwrap() as u32;
self.block_info = BlockInfo::BitPacked {
doc_num_bits,
tf_num_bits,
tf_sum,
block_wand_fieldnorm_id,
block_wand_term_freq,
};
}
}
@@ -208,7 +176,7 @@ impl SkipReader {
/// If the target is larger than all documents, the skip_reader
/// then advance to the last Variable In block.
pub fn seek(&mut self, target: DocId) {
while self.last_doc_in_block() < target {
while self.last_doc_in_block < target {
self.advance();
}
}
@@ -219,14 +187,13 @@ impl SkipReader {
doc_num_bits,
tf_num_bits,
tf_sum,
..
} => {
self.remaining_docs -= COMPRESSION_BLOCK_SIZE as u32;
self.byte_offset += compressed_block_size(doc_num_bits + tf_num_bits);
self.position_offset += tf_sum as u64;
}
BlockInfo::VInt { num_docs} => {
debug_assert_eq!(num_docs, self.remaining_docs);
BlockInfo::VInt(num_vint_docs) => {
debug_assert_eq!(num_vint_docs, self.remaining_docs);
self.remaining_docs = 0;
self.byte_offset = std::usize::MAX;
}
@@ -236,7 +203,7 @@ impl SkipReader {
self.read_block_info();
} else {
self.last_doc_in_block = TERMINATED;
self.block_info = BlockInfo::VInt { num_docs: self.remaining_docs };
self.block_info = BlockInfo::VInt(self.remaining_docs);
}
}
}
@@ -256,10 +223,8 @@ mod tests {
let mut skip_serializer = SkipSerializer::new();
skip_serializer.write_doc(1u32, 2u8);
skip_serializer.write_term_freq(3u8);
skip_serializer.write_blockwand_max(13u8, 3u32);
skip_serializer.write_doc(5u32, 5u8);
skip_serializer.write_term_freq(2u8);
skip_serializer.write_blockwand_max(8u8, 2u32);
skip_serializer.data().to_owned()
};
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
@@ -269,34 +234,30 @@ mod tests {
IndexRecordOption::WithFreqs,
);
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert!(matches!(
skip_reader.block_info,
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 2u8,
tf_num_bits: 3u8,
tf_sum: 0,
block_wand_fieldnorm_id: 13,
block_wand_term_freq: 3
tf_sum: 0
}
));
);
skip_reader.advance();
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
assert!(matches!(
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 5u8,
tf_num_bits: 2u8,
tf_sum: 0,
block_wand_fieldnorm_id: 8,
block_wand_term_freq: 2
tf_sum: 0
}
));
);
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32));
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32));
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32));
}
#[test]
@@ -314,34 +275,30 @@ mod tests {
IndexRecordOption::Basic,
);
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert!(matches!(
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 2u8,
tf_num_bits: 0,
tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
tf_sum: 0u32
}
));
);
skip_reader.advance();
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
assert!(matches!(
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 5u8,
tf_num_bits: 0,
tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
tf_sum: 0u32
}
));
);
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32));
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32));
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32));
}
#[test]
@@ -358,17 +315,15 @@ mod tests {
IndexRecordOption::Basic,
);
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert!(matches!(
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 2u8,
tf_num_bits: 0,
tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
tf_sum: 0u32
}
));
);
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32));
}
}

View File

@@ -3,14 +3,11 @@ use crate::query::Explanation;
use crate::Score;
use crate::Searcher;
use crate::Term;
use serde::Deserialize;
use serde::Serialize;
const K1: f32 = 1.2;
const B: f32 = 0.75;
fn idf(doc_freq: u64, doc_count: u64) -> f32 {
assert!(doc_count >= doc_freq, "{} >= {}", doc_count, doc_freq);
let x = ((doc_count - doc_freq) as f32 + 0.5) / (doc_freq as f32 + 0.5);
(1f32 + x).ln()
}
@@ -28,12 +25,6 @@ fn compute_tf_cache(average_fieldnorm: f32) -> [f32; 256] {
cache
}
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct BM25Params {
pub idf: f32,
pub avg_fieldnorm: f32,
}
pub struct BM25Weight {
idf_explain: Explanation,
weight: f32,
@@ -71,9 +62,17 @@ impl BM25Weight {
}
let average_fieldnorm = total_num_tokens as f32 / total_num_docs as f32;
let mut idf_explain: Explanation;
if terms.len() == 1 {
let term_doc_freq = searcher.doc_freq(&terms[0]);
BM25Weight::for_one_term(term_doc_freq, total_num_docs, average_fieldnorm)
let idf = idf(term_doc_freq, total_num_docs);
idf_explain =
Explanation::new("idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))", idf);
idf_explain.add_const(
"n, number of docs containing this term",
term_doc_freq as f32,
);
idf_explain.add_const("N, total number of docs", total_num_docs as f32);
} else {
let idf = terms
.iter()
@@ -82,21 +81,9 @@ impl BM25Weight {
idf(term_doc_freq, total_num_docs)
})
.sum::<f32>();
let idf_explain = Explanation::new("idf", idf);
BM25Weight::new(idf_explain, average_fieldnorm)
idf_explain = Explanation::new("idf", idf);
}
}
pub fn for_one_term(term_doc_freq: u64, total_num_docs: u64, avg_fieldnorm: f32) -> BM25Weight {
let idf = idf(term_doc_freq, total_num_docs);
let mut idf_explain =
Explanation::new("idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))", idf);
idf_explain.add_const(
"n, number of docs containing this term",
term_doc_freq as f32,
);
idf_explain.add_const("N, total number of docs", total_num_docs as f32);
BM25Weight::new(idf_explain, avg_fieldnorm)
BM25Weight::new(idf_explain, average_fieldnorm)
}
fn new(idf_explain: Explanation, average_fieldnorm: f32) -> BM25Weight {
@@ -111,23 +98,15 @@ impl BM25Weight {
#[inline(always)]
pub fn score(&self, fieldnorm_id: u8, term_freq: u32) -> Score {
self.weight * self.tf_factor(fieldnorm_id, term_freq)
}
pub fn max_score(&self) -> Score {
self.score(255u8, 2_013_265_944)
}
#[inline(always)]
pub(crate) fn tf_factor(&self, fieldnorm_id: u8, term_freq: u32) -> f32 {
let term_freq = term_freq as f32;
let norm = self.cache[fieldnorm_id as usize];
term_freq / (term_freq + norm)
let term_freq = term_freq as f32;
self.weight * term_freq / (term_freq + norm)
}
pub fn explain(&self, fieldnorm_id: u8, term_freq: u32) -> Explanation {
// The explain format is directly copied from Lucene's.
// (So, Kudos to Lucene)
let score = self.score(fieldnorm_id, term_freq);
let norm = self.cache[fieldnorm_id as usize];
@@ -164,6 +143,6 @@ mod tests {
#[test]
fn test_idf() {
assert_nearly_equals!(idf(1, 2), std::f32::consts::LN_2);
assert_nearly_equals!(idf(1, 2), 0.6931472);
}
}

View File

@@ -1,206 +0,0 @@
use crate::query::term_query::TermScorer;
use crate::query::Scorer;
use crate::{DocId, DocSet, Score, TERMINATED};
use std::ops::DerefMut;
use std::ops::Deref;
/// Takes a term_scorers sorted by their current doc() and a threshold and returns
/// Returns (pivot_len, pivot_ord) defined as follows:
/// - `pivot_doc` lowest document that has a chance of exceeding (>) the threshold score.
/// - `before_pivot_len` number of term_scorers such that term_scorer.doc() < pivot.
/// - `pivot_len` number of term_scorers such that term_scorer.doc() <= pivot.
///
/// We always have `before_pivot_len` < `pivot_len`.
///
/// None is returned if we establish that no document can exceed the threshold.
fn find_pivot_doc(term_scorers: &[TermScorerWithMaxScore], threshold: f32) -> Option<(usize, usize, DocId)> {
let mut max_score = 0.0f32;
let mut before_pivot_len = 0;
let mut pivot_doc = TERMINATED;
while before_pivot_len < term_scorers.len() {
let term_scorer = &term_scorers[before_pivot_len];
max_score += term_scorer.max_score;
if max_score > threshold {
pivot_doc = term_scorer.doc();
break;
}
before_pivot_len += 1;
}
if pivot_doc == TERMINATED {
return None;
}
// Right now i is an ordinal, we want a len.
let mut pivot_len = before_pivot_len + 1;
// Some other term_scorer may be positioned on the same document.
pivot_len += term_scorers[pivot_len..].iter()
.take_while(|term_scorer| term_scorer.doc() == pivot_doc)
.count();
Some((before_pivot_len, pivot_len, pivot_doc))
}
struct TermScorerWithMaxScore<'a> {
scorer: &'a mut TermScorer,
max_score: f32,
}
impl<'a> From<&'a mut TermScorer> for TermScorerWithMaxScore<'a> {
fn from(scorer: &'a mut TermScorer) -> Self {
let max_score = scorer.max_score();
TermScorerWithMaxScore {
scorer,
max_score
}
}
}
impl<'a> Deref for TermScorerWithMaxScore<'a> {
type Target = TermScorer;
fn deref(&self) -> &Self::Target {
self.scorer
}
}
impl<'a> DerefMut for TermScorerWithMaxScore<'a> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.scorer
}
}
// Before and after calling this method, scorers need to be sorted by their `.doc()`.
fn block_max_was_too_low_advance_one_scorer(scorers: &mut Vec<TermScorerWithMaxScore>, pivot_len: usize) {
let mut scorer_to_seek = pivot_len - 1;
let mut doc_to_seek_after = scorers[scorer_to_seek].doc();
for scorer_ord in (0..pivot_len - 1).rev() {
let scorer = &scorers[scorer_ord];
if scorer.last_doc_in_block() <= doc_to_seek_after {
doc_to_seek_after = scorer.last_doc_in_block();
scorer_to_seek = scorer_ord;
}
}
for scorer in &scorers[pivot_len..] {
if scorer.doc() <= doc_to_seek_after {
doc_to_seek_after = scorer.doc();
}
}
scorers[scorer_to_seek].seek(doc_to_seek_after + 1);
restore_ordering(scorers, scorer_to_seek);
}
// Given a list of term_scorers and a `ord` and assuming that `term_scorers[ord]` is sorted
// except term_scorers[ord] that might be in advance compared to its ranks,
// bubble up term_scorers[ord] in order to restore the ordering.
fn restore_ordering(term_scorers: &mut Vec<TermScorerWithMaxScore>, ord: usize) {
let doc = term_scorers[ord].doc();
for i in ord + 1..term_scorers.len() {
if term_scorers[i].doc() >= doc {
break;
}
term_scorers.swap(i, i - 1);
}
}
// Attempts to advance all term_scorers between `&term_scorers[0..before_len]` to the pivot.
// If this works, return true.
// If this fails (ie: one of the term_scorer does not contain `pivot_doc` and seek goes past the
// pivot), reorder the term_scorers to ensure the list is still sorted and returns `false`.
// If a term_scorer reach TERMINATED in the process return false remove the term_scorer and return.
fn align_scorers(term_scorers: &mut Vec<TermScorerWithMaxScore>, pivot_doc: DocId, before_pivot_len: usize) -> bool {
debug_assert_ne!(pivot_doc, TERMINATED);
for i in (0..before_pivot_len).rev() {
let new_doc = term_scorers[i].seek(pivot_doc);
if new_doc != pivot_doc {
if new_doc == TERMINATED {
term_scorers.swap_remove(i);
}
// We went past the pivot.
// We just go through the outer loop mechanic (Note that pivot is
// still a possible candidate).
//
// Termination is still guaranteed since we can only consider the same
// pivot at most term_scorers.len() - 1 times.
restore_ordering(term_scorers, i);
return false;
}
}
return true;
}
// Assumes terms_scorers[..pivot_len] are positioned on the same doc (pivot_doc).
// Advance term_scorers[..pivot_len] and out of these removes the terminated scores.
// Restores the ordering of term_scorers.
fn advance_all_scorers_on_pivot(term_scorers: &mut Vec<TermScorerWithMaxScore>, pivot_len: usize) {
let mut i = 0;
for _ in 0..pivot_len {
if term_scorers[i].advance() == TERMINATED {
term_scorers.swap_remove(i);
} else {
i += 1;
}
}
term_scorers.sort_by_key(|scorer| scorer.doc());
}
pub fn block_wand(
mut scorers: Vec<TermScorer>,
mut threshold: f32,
callback: &mut dyn FnMut(u32, Score) -> Score,
) {
let mut scorers: Vec<TermScorerWithMaxScore> = scorers.iter_mut().map(TermScorerWithMaxScore::from).collect();
scorers.sort_by_key(|scorer| scorer.doc());
loop {
// At this point we need to ensure that the scorers are sorted!
if let Some((before_pivot_len, pivot_len, pivot_doc)) = find_pivot_doc(&scorers[..], threshold) {
debug_assert_ne!(pivot_doc, TERMINATED);
debug_assert!(before_pivot_len < pivot_len);
let block_max_score_upperbound: Score = scorers[..pivot_len].iter_mut()
.map(|scorer| {
scorer.shallow_seek(pivot_doc);
scorer.block_max_score()
})
.sum();
// Beware after shallow advance, skip readers can be in advance compared to
// the segment posting lists.
//
// `block_segment_postings.load_block()` need to be called separately.
if block_max_score_upperbound <= threshold {
// Block max condition was not reached.
// We could get away by simply advancing the scorers to DocId + 1 but it would
// be inefficient. The optimization requires proper explanation and was
// isolated in a different function.
block_max_was_too_low_advance_one_scorer(&mut scorers, pivot_len);
continue;
}
// Block max condition is observed.
//
// Let's try and advance all scorers before the pivot to the pivot.
if !align_scorers(&mut scorers, pivot_doc, before_pivot_len) {
// At least of the scorer does not contain the pivot.
//
// Let's stop scoring this pivot and go through the pivot selection again.
// Note that the current pivot is not necessarily a bad candidate and it
// may be picked again.
continue;
}
// At this point, all scorers are positioned on the doc.
let score = scorers[..pivot_len]
.iter_mut()
.map(|scorer| scorer.score())
.sum();
if score > threshold {
threshold = callback(pivot_doc, score);
}
// let's advance all of the scorers that are currently positioned on the pivot.
advance_all_scorers_on_pivot(&mut scorers, pivot_len);
} else {
return;
}
}
}

View File

@@ -1,5 +1,4 @@
use crate::core::SegmentReader;
use crate::postings::FreqReadingOption;
use crate::query::explanation::does_not_match;
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner};
use crate::query::term_query::TermScorer;
@@ -15,12 +14,12 @@ use crate::query::{intersect_scorers, Explanation};
use crate::{DocId, Score};
use std::collections::HashMap;
enum SpecializedScorer {
TermUnion(Vec<TermScorer>),
enum SpecializedScorer<TScoreCombiner: ScoreCombiner> {
TermUnion(Union<TermScorer, TScoreCombiner>),
Other(Box<dyn Scorer>),
}
fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> SpecializedScorer
fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> SpecializedScorer<TScoreCombiner>
where
TScoreCombiner: ScoreCombiner,
{
@@ -36,29 +35,20 @@ where
.into_iter()
.map(|scorer| *(scorer.downcast::<TermScorer>().map_err(|_| ()).unwrap()))
.collect();
if scorers
.iter()
.all(|scorer| scorer.freq_reading_option() == FreqReadingOption::ReadFreq)
{
// Block wand is only available iff we read frequencies.
return SpecializedScorer::TermUnion(scorers);
} else {
return SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(
scorers,
)));
}
return SpecializedScorer::TermUnion(Union::<TermScorer, TScoreCombiner>::from(
scorers,
));
}
}
SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(scorers)))
}
fn into_box_scorer<TScoreCombiner: ScoreCombiner>(scorer: SpecializedScorer) -> Box<dyn Scorer> {
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let union_scorer = Union::<TermScorer, TScoreCombiner>::from(term_scorers);
Box::new(union_scorer)
impl<TScoreCombiner: ScoreCombiner> Into<Box<dyn Scorer>> for SpecializedScorer<TScoreCombiner> {
fn into(self) -> Box<dyn Scorer> {
match self {
Self::TermUnion(union) => Box::new(union),
Self::Other(scorer) => scorer,
}
SpecializedScorer::Other(scorer) => scorer,
}
}
@@ -95,47 +85,46 @@ impl BooleanWeight {
&self,
reader: &SegmentReader,
boost: f32,
) -> crate::Result<SpecializedScorer> {
) -> crate::Result<SpecializedScorer<TScoreCombiner>> {
let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;
let should_scorer_opt: Option<SpecializedScorer> = per_occur_scorers
let should_scorer_opt: Option<SpecializedScorer<TScoreCombiner>> = per_occur_scorers
.remove(&Occur::Should)
.map(scorer_union::<TScoreCombiner>);
let exclude_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::MustNot)
.map(scorer_union::<DoNothingCombiner>)
.map(into_box_scorer::<DoNothingCombiner>);
.map(Into::into);
let must_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::Must)
.map(intersect_scorers);
let positive_scorer: SpecializedScorer = match (should_scorer_opt, must_scorer_opt) {
(Some(should_scorer), Some(must_scorer)) => {
if self.scoring_enabled {
SpecializedScorer::Other(Box::new(RequiredOptionalScorer::<
Box<dyn Scorer>,
Box<dyn Scorer>,
TScoreCombiner,
>::new(
must_scorer,
into_box_scorer::<TScoreCombiner>(should_scorer),
)))
} else {
SpecializedScorer::Other(must_scorer)
let positive_scorer: SpecializedScorer<TScoreCombiner> =
match (should_scorer_opt, must_scorer_opt) {
(Some(should_scorer), Some(must_scorer)) => {
if self.scoring_enabled {
SpecializedScorer::Other(Box::new(RequiredOptionalScorer::<
Box<dyn Scorer>,
Box<dyn Scorer>,
TScoreCombiner,
>::new(
must_scorer, should_scorer.into()
)))
} else {
SpecializedScorer::Other(must_scorer)
}
}
}
(None, Some(must_scorer)) => SpecializedScorer::Other(must_scorer),
(Some(should_scorer), None) => should_scorer,
(None, None) => {
return Ok(SpecializedScorer::Other(Box::new(EmptyScorer)));
}
};
(None, Some(must_scorer)) => SpecializedScorer::Other(must_scorer),
(Some(should_scorer), None) => should_scorer,
(None, None) => {
return Ok(SpecializedScorer::Other(Box::new(EmptyScorer)));
}
};
if let Some(exclude_scorer) = exclude_scorer_opt {
let positive_scorer_boxed: Box<dyn Scorer> =
into_box_scorer::<TScoreCombiner>(positive_scorer);
let positive_scorer_boxed: Box<dyn Scorer> = positive_scorer.into();
Ok(SpecializedScorer::Other(Box::new(Exclude::new(
positive_scorer_boxed,
exclude_scorer,
@@ -159,12 +148,10 @@ impl Weight for BooleanWeight {
}
} else if self.scoring_enabled {
self.complex_scorer::<SumWithCoordsCombiner>(reader, boost)
.map(|specialized_scorer| {
into_box_scorer::<SumWithCoordsCombiner>(specialized_scorer)
})
.map(Into::into)
} else {
self.complex_scorer::<DoNothingCombiner>(reader, boost)
.map(into_box_scorer::<DoNothingCombiner>)
.map(Into::into)
}
}
@@ -195,9 +182,7 @@ impl Weight for BooleanWeight {
) -> crate::Result<()> {
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let mut union_scorer =
Union::<TermScorer, SumWithCoordsCombiner>::from(term_scorers);
SpecializedScorer::TermUnion(mut union_scorer) => {
for_each_scorer(&mut union_scorer, callback);
}
SpecializedScorer::Other(mut scorer) => {
@@ -225,8 +210,8 @@ impl Weight for BooleanWeight {
) -> crate::Result<()> {
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
super::block_wand(term_scorers, threshold, callback);
SpecializedScorer::TermUnion(mut union_scorer) => {
for_each_pruning_scorer(&mut union_scorer, threshold, callback);
}
SpecializedScorer::Other(mut scorer) => {
for_each_pruning_scorer(scorer.as_mut(), threshold, callback);

View File

@@ -1,8 +1,6 @@
mod block_wand;
mod boolean_query;
mod boolean_weight;
pub(crate) use self::block_wand::block_wand;
pub use self::boolean_query::BooleanQuery;
#[cfg(test)]

View File

@@ -1,6 +1,5 @@
use crate::{DocId, TantivyError};
use serde::Serialize;
use std::fmt;
pub(crate) fn does_not_match(doc: DocId) -> TantivyError {
TantivyError::InvalidArgument(format!("Document #({}) does not match", doc))
@@ -19,12 +18,6 @@ pub struct Explanation {
details: Vec<Explanation>,
}
impl fmt::Debug for Explanation {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Explanation({})", self.to_pretty_json())
}
}
impl Explanation {
/// Creates a new explanation object.
pub fn new<T: ToString>(description: T, value: f32) -> Explanation {

View File

@@ -119,6 +119,7 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
continue 'outer;
}
}
debug_assert_eq!(candidate, self.left.doc());
debug_assert_eq!(candidate, self.right.doc());
debug_assert!(self.others.iter().all(|docset| docset.doc() == candidate));

View File

@@ -26,7 +26,6 @@ mod weight;
mod vec_docset;
pub(crate) mod score_combiner;
pub(crate) use self::bm25::BM25Weight;
pub use self::intersection::Intersection;
pub use self::union::Union;

View File

@@ -15,7 +15,8 @@ mod tests {
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::query::{Query, QueryParser, Scorer, TermQuery};
use crate::schema::{Field, IndexRecordOption, Schema, STRING, TEXT};
use crate::{Term, Index, TERMINATED};
use crate::Term;
use crate::{Index, TERMINATED};
#[test]
pub fn test_term_query_no_freq() {

View File

@@ -4,8 +4,8 @@ use crate::DocId;
use crate::Score;
use crate::fieldnorm::FieldNormReader;
use crate::postings::Postings;
use crate::postings::SegmentPostings;
use crate::postings::{FreqReadingOption, Postings};
use crate::query::bm25::BM25Weight;
pub struct TermScorer {
@@ -26,70 +26,13 @@ impl TermScorer {
similarity_weight,
}
}
}
pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
self.postings.block_cursor.shallow_seek(target_doc)
}
#[cfg(test)]
pub fn create_for_test(
doc_and_tfs: &[(DocId, u32)],
fieldnorm_vals: &[u32],
similarity_weight: BM25Weight,
) -> crate::Result<TermScorer> {
assert!(!doc_and_tfs.is_empty());
assert!(doc_and_tfs.len() <= fieldnorm_vals.len());
let doc_freq = doc_and_tfs.len();
let max_doc = doc_and_tfs.last().unwrap().0 + 1;
let mut fieldnorms: Vec<u32> = std::iter::repeat(1).take(max_doc as usize).collect();
for i in 0..doc_freq {
let doc = doc_and_tfs[i].0;
let fieldnorm = fieldnorm_vals[i];
fieldnorms[doc as usize] = fieldnorm;
}
let fieldnorm_reader = FieldNormReader::from(&fieldnorms[..]);
let segment_postings =
SegmentPostings::create_from_docs_and_tfs(doc_and_tfs, Some(fieldnorm_reader.clone()))?;
Ok(TermScorer::new(segment_postings, fieldnorm_reader, similarity_weight))
}
/// See `FreqReadingOption`.
pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
self.postings.block_cursor.freq_reading_option()
}
/// Returns the maximum score for the current block.
///
/// In rare cases the result may not be exact: a lower value may be returned
/// (which may lead us to return a lesser document).
///
/// At index time, we store the (fieldnorm_id, term frequency) pair that
/// maximizes the score assuming the average fieldnorm computed on this
/// segment.
///
/// Though extremely rare, it is theoretically possible that the actual
/// average fieldnorm is different enough from the current segment's average
/// fieldnorm that the maximum over a specific block is achieved on a
/// different document.
///
/// (The result is on the other hand guaranteed to be correct if there is
/// only one segment.)
pub fn block_max_score(&mut self) -> Score {
    let block_cursor = &mut self.postings.block_cursor;
    block_cursor.block_max_score(&self.fieldnorm_reader, &self.similarity_weight)
}
impl TermScorer {
/// Term frequency of the term within the current document,
/// as reported by the underlying postings.
pub fn term_freq(&self) -> u32 {
    let postings = &self.postings;
    postings.term_freq()
}
/// Number of documents containing the term, widened to `usize`.
pub fn doc_freq(&self) -> usize {
    let doc_freq = self.postings.doc_freq();
    doc_freq as usize
}
/// Fieldnorm id associated with the current document.
pub fn fieldnorm_id(&self) -> u8 {
    let doc = self.doc();
    self.fieldnorm_reader.fieldnorm_id(doc)
}
@@ -99,14 +42,6 @@ impl TermScorer {
let term_freq = self.term_freq();
self.similarity_weight.explain(fieldnorm_id, term_freq)
}
/// Upper bound for the score of this term over the whole posting list,
/// as given by the similarity weight.
pub fn max_score(&self) -> f32 {
    let similarity_weight = &self.similarity_weight;
    similarity_weight.max_score()
}
/// Last document id of the current block, read from the skip data.
pub fn last_doc_in_block(&self) -> DocId {
    let skip_reader = &self.postings.block_cursor.skip_reader;
    skip_reader.last_doc_in_block()
}
}
impl DocSet for TermScorer {
@@ -134,99 +69,3 @@ impl Scorer for TermScorer {
self.similarity_weight.score(fieldnorm_id, term_freq)
}
}
#[cfg(test)]
mod tests {
    use crate::assert_nearly_equals;
    use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
    use crate::query::term_query::TermScorer;
    use crate::query::{BM25Weight, Scorer};
    use crate::{DocId, DocSet, TERMINATED};
    use proptest::prelude::*;

    // Walks a small hand-built posting list and checks scores, term
    // frequencies, block-max and global-max scores against precomputed
    // BM25 values.
    #[test]
    fn test_term_scorer_max_score() -> crate::Result<()> {
        let bm25_weight = BM25Weight::for_one_term(3, 6, 10f32);
        let mut term_scorer =
            TermScorer::create_for_test(&[(2, 3), (3, 12), (7, 8)], &[10, 12, 100], bm25_weight)?;
        // Global upper bound over the whole posting list.
        let max_scorer = term_scorer.max_score();
        assert_eq!(max_scorer, 1.3990127f32);
        assert_eq!(term_scorer.doc(), 2);
        assert_eq!(term_scorer.term_freq(), 3);
        assert_nearly_equals!(term_scorer.block_max_score(), 1.3676447f32);
        assert_nearly_equals!(term_scorer.score(), 1.0892314f32);
        assert_eq!(term_scorer.advance(), 3);
        assert_eq!(term_scorer.doc(), 3);
        assert_eq!(term_scorer.term_freq(), 12);
        assert_nearly_equals!(term_scorer.score(), 1.3676447f32);
        assert_eq!(term_scorer.advance(), 7);
        assert_eq!(term_scorer.doc(), 7);
        assert_eq!(term_scorer.term_freq(), 8);
        assert_nearly_equals!(term_scorer.score(), 0.72015285f32);
        // Past the last posting the docset is exhausted.
        assert_eq!(term_scorer.advance(), TERMINATED);
        Ok(())
    }

    // Checks that `shallow_seek` only moves the skip data: the current
    // document stays put until a real `seek` is issued.
    #[test]
    fn test_term_scorer_shallow_advance() -> crate::Result<()> {
        let bm25_weight = BM25Weight::for_one_term(300, 1024, 10f32);
        let mut doc_and_tfs = vec![];
        for i in 0u32..300u32 {
            let doc = i * 10;
            doc_and_tfs.push((doc, 1u32 + doc % 3u32));
        }
        let fieldnorms: Vec<u32> = std::iter::repeat(10u32).take(1024).collect();
        let mut term_scorer =
            TermScorer::create_for_test(&doc_and_tfs, &fieldnorms, bm25_weight)?;
        assert_eq!(term_scorer.doc(), 0u32);
        // Shallow seek does not advance the current doc...
        term_scorer.shallow_seek(1289);
        assert_eq!(term_scorer.doc(), 0u32);
        // ...a real seek lands on the first doc >= 1289 (docs are multiples of 10).
        term_scorer.seek(1289);
        assert_eq!(term_scorer.doc(), 1290);
        Ok(())
    }

    proptest! {
        // For random posting lists, the advertised block-max score must match
        // the actual maximum score observed while scoring every doc in the block.
        #[test]
        fn test_term_scorer_block_max_score(term_freqs_fieldnorms in proptest::collection::vec((1u32..10u32, 0u32..100u32), 80..300)) {
            let term_doc_freq = term_freqs_fieldnorms.len();
            // Docs are 0..term_doc_freq, each with its generated term frequency.
            let doc_tfs: Vec<(u32, u32)> = term_freqs_fieldnorms.iter()
                .cloned()
                .enumerate()
                .map(|(doc, (tf, _))| (doc as u32, tf))
                .collect();
            // A doc's fieldnorm is its term frequency plus some extra terms,
            // so that fieldnorm >= tf always holds.
            let mut fieldnorms: Vec<u32> = vec![];
            for i in 0..term_doc_freq {
                let (tf, num_extra_terms) = term_freqs_fieldnorms[i];
                fieldnorms.push(tf + num_extra_terms);
            }
            let average_fieldnorm = fieldnorms
                .iter()
                .cloned()
                .sum::<u32>() as f32 / term_doc_freq as f32;
            // Average fieldnorm is over the entire index,
            // not necessarily the docs that are in the posting list.
            // For this reason we multiply by 1.1 to make a realistic value.
            let bm25_weight = BM25Weight::for_one_term(term_doc_freq as u64,
                term_doc_freq as u64 * 10u64,
                average_fieldnorm);
            let mut term_scorer =
                TermScorer::create_for_test(&doc_tfs[..], &fieldnorms[..], bm25_weight).unwrap();
            let docs: Vec<DocId> = (0..term_doc_freq).map(|doc| doc as DocId).collect();
            // Compare block by block: the cursor's block_max_score versus the
            // max of the individually computed scores in that block.
            for block in docs.chunks(COMPRESSION_BLOCK_SIZE) {
                let block_max_score = term_scorer.block_max_score();
                let mut block_max_score_computed = 0.0f32;
                for &doc in block {
                    assert_eq!(term_scorer.doc(), doc);
                    block_max_score_computed = block_max_score_computed.max(term_scorer.score());
                    term_scorer.advance();
                }
                assert_nearly_equals!(block_max_score_computed, block_max_score);
            }
        }
    }
}

View File

@@ -92,7 +92,7 @@ impl TermWeight {
}
}
pub fn specialized_scorer(&self, reader: &SegmentReader, boost: f32) -> Result<TermScorer> {
fn specialized_scorer(&self, reader: &SegmentReader, boost: f32) -> Result<TermScorer> {
let field = self.term.field();
let inverted_index = reader.inverted_index(field);
let fieldnorm_reader = reader.get_fieldnorms_reader(field);