Compare commits

...

12 Commits

Author SHA1 Message Date
Paul Masurel
ce7db2e1d0 Merge branch 'master' into blockwand 2020-07-16 15:39:52 +09:00
Paul Masurel
1a462c641b merge 2020-07-14 14:01:48 +09:00
Paul Masurel
663db14d70 Bugfix.
go_to_first_doc was typically calling seek with a target smaller than
doc.

Since SegmentPostings typically do a linear search on the full block,
regardless of the current position, it could have our segment postings
go backward.
2020-07-14 12:59:20 +09:00
Paul Masurel
75ea74e465 added blockwand information 2020-07-14 09:56:20 +09:00
Paul Masurel
d71447a9e0 introducing Block WAND params to TextOptions 2020-06-10 09:44:34 +09:00
Paul Masurel
7df5a8a530 ll 2020-06-05 19:37:38 +09:00
Paul Masurel
f0ab0fa5b8 Relying on blockwand 2020-06-01 22:28:08 +09:00
Paul Masurel
a53572069b merge 2020-06-01 13:57:32 +09:00
Paul Masurel
522953ce5c merged 2020-05-27 17:13:49 +09:00
Paul Masurel
f750b18fd6 merged 2020-05-27 16:57:50 +09:00
Paul Masurel
5623112132 blop 2020-05-19 17:31:29 +09:00
Paul Masurel
dd20454cc7 First stab at blockwand 2020-05-17 16:09:04 +09:00
38 changed files with 1119 additions and 259 deletions

View File

@@ -58,6 +58,7 @@ winapi = "0.3"
rand = "0.7" rand = "0.7"
maplit = "1" maplit = "1"
matches = "0.1.8" matches = "0.1.8"
proptest = "0.10"
[dev-dependencies.fail] [dev-dependencies.fail]
version = "0.4" version = "0.4"

View File

@@ -519,7 +519,7 @@ impl Collector for TopDocs {
})?; })?;
} }
let fruit = heap let fruit: Vec<(Score, DocAddress)> = heap
.into_sorted_vec() .into_sorted_vec()
.into_iter() .into_iter()
.map(|cid| (cid.feature, DocAddress(segment_ord, cid.doc))) .map(|cid| (cid.feature, DocAddress(segment_ord, cid.doc)))

View File

@@ -89,6 +89,19 @@ impl FixedSize for u64 {
const SIZE_IN_BYTES: usize = 8; const SIZE_IN_BYTES: usize = 8;
} }
impl BinarySerializable for f32 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_f32::<Endianness>(*self)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
reader.read_f32::<Endianness>()
}
}
impl FixedSize for f32 {
const SIZE_IN_BYTES: usize = 4;
}
impl BinarySerializable for i64 { impl BinarySerializable for i64 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> { fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_i64::<Endianness>(*self) writer.write_i64::<Endianness>(*self)

View File

@@ -5,7 +5,7 @@ use std::io::Read;
use std::io::Write; use std::io::Write;
/// Wrapper over a `u64` that serializes as a variable int. /// Wrapper over a `u64` that serializes as a variable int.
#[derive(Debug, Eq, PartialEq)] #[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct VInt(pub u64); pub struct VInt(pub u64);
const STOP_BIT: u8 = 128; const STOP_BIT: u8 = 128;

View File

@@ -8,7 +8,7 @@ use crate::directory::ReadOnlySource;
use crate::fastfield::DeleteBitSet; use crate::fastfield::DeleteBitSet;
use crate::fastfield::FacetReader; use crate::fastfield::FacetReader;
use crate::fastfield::FastFieldReaders; use crate::fastfield::FastFieldReaders;
use crate::fieldnorm::FieldNormReader; use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::schema::Field; use crate::schema::Field;
use crate::schema::FieldType; use crate::schema::FieldType;
use crate::schema::Schema; use crate::schema::Schema;
@@ -48,7 +48,7 @@ pub struct SegmentReader {
positions_composite: CompositeFile, positions_composite: CompositeFile,
positions_idx_composite: CompositeFile, positions_idx_composite: CompositeFile,
fast_fields_readers: Arc<FastFieldReaders>, fast_fields_readers: Arc<FastFieldReaders>,
fieldnorms_composite: CompositeFile, fieldnorm_readers: FieldNormReaders,
store_source: ReadOnlySource, store_source: ReadOnlySource,
delete_bitset_opt: Option<DeleteBitSet>, delete_bitset_opt: Option<DeleteBitSet>,
@@ -126,8 +126,8 @@ impl SegmentReader {
/// They are simply stored as a fast field, serialized in /// They are simply stored as a fast field, serialized in
/// the `.fieldnorm` file of the segment. /// the `.fieldnorm` file of the segment.
pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader { pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader {
if let Some(fieldnorm_source) = self.fieldnorms_composite.open_read(field) { if let Some(fieldnorm_source) = self.fieldnorm_readers.get_field(field) {
FieldNormReader::open(fieldnorm_source) fieldnorm_source
} else { } else {
let field_name = self.schema.get_field_name(field); let field_name = self.schema.get_field_name(field);
let err_msg = format!( let err_msg = format!(
@@ -178,8 +178,8 @@ impl SegmentReader {
let fast_field_readers = let fast_field_readers =
Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?); Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?; let fieldnorm_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?; let fieldnorm_readers = FieldNormReaders::new(fieldnorm_data)?;
let delete_bitset_opt = if segment.meta().has_deletes() { let delete_bitset_opt = if segment.meta().has_deletes() {
let delete_data = segment.open_read(SegmentComponent::DELETE)?; let delete_data = segment.open_read(SegmentComponent::DELETE)?;
@@ -195,7 +195,7 @@ impl SegmentReader {
termdict_composite, termdict_composite,
postings_composite, postings_composite,
fast_fields_readers: fast_field_readers, fast_fields_readers: fast_field_readers,
fieldnorms_composite, fieldnorm_readers,
segment_id: segment.id(), segment_id: segment.id(),
store_source, store_source,
delete_bitset_opt, delete_bitset_opt,
@@ -308,7 +308,7 @@ impl SegmentReader {
self.positions_composite.space_usage(), self.positions_composite.space_usage(),
self.positions_idx_composite.space_usage(), self.positions_idx_composite.space_usage(),
self.fast_fields_readers.space_usage(), self.fast_fields_readers.space_usage(),
self.fieldnorms_composite.space_usage(), self.fieldnorm_readers.space_usage(),
self.get_store_reader().space_usage(), self.get_store_reader().space_usage(),
self.delete_bitset_opt self.delete_bitset_opt
.as_ref() .as_ref()

View File

@@ -94,12 +94,24 @@ impl Footer {
match &self.versioned_footer { match &self.versioned_footer {
VersionedFooter::V1 { VersionedFooter::V1 {
crc32: _crc, crc32: _crc,
store_compression: compression, store_compression,
} => { } => {
if &library_version.store_compression != compression { if &library_version.store_compression != store_compression {
return Err(Incompatibility::CompressionMismatch { return Err(Incompatibility::CompressionMismatch {
library_compression_format: library_version.store_compression.to_string(), library_compression_format: library_version.store_compression.to_string(),
index_compression_format: compression.to_string(), index_compression_format: store_compression.to_string(),
});
}
Ok(())
}
VersionedFooter::V2 {
crc32: _crc,
store_compression,
} => {
if &library_version.store_compression != store_compression {
return Err(Incompatibility::CompressionMismatch {
library_compression_format: library_version.store_compression.to_string(),
index_compression_format: store_compression.to_string(),
}); });
} }
Ok(()) Ok(())
@@ -120,24 +132,29 @@ pub enum VersionedFooter {
crc32: CrcHashU32, crc32: CrcHashU32,
store_compression: String, store_compression: String,
}, },
// Introduction of the Block WAND information.
V2 {
crc32: CrcHashU32,
store_compression: String,
},
} }
impl BinarySerializable for VersionedFooter { impl BinarySerializable for VersionedFooter {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
let mut buf = Vec::new(); let mut buf = Vec::new();
match self { match self {
VersionedFooter::V1 { VersionedFooter::V2 {
crc32, crc32,
store_compression: compression, store_compression: compression,
} => { } => {
// Serializes a valid `VersionedFooter` or panics if the version is unknown // Serializes a valid `VersionedFooter` or panics if the version is unknown
// [ version | crc_hash | compression_mode ] // [ version | crc_hash | compression_mode ]
// [ 0..4 | 4..8 | variable ] // [ 0..4 | 4..8 | variable ]
BinarySerializable::serialize(&1u32, &mut buf)?; BinarySerializable::serialize(&2u32, &mut buf)?;
BinarySerializable::serialize(crc32, &mut buf)?; BinarySerializable::serialize(crc32, &mut buf)?;
BinarySerializable::serialize(compression, &mut buf)?; BinarySerializable::serialize(compression, &mut buf)?;
} }
VersionedFooter::UnknownVersion => { VersionedFooter::V1 { .. } | VersionedFooter::UnknownVersion => {
return Err(io::Error::new( return Err(io::Error::new(
io::ErrorKind::InvalidInput, io::ErrorKind::InvalidInput,
"Cannot serialize an unknown versioned footer ", "Cannot serialize an unknown versioned footer ",
@@ -168,10 +185,17 @@ impl BinarySerializable for VersionedFooter {
let version = u32::deserialize(&mut cursor)?; let version = u32::deserialize(&mut cursor)?;
if version == 1 { if version == 1 {
let crc32 = u32::deserialize(&mut cursor)?; let crc32 = u32::deserialize(&mut cursor)?;
let compression = String::deserialize(&mut cursor)?; let store_compression = String::deserialize(&mut cursor)?;
Ok(VersionedFooter::V1 { Ok(VersionedFooter::V1 {
crc32, crc32,
store_compression: compression, store_compression,
})
} else if version == 2 {
let crc32 = u32::deserialize(&mut cursor)?;
let store_compression = String::deserialize(&mut cursor)?;
Ok(VersionedFooter::V2 {
crc32,
store_compression,
}) })
} else { } else {
Ok(VersionedFooter::UnknownVersion) Ok(VersionedFooter::UnknownVersion)
@@ -182,6 +206,7 @@ impl BinarySerializable for VersionedFooter {
impl VersionedFooter { impl VersionedFooter {
pub fn crc(&self) -> Option<CrcHashU32> { pub fn crc(&self) -> Option<CrcHashU32> {
match self { match self {
VersionedFooter::V2 { crc32, .. } => Some(*crc32),
VersionedFooter::V1 { crc32, .. } => Some(*crc32), VersionedFooter::V1 { crc32, .. } => Some(*crc32),
VersionedFooter::UnknownVersion { .. } => None, VersionedFooter::UnknownVersion { .. } => None,
} }
@@ -219,7 +244,7 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> { impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> { fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
let crc32 = self.hasher.take().unwrap().finalize(); let crc32 = self.hasher.take().unwrap().finalize();
let footer = Footer::new(VersionedFooter::V1 { let footer = Footer::new(VersionedFooter::V2 {
crc32, crc32,
store_compression: crate::store::COMPRESSION.to_string(), store_compression: crate::store::COMPRESSION.to_string(),
}); });
@@ -248,15 +273,11 @@ mod tests {
assert!(footer_proxy.terminate().is_ok()); assert!(footer_proxy.terminate().is_ok());
assert_eq!(vec.len(), 167); assert_eq!(vec.len(), 167);
let footer = Footer::deserialize(&mut &vec[..]).unwrap(); let footer = Footer::deserialize(&mut &vec[..]).unwrap();
if let VersionedFooter::V1 { assert!(matches!(
crc32: _, footer.versioned_footer,
store_compression, VersionedFooter::V2 { store_compression, .. }
} = footer.versioned_footer if store_compression == crate::store::COMPRESSION
{ ));
assert_eq!(store_compression, crate::store::COMPRESSION);
} else {
panic!("Versioned footer should be V1.");
}
assert_eq!(&footer.version, crate::version()); assert_eq!(&footer.version, crate::version());
} }
@@ -264,7 +285,7 @@ mod tests {
fn test_serialize_deserialize_footer() { fn test_serialize_deserialize_footer() {
let mut buffer = Vec::new(); let mut buffer = Vec::new();
let crc32 = 123456u32; let crc32 = 123456u32;
let footer: Footer = Footer::new(VersionedFooter::V1 { let footer: Footer = Footer::new(VersionedFooter::V2 {
crc32, crc32,
store_compression: "lz4".to_string(), store_compression: "lz4".to_string(),
}); });
@@ -276,7 +297,7 @@ mod tests {
#[test] #[test]
fn footer_length() { fn footer_length() {
let crc32 = 1111111u32; let crc32 = 1111111u32;
let versioned_footer = VersionedFooter::V1 { let versioned_footer = VersionedFooter::V2 {
crc32, crc32,
store_compression: "lz4".to_string(), store_compression: "lz4".to_string(),
}; };
@@ -297,7 +318,7 @@ mod tests {
// versionned footer length // versionned footer length
12 | 128, 12 | 128,
// index format version // index format version
1, 2,
0, 0,
0, 0,
0, 0,
@@ -316,7 +337,7 @@ mod tests {
let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap(); let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
assert!(cursor.is_empty()); assert!(cursor.is_empty());
let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32; let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 { let expected_versioned_footer: VersionedFooter = VersionedFooter::V2 {
crc32: expected_crc, crc32: expected_crc,
store_compression: "lz4".to_string(), store_compression: "lz4".to_string(),
}; };

View File

@@ -21,7 +21,7 @@ mod reader;
mod serializer; mod serializer;
mod writer; mod writer;
pub use self::reader::FieldNormReader; pub use self::reader::{FieldNormReader, FieldNormReaders};
pub use self::serializer::FieldNormsSerializer; pub use self::serializer::FieldNormsSerializer;
pub use self::writer::FieldNormsWriter; pub use self::writer::FieldNormsWriter;

View File

@@ -1,6 +1,41 @@
use super::{fieldnorm_to_id, id_to_fieldnorm}; use super::{fieldnorm_to_id, id_to_fieldnorm};
use crate::common::CompositeFile;
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::schema::Field;
use crate::space_usage::PerFieldSpaceUsage;
use crate::DocId; use crate::DocId;
use std::sync::Arc;
/// Reader for the fieldnorm (for each document, the number of tokens indexed in the
/// field) of all indexed fields in the index.
///
/// Each fieldnorm is approximately compressed over one byte. We refer to this byte as
/// `fieldnorm_id`.
/// The mapping from `fieldnorm` to `fieldnorm_id` is given by monotonic.
#[derive(Clone)]
pub struct FieldNormReaders {
data: Arc<CompositeFile>,
}
impl FieldNormReaders {
/// Creates a field norm reader.
pub fn new(source: ReadOnlySource) -> crate::Result<FieldNormReaders> {
let data = CompositeFile::open(&source)?;
Ok(FieldNormReaders {
data: Arc::new(data),
})
}
/// Returns the FieldNormReader for a specific field.
pub fn get_field(&self, field: Field) -> Option<FieldNormReader> {
self.data.open_read(field).map(FieldNormReader::open)
}
/// Return a break down of the space usage per field.
pub fn space_usage(&self) -> PerFieldSpaceUsage {
self.data.space_usage()
}
}
/// Reads the fieldnorm associated to a document. /// Reads the fieldnorm associated to a document.
/// The fieldnorm represents the length associated to /// The fieldnorm represents the length associated to
@@ -19,6 +54,7 @@ use crate::DocId;
/// Apart from compression, this scale also makes it possible to /// Apart from compression, this scale also makes it possible to
/// precompute computationally expensive functions of the fieldnorm /// precompute computationally expensive functions of the fieldnorm
/// in a very short array. /// in a very short array.
#[derive(Clone)]
pub struct FieldNormReader { pub struct FieldNormReader {
data: ReadOnlySource, data: ReadOnlySource,
} }
@@ -29,6 +65,11 @@ impl FieldNormReader {
FieldNormReader { data } FieldNormReader { data }
} }
/// Returns the number of documents in this segment.
pub fn num_docs(&self) -> u32 {
self.data.len() as u32
}
/// Returns the `fieldnorm` associated to a doc id. /// Returns the `fieldnorm` associated to a doc id.
/// The fieldnorm is a value approximating the number /// The fieldnorm is a value approximating the number
/// of tokens in a given field of the `doc_id`. /// of tokens in a given field of the `doc_id`.
@@ -65,10 +106,11 @@ impl FieldNormReader {
} }
#[cfg(test)] #[cfg(test)]
impl From<Vec<u32>> for FieldNormReader { impl From<&[u32]> for FieldNormReader {
fn from(field_norms: Vec<u32>) -> FieldNormReader { fn from(field_norms: &[u32]) -> FieldNormReader {
let field_norms_id = field_norms let field_norms_id = field_norms
.into_iter() .iter()
.cloned()
.map(FieldNormReader::fieldnorm_to_id) .map(FieldNormReader::fieldnorm_to_id)
.collect::<Vec<u8>>(); .collect::<Vec<u8>>();
let field_norms_data = ReadOnlySource::from(field_norms_id); let field_norms_data = ReadOnlySource::from(field_norms_id);

View File

@@ -78,11 +78,12 @@ impl FieldNormsWriter {
} }
/// Serialize the seen fieldnorm values to the serializer for all fields. /// Serialize the seen fieldnorm values to the serializer for all fields.
pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> { pub fn serialize(&self, mut fieldnorms_serializer: FieldNormsSerializer) -> io::Result<()> {
for &field in self.fields.iter() { for &field in self.fields.iter() {
let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..]; let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..];
fieldnorms_serializer.serialize_field(field, fieldnorm_values)?; fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
} }
fieldnorms_serializer.close()?;
Ok(()) Ok(())
} }
} }

View File

@@ -8,9 +8,9 @@ use crate::fastfield::DeleteBitSet;
use crate::fastfield::FastFieldReader; use crate::fastfield::FastFieldReader;
use crate::fastfield::FastFieldSerializer; use crate::fastfield::FastFieldSerializer;
use crate::fastfield::MultiValueIntFastFieldReader; use crate::fastfield::MultiValueIntFastFieldReader;
use crate::fieldnorm::FieldNormReader;
use crate::fieldnorm::FieldNormsSerializer; use crate::fieldnorm::FieldNormsSerializer;
use crate::fieldnorm::FieldNormsWriter; use crate::fieldnorm::FieldNormsWriter;
use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::indexer::SegmentSerializer; use crate::indexer::SegmentSerializer;
use crate::postings::InvertedIndexSerializer; use crate::postings::InvertedIndexSerializer;
use crate::postings::Postings; use crate::postings::Postings;
@@ -20,7 +20,7 @@ use crate::schema::{Field, Schema};
use crate::store::StoreWriter; use crate::store::StoreWriter;
use crate::termdict::TermMerger; use crate::termdict::TermMerger;
use crate::termdict::TermOrdinal; use crate::termdict::TermOrdinal;
use crate::DocId; use crate::{DocId, SegmentComponent};
use std::cmp; use std::cmp;
use std::collections::HashMap; use std::collections::HashMap;
@@ -167,7 +167,7 @@ impl IndexMerger {
fn write_fieldnorms( fn write_fieldnorms(
&self, &self,
fieldnorms_serializer: &mut FieldNormsSerializer, mut fieldnorms_serializer: FieldNormsSerializer,
) -> crate::Result<()> { ) -> crate::Result<()> {
let fields = FieldNormsWriter::fields_with_fieldnorm(&self.schema); let fields = FieldNormsWriter::fields_with_fieldnorm(&self.schema);
let mut fieldnorms_data = Vec::with_capacity(self.max_doc as usize); let mut fieldnorms_data = Vec::with_capacity(self.max_doc as usize);
@@ -182,6 +182,7 @@ impl IndexMerger {
} }
fieldnorms_serializer.serialize_field(field, &fieldnorms_data[..])?; fieldnorms_serializer.serialize_field(field, &fieldnorms_data[..])?;
} }
fieldnorms_serializer.close()?;
Ok(()) Ok(())
} }
@@ -492,6 +493,7 @@ impl IndexMerger {
indexed_field: Field, indexed_field: Field,
field_type: &FieldType, field_type: &FieldType,
serializer: &mut InvertedIndexSerializer, serializer: &mut InvertedIndexSerializer,
fieldnorm_reader: Option<FieldNormReader>,
) -> crate::Result<Option<TermOrdinalMapping>> { ) -> crate::Result<Option<TermOrdinalMapping>> {
let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000); let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
let mut delta_computer = DeltaComputer::new(); let mut delta_computer = DeltaComputer::new();
@@ -550,7 +552,8 @@ impl IndexMerger {
// - Segment 2's doc ids become [seg0.max_doc + seg1.max_doc, // - Segment 2's doc ids become [seg0.max_doc + seg1.max_doc,
// seg0.max_doc + seg1.max_doc + seg2.max_doc] // seg0.max_doc + seg1.max_doc + seg2.max_doc]
// ... // ...
let mut field_serializer = serializer.new_field(indexed_field, total_num_tokens)?; let mut field_serializer =
serializer.new_field(indexed_field, total_num_tokens, fieldnorm_reader)?;
let field_entry = self.schema.get_field_entry(indexed_field); let field_entry = self.schema.get_field_entry(indexed_field);
@@ -596,7 +599,11 @@ impl IndexMerger {
// We know that there is at least one document containing // We know that there is at least one document containing
// the term, so we add it. // the term, so we add it.
let to_term_ord = field_serializer.new_term(term_bytes)?; let term_doc_freq = segment_postings
.iter()
.map(|(_, segment_posting)| segment_posting.doc_freq())
.sum();
let to_term_ord = field_serializer.new_term(term_bytes, term_doc_freq)?;
if let Some(ref mut term_ord_mapping) = term_ord_mapping_opt { if let Some(ref mut term_ord_mapping) = term_ord_mapping_opt {
for (segment_ord, from_term_ord) in merged_terms.matching_segments() { for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
@@ -636,13 +643,18 @@ impl IndexMerger {
fn write_postings( fn write_postings(
&self, &self,
serializer: &mut InvertedIndexSerializer, serializer: &mut InvertedIndexSerializer,
fieldnorm_readers: FieldNormReaders,
) -> crate::Result<HashMap<Field, TermOrdinalMapping>> { ) -> crate::Result<HashMap<Field, TermOrdinalMapping>> {
let mut term_ordinal_mappings = HashMap::new(); let mut term_ordinal_mappings = HashMap::new();
for (field, field_entry) in self.schema.fields() { for (field, field_entry) in self.schema.fields() {
let fieldnorm_reader = fieldnorm_readers.get_field(field);
if field_entry.is_indexed() { if field_entry.is_indexed() {
if let Some(term_ordinal_mapping) = if let Some(term_ordinal_mapping) = self.write_postings_for_field(
self.write_postings_for_field(field, field_entry.field_type(), serializer)? field,
{ field_entry.field_type(),
serializer,
fieldnorm_reader,
)? {
term_ordinal_mappings.insert(field, term_ordinal_mapping); term_ordinal_mappings.insert(field, term_ordinal_mapping);
} }
} }
@@ -668,8 +680,15 @@ impl IndexMerger {
impl SerializableSegment for IndexMerger { impl SerializableSegment for IndexMerger {
fn write(&self, mut serializer: SegmentSerializer) -> crate::Result<u32> { fn write(&self, mut serializer: SegmentSerializer) -> crate::Result<u32> {
let term_ord_mappings = self.write_postings(serializer.get_postings_serializer())?; if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
self.write_fieldnorms(serializer.get_fieldnorms_serializer())?; self.write_fieldnorms(fieldnorms_serializer)?;
}
let fieldnorm_data = serializer
.segment()
.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorm_readers = FieldNormReaders::new(fieldnorm_data)?;
let term_ord_mappings =
self.write_postings(serializer.get_postings_serializer(), fieldnorm_readers)?;
self.write_fast_fields(serializer.get_fast_field_serializer(), term_ord_mappings)?; self.write_fast_fields(serializer.get_fast_field_serializer(), term_ord_mappings)?;
self.write_storable_fields(serializer.get_store_writer())?; self.write_storable_fields(serializer.get_store_writer())?;
serializer.close()?; serializer.close()?;
@@ -679,15 +698,15 @@ impl SerializableSegment for IndexMerger {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::assert_nearly_equals;
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE; use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
use crate::collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector}; use crate::collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector};
use crate::collector::{Count, FacetCollector}; use crate::collector::{Count, FacetCollector};
use crate::core::Index; use crate::core::Index;
use crate::query::AllQuery; use crate::query::AllQuery;
use crate::query::BooleanQuery; use crate::query::BooleanQuery;
use crate::query::Scorer;
use crate::query::TermQuery; use crate::query::TermQuery;
use crate::schema;
use crate::schema::Cardinality;
use crate::schema::Document; use crate::schema::Document;
use crate::schema::Facet; use crate::schema::Facet;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
@@ -695,9 +714,11 @@ mod tests {
use crate::schema::Term; use crate::schema::Term;
use crate::schema::TextFieldIndexing; use crate::schema::TextFieldIndexing;
use crate::schema::INDEXED; use crate::schema::INDEXED;
use crate::schema::{Cardinality, TEXT};
use crate::DocAddress; use crate::DocAddress;
use crate::IndexWriter; use crate::IndexWriter;
use crate::Searcher; use crate::Searcher;
use crate::{schema, DocSet, SegmentId};
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use futures::executor::block_on; use futures::executor::block_on;
use std::io::Cursor; use std::io::Cursor;
@@ -1504,12 +1525,9 @@ mod tests {
for i in 0..100 { for i in 0..100 {
let mut doc = Document::new(); let mut doc = Document::new();
doc.add_f64(field, 42.0); doc.add_f64(field, 42.0);
doc.add_f64(multi_field, 0.24); doc.add_f64(multi_field, 0.24);
doc.add_f64(multi_field, 0.27); doc.add_f64(multi_field, 0.27);
writer.add_document(doc); writer.add_document(doc);
if i % 5 == 0 { if i % 5 == 0 {
writer.commit()?; writer.commit()?;
} }
@@ -1521,6 +1539,72 @@ mod tests {
// If a merging thread fails, we should end up with more // If a merging thread fails, we should end up with more
// than one segment here // than one segment here
assert_eq!(1, index.searchable_segments()?.len()); assert_eq!(1, index.searchable_segments()?.len());
Ok(())
}
#[test]
fn test_merged_index_has_blockwand() -> crate::Result<()> {
let mut builder = schema::SchemaBuilder::new();
let text = builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(builder.build());
let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
let happy_term = Term::from_field_text(text, "happy");
let term_query = TermQuery::new(happy_term, IndexRecordOption::WithFreqs);
for _ in 0..62 {
writer.add_document(doc!(text=>"hello happy tax payer"));
}
writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let mut term_scorer = term_query
.specialized_weight(&searcher, true)
.specialized_scorer(searcher.segment_reader(0u32), 1.0f32)?;
assert_eq!(term_scorer.doc(), 0);
assert_nearly_equals!(term_scorer.block_max_score(), 0.0079681855);
assert_nearly_equals!(term_scorer.score(), 0.0079681855);
for _ in 0..81 {
writer.add_document(doc!(text=>"hello happy tax payer"));
}
writer.commit()?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 2);
for segment_reader in searcher.segment_readers() {
let mut term_scorer = term_query
.specialized_weight(&searcher, true)
.specialized_scorer(segment_reader, 1.0f32)?;
// the difference compared to before is instrinsic to the bm25 formula. no worries there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);
assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
assert_nearly_equals!(term_scorer.score(), 0.003478312);
term_scorer.advance();
}
}
let segment_ids: Vec<SegmentId> = searcher
.segment_readers()
.iter()
.map(|reader| reader.segment_id())
.collect();
block_on(writer.merge(&segment_ids[..]))?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0u32);
let mut term_scorer = term_query
.specialized_weight(&searcher, true)
.specialized_scorer(segment_reader, 1.0f32)?;
// the difference compared to before is instrinsic to the bm25 formula. no worries there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);
assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
assert_nearly_equals!(term_scorer.score(), 0.003478312);
term_scorer.advance();
}
Ok(()) Ok(())
} }

View File

@@ -8,15 +8,16 @@ use crate::store::StoreWriter;
/// Segment serializer is in charge of laying out on disk /// Segment serializer is in charge of laying out on disk
/// the data accumulated and sorted by the `SegmentWriter`. /// the data accumulated and sorted by the `SegmentWriter`.
pub struct SegmentSerializer { pub struct SegmentSerializer {
segment: Segment,
store_writer: StoreWriter, store_writer: StoreWriter,
fast_field_serializer: FastFieldSerializer, fast_field_serializer: FastFieldSerializer,
fieldnorms_serializer: FieldNormsSerializer, fieldnorms_serializer: Option<FieldNormsSerializer>,
postings_serializer: InvertedIndexSerializer, postings_serializer: InvertedIndexSerializer,
} }
impl SegmentSerializer { impl SegmentSerializer {
/// Creates a new `SegmentSerializer`. /// Creates a new `SegmentSerializer`.
pub fn for_segment(segment: &mut Segment) -> crate::Result<SegmentSerializer> { pub fn for_segment(mut segment: Segment) -> crate::Result<SegmentSerializer> {
let store_write = segment.open_write(SegmentComponent::STORE)?; let store_write = segment.open_write(SegmentComponent::STORE)?;
let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?; let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?;
@@ -25,15 +26,20 @@ impl SegmentSerializer {
let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?; let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?;
let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?; let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;
let postings_serializer = InvertedIndexSerializer::open(segment)?; let postings_serializer = InvertedIndexSerializer::open(&mut segment)?;
Ok(SegmentSerializer { Ok(SegmentSerializer {
segment,
store_writer: StoreWriter::new(store_write), store_writer: StoreWriter::new(store_write),
fast_field_serializer, fast_field_serializer,
fieldnorms_serializer, fieldnorms_serializer: Some(fieldnorms_serializer),
postings_serializer, postings_serializer,
}) })
} }
pub fn segment(&self) -> &Segment {
&self.segment
}
/// Accessor to the `PostingsSerializer`. /// Accessor to the `PostingsSerializer`.
pub fn get_postings_serializer(&mut self) -> &mut InvertedIndexSerializer { pub fn get_postings_serializer(&mut self) -> &mut InvertedIndexSerializer {
&mut self.postings_serializer &mut self.postings_serializer
@@ -44,9 +50,11 @@ impl SegmentSerializer {
&mut self.fast_field_serializer &mut self.fast_field_serializer
} }
/// Accessor to the field norm serializer. /// Extract the field norm serializer.
pub fn get_fieldnorms_serializer(&mut self) -> &mut FieldNormsSerializer { ///
&mut self.fieldnorms_serializer /// Note the fieldnorms serializer can only be extracted once.
pub fn extract_fieldnorms_serializer(&mut self) -> Option<FieldNormsSerializer> {
self.fieldnorms_serializer.take()
} }
/// Accessor to the `StoreWriter`. /// Accessor to the `StoreWriter`.
@@ -55,11 +63,13 @@ impl SegmentSerializer {
} }
/// Finalize the segment serialization. /// Finalize the segment serialization.
pub fn close(self) -> crate::Result<()> { pub fn close(mut self) -> crate::Result<()> {
if let Some(fieldnorms_serializer) = self.extract_fieldnorms_serializer() {
fieldnorms_serializer.close()?;
}
self.fast_field_serializer.close()?; self.fast_field_serializer.close()?;
self.postings_serializer.close()?; self.postings_serializer.close()?;
self.store_writer.close()?; self.store_writer.close()?;
self.fieldnorms_serializer.close()?;
Ok(()) Ok(())
} }
} }

View File

@@ -112,7 +112,7 @@ fn merge(
target_opstamp: Opstamp, target_opstamp: Opstamp,
) -> crate::Result<SegmentEntry> { ) -> crate::Result<SegmentEntry> {
// first we need to apply deletes to our segment. // first we need to apply deletes to our segment.
let mut merged_segment = index.new_segment(); let merged_segment = index.new_segment();
// First we apply all of the delet to the merged segment, up to the target opstamp. // First we apply all of the delet to the merged segment, up to the target opstamp.
for segment_entry in &mut segment_entries { for segment_entry in &mut segment_entries {
@@ -130,12 +130,14 @@ fn merge(
// An IndexMerger is like a "view" of our merged segments. // An IndexMerger is like a "view" of our merged segments.
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?; let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
let merged_segment_id = merged_segment.id();
// ... we just serialize this index merger in our new segment to merge the two segments. // ... we just serialize this index merger in our new segment to merge the two segments.
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?; let segment_serializer = SegmentSerializer::for_segment(merged_segment)?;
let num_docs = merger.write(segment_serializer)?; let num_docs = merger.write(segment_serializer)?;
let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs); let segment_meta = index.new_segment_meta(merged_segment_id, num_docs);
Ok(SegmentEntry::new(segment_meta, delete_cursor, None)) Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
} }

View File

@@ -2,7 +2,7 @@ use super::operation::AddOperation;
use crate::core::Segment; use crate::core::Segment;
use crate::core::SerializableSegment; use crate::core::SerializableSegment;
use crate::fastfield::FastFieldsWriter; use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::FieldNormsWriter; use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
use crate::indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::SegmentSerializer;
use crate::postings::compute_table_size; use crate::postings::compute_table_size;
use crate::postings::MultiFieldPostingsWriter; use crate::postings::MultiFieldPostingsWriter;
@@ -14,8 +14,8 @@ use crate::schema::{Field, FieldEntry};
use crate::tokenizer::{BoxTokenStream, PreTokenizedStream}; use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
use crate::tokenizer::{FacetTokenizer, TextAnalyzer}; use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
use crate::tokenizer::{TokenStreamChain, Tokenizer}; use crate::tokenizer::{TokenStreamChain, Tokenizer};
use crate::DocId;
use crate::Opstamp; use crate::Opstamp;
use crate::{DocId, SegmentComponent};
use std::io; use std::io;
use std::str; use std::str;
@@ -62,11 +62,12 @@ impl SegmentWriter {
/// - schema /// - schema
pub fn for_segment( pub fn for_segment(
memory_budget: usize, memory_budget: usize,
mut segment: Segment, segment: Segment,
schema: &Schema, schema: &Schema,
) -> crate::Result<SegmentWriter> { ) -> crate::Result<SegmentWriter> {
let tokenizer_manager = segment.index().tokenizers().clone();
let table_num_bits = initial_table_size(memory_budget)?; let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?; let segment_serializer = SegmentSerializer::for_segment(segment)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits); let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
let tokenizers = schema let tokenizers = schema
.fields() .fields()
@@ -76,7 +77,7 @@ impl SegmentWriter {
.get_indexing_options() .get_indexing_options()
.and_then(|text_index_option| { .and_then(|text_index_option| {
let tokenizer_name = &text_index_option.tokenizer(); let tokenizer_name = &text_index_option.tokenizer();
segment.index().tokenizers().get(tokenizer_name) tokenizer_manager.get(tokenizer_name)
}), }),
_ => None, _ => None,
}, },
@@ -280,9 +281,16 @@ fn write(
fieldnorms_writer: &FieldNormsWriter, fieldnorms_writer: &FieldNormsWriter,
mut serializer: SegmentSerializer, mut serializer: SegmentSerializer,
) -> crate::Result<()> { ) -> crate::Result<()> {
let term_ord_map = multifield_postings.serialize(serializer.get_postings_serializer())?; if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
fieldnorms_writer.serialize(fieldnorms_serializer)?;
}
let fieldnorm_data = serializer
.segment()
.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorm_readers = FieldNormReaders::new(fieldnorm_data)?;
let term_ord_map =
multifield_postings.serialize(serializer.get_postings_serializer(), fieldnorm_readers)?;
fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?; fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;
fieldnorms_writer.serialize(serializer.get_fieldnorms_serializer())?;
serializer.close()?; serializer.close()?;
Ok(()) Ok(())
} }

View File

@@ -173,7 +173,7 @@ use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
/// Index format version. /// Index format version.
const INDEX_FORMAT_VERSION: u32 = 1; const INDEX_FORMAT_VERSION: u32 = 2;
/// Structure version for the index. /// Structure version for the index.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)] #[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -298,17 +298,26 @@ mod tests {
use rand::rngs::StdRng; use rand::rngs::StdRng;
use rand::{Rng, SeedableRng}; use rand::{Rng, SeedableRng};
pub fn assert_nearly_equals(expected: f32, val: f32) { /// Checks if left and right are close one to each other.
assert!( /// Panics if the two values are more than 0.5% apart.
nearly_equals(val, expected), #[macro_export]
"Got {}, expected {}.", macro_rules! assert_nearly_equals {
val, ($left:expr, $right:expr) => {{
expected match (&$left, &$right) {
); (left_val, right_val) => {
} let diff = (left_val - right_val).abs();
let add = left_val.abs() + right_val.abs();
pub fn nearly_equals(a: f32, b: f32) -> bool { if diff > 0.0005 * add {
(a - b).abs() < 0.0005 * (a + b).abs() panic!(
r#"assertion failed: `(left ~= right)`
left: `{:?}`,
right: `{:?}`"#,
&*left_val, &*right_val
)
}
}
}
}};
} }
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> { pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {

View File

@@ -1,11 +1,21 @@
use crate::common::{BinarySerializable, VInt}; use crate::common::{BinarySerializable, VInt};
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::fieldnorm::FieldNormReader;
use crate::postings::compression::{ use crate::postings::compression::{
AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE, AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE,
}; };
use crate::postings::{BlockInfo, FreqReadingOption, SkipReader}; use crate::postings::{BlockInfo, FreqReadingOption, SkipReader};
use crate::query::BM25Weight;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::{DocId, TERMINATED}; use crate::{DocId, Score, TERMINATED};
fn max_f32<I: Iterator<Item = f32>>(mut it: I) -> Option<f32> {
if let Some(first) = it.next() {
Some(it.fold(first, f32::max))
} else {
None
}
}
/// `BlockSegmentPostings` is a cursor iterating over blocks /// `BlockSegmentPostings` is a cursor iterating over blocks
/// of documents. /// of documents.
@@ -19,11 +29,12 @@ pub struct BlockSegmentPostings {
loaded_offset: usize, loaded_offset: usize,
freq_decoder: BlockDecoder, freq_decoder: BlockDecoder,
freq_reading_option: FreqReadingOption, freq_reading_option: FreqReadingOption,
block_max_score_cache: Option<Score>,
doc_freq: usize, doc_freq: u32,
data: ReadOnlySource, data: ReadOnlySource,
skip_reader: SkipReader, pub(crate) skip_reader: SkipReader,
} }
fn decode_bitpacked_block( fn decode_bitpacked_block(
@@ -47,6 +58,7 @@ fn decode_vint_block(
doc_offset: DocId, doc_offset: DocId,
num_vint_docs: usize, num_vint_docs: usize,
) { ) {
doc_decoder.fill(TERMINATED);
let num_consumed_bytes = doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs); let num_consumed_bytes = doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs);
if let Some(freq_decoder) = freq_decoder_opt { if let Some(freq_decoder) = freq_decoder_opt {
freq_decoder.uncompress_vint_unsorted(&data[num_consumed_bytes..], num_vint_docs); freq_decoder.uncompress_vint_unsorted(&data[num_consumed_bytes..], num_vint_docs);
@@ -88,12 +100,12 @@ impl BlockSegmentPostings {
None => SkipReader::new(ReadOnlySource::empty(), doc_freq, record_option), None => SkipReader::new(ReadOnlySource::empty(), doc_freq, record_option),
}; };
let doc_freq = doc_freq as usize;
let mut block_segment_postings = BlockSegmentPostings { let mut block_segment_postings = BlockSegmentPostings {
doc_decoder: BlockDecoder::with_val(TERMINATED), doc_decoder: BlockDecoder::with_val(TERMINATED),
loaded_offset: std::usize::MAX, loaded_offset: std::usize::MAX,
freq_decoder: BlockDecoder::with_val(1), freq_decoder: BlockDecoder::with_val(1),
freq_reading_option, freq_reading_option,
block_max_score_cache: None,
doc_freq, doc_freq,
data: postings_data, data: postings_data,
skip_reader, skip_reader,
@@ -102,6 +114,43 @@ impl BlockSegmentPostings {
block_segment_postings block_segment_postings
} }
/// Returns the block_max_score for the current block.
/// It does not require the block to be loaded. For instance, it is ok to call this method
/// after having called `.shallow_advance(..)`.
///
/// See `TermScorer::block_max_score(..)` for more information.
pub fn block_max_score(
&mut self,
fieldnorm_reader: &FieldNormReader,
bm25_weight: &BM25Weight,
) -> Score {
let (block_max_score_cache, skip_reader, doc_decoder, freq_decoder) = (
&mut self.block_max_score_cache,
&self.skip_reader,
&self.doc_decoder,
&self.freq_decoder,
);
*block_max_score_cache.get_or_insert_with(|| {
skip_reader
.block_max_score(bm25_weight)
.or_else(|| {
let docs = doc_decoder.output_array();
let freqs = freq_decoder.output_array();
max_f32(docs.iter().cloned().zip(freqs.iter().cloned()).map(
|(doc, term_freq)| {
let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
bm25_weight.score(fieldnorm_id, term_freq)
},
))
})
.unwrap_or(0f32)
})
}
pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
self.freq_reading_option
}
// Resets the block segment postings on another position // Resets the block segment postings on another position
// in the postings file. // in the postings file.
// //
@@ -121,15 +170,16 @@ impl BlockSegmentPostings {
} else { } else {
self.skip_reader.reset(ReadOnlySource::empty(), doc_freq); self.skip_reader.reset(ReadOnlySource::empty(), doc_freq);
} }
self.doc_freq = doc_freq as usize; self.doc_freq = doc_freq;
self.load_block(); self.load_block();
} }
/// Returns the document frequency associated to this block postings. /// Returns the overall number of documents in the block postings.
/// It does not take in account whether documents are deleted or not.
/// ///
/// This `doc_freq` is simply the sum of the length of all of the blocks /// This `doc_freq` is simply the sum of the length of all of the blocks
/// length, and it does not take in account deleted documents. /// length, and it does not take in account deleted documents.
pub fn doc_freq(&self) -> usize { pub fn doc_freq(&self) -> u32 {
self.doc_freq self.doc_freq
} }
@@ -139,11 +189,20 @@ impl BlockSegmentPostings {
/// returned by `.docs()` is empty. /// returned by `.docs()` is empty.
#[inline] #[inline]
pub fn docs(&self) -> &[DocId] { pub fn docs(&self) -> &[DocId] {
debug_assert!(self.block_is_loaded());
self.doc_decoder.output_array() self.doc_decoder.output_array()
} }
/// Returns a full block, regardless of whetehr the block is complete or incomplete (
/// as it happens for the last block of the posting list).
///
/// In the latter case, the block is guaranteed to be padded with the sentinel value:
/// `TERMINATED`. The array is also guaranteed to be aligned on 16 bytes = 128 bits.
///
/// This method is useful to run SSE2 linear search.
#[inline(always)] #[inline(always)]
pub(crate) fn docs_aligned(&self) -> &AlignedBuffer { pub(crate) fn docs_aligned(&self) -> &AlignedBuffer {
debug_assert!(self.block_is_loaded());
self.doc_decoder.output_aligned() self.doc_decoder.output_aligned()
} }
@@ -156,12 +215,14 @@ impl BlockSegmentPostings {
/// Return the array of `term freq` in the block. /// Return the array of `term freq` in the block.
#[inline] #[inline]
pub fn freqs(&self) -> &[u32] { pub fn freqs(&self) -> &[u32] {
debug_assert!(self.block_is_loaded());
self.freq_decoder.output_array() self.freq_decoder.output_array()
} }
/// Return the frequency at index `idx` of the block. /// Return the frequency at index `idx` of the block.
#[inline] #[inline]
pub fn freq(&self, idx: usize) -> u32 { pub fn freq(&self, idx: usize) -> u32 {
debug_assert!(self.block_is_loaded());
self.freq_decoder.output(idx) self.freq_decoder.output(idx)
} }
@@ -172,13 +233,10 @@ impl BlockSegmentPostings {
/// of any number between 1 and `NUM_DOCS_PER_BLOCK - 1` /// of any number between 1 and `NUM_DOCS_PER_BLOCK - 1`
#[inline] #[inline]
pub fn block_len(&self) -> usize { pub fn block_len(&self) -> usize {
debug_assert!(self.block_is_loaded());
self.doc_decoder.output_len self.doc_decoder.output_len
} }
pub(crate) fn position_offset(&self) -> u64 {
self.skip_reader.position_offset()
}
/// Position on a block that may contains `target_doc`. /// Position on a block that may contains `target_doc`.
/// ///
/// If all docs are smaller than target, the block loaded may be empty, /// If all docs are smaller than target, the block loaded may be empty,
@@ -188,7 +246,26 @@ impl BlockSegmentPostings {
self.load_block(); self.load_block();
} }
fn load_block(&mut self) { pub(crate) fn position_offset(&self) -> u64 {
self.skip_reader.position_offset()
}
/// Dangerous API! This calls seek on the skip list,
/// but does not `.load_block()` afterwards.
///
/// `.load_block()` needs to be called manually afterwards.
/// If all docs are smaller than target, the block loaded may be empty,
/// or be the last an incomplete VInt block.
pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
self.skip_reader.seek(target_doc);
}
pub(crate) fn block_is_loaded(&self) -> bool {
self.loaded_offset == self.skip_reader.byte_offset()
}
pub(crate) fn load_block(&mut self) {
self.block_max_score_cache = None;
let offset = self.skip_reader.byte_offset(); let offset = self.skip_reader.byte_offset();
if self.loaded_offset == offset { if self.loaded_offset == offset {
return; return;
@@ -213,11 +290,14 @@ impl BlockSegmentPostings {
tf_num_bits, tf_num_bits,
); );
} }
BlockInfo::VInt(num_vint_docs) => { BlockInfo::VInt { num_docs } => {
self.doc_decoder.clear(); let data = {
if num_vint_docs == 0 { if num_docs == 0 {
return; &[]
} } else {
&self.data.as_slice()[offset..]
}
};
decode_vint_block( decode_vint_block(
&mut self.doc_decoder, &mut self.doc_decoder,
if let FreqReadingOption::ReadFreq = self.freq_reading_option { if let FreqReadingOption::ReadFreq = self.freq_reading_option {
@@ -225,9 +305,9 @@ impl BlockSegmentPostings {
} else { } else {
None None
}, },
&self.data.as_slice()[offset..], data,
self.skip_reader.last_doc_in_previous_block, self.skip_reader.last_doc_in_previous_block,
num_vint_docs as usize, num_docs as usize,
); );
} }
} }
@@ -245,9 +325,10 @@ impl BlockSegmentPostings {
pub fn empty() -> BlockSegmentPostings { pub fn empty() -> BlockSegmentPostings {
BlockSegmentPostings { BlockSegmentPostings {
doc_decoder: BlockDecoder::with_val(TERMINATED), doc_decoder: BlockDecoder::with_val(TERMINATED),
loaded_offset: std::usize::MAX, loaded_offset: 0,
freq_decoder: BlockDecoder::with_val(1), freq_decoder: BlockDecoder::with_val(1),
freq_reading_option: FreqReadingOption::NoFreq, freq_reading_option: FreqReadingOption::NoFreq,
block_max_score_cache: None,
doc_freq: 0, doc_freq: 0,
data: ReadOnlySource::new(vec![]), data: ReadOnlySource::new(vec![]),
skip_reader: SkipReader::new(ReadOnlySource::new(vec![]), 0, IndexRecordOption::Basic), skip_reader: SkipReader::new(ReadOnlySource::new(vec![]), 0, IndexRecordOption::Basic),
@@ -273,8 +354,10 @@ mod tests {
#[test] #[test]
fn test_empty_segment_postings() { fn test_empty_segment_postings() {
let mut postings = SegmentPostings::empty(); let mut postings = SegmentPostings::empty();
assert_eq!(postings.doc(), TERMINATED);
assert_eq!(postings.advance(), TERMINATED); assert_eq!(postings.advance(), TERMINATED);
assert_eq!(postings.advance(), TERMINATED); assert_eq!(postings.advance(), TERMINATED);
assert_eq!(postings.doc_freq(), 0);
assert_eq!(postings.len(), 0); assert_eq!(postings.len(), 0);
} }
@@ -294,6 +377,8 @@ mod tests {
#[test] #[test]
fn test_empty_block_segment_postings() { fn test_empty_block_segment_postings() {
let mut postings = BlockSegmentPostings::empty(); let mut postings = BlockSegmentPostings::empty();
assert!(postings.docs().is_empty());
assert_eq!(postings.doc_freq(), 0);
postings.advance(); postings.advance();
assert!(postings.docs().is_empty()); assert!(postings.docs().is_empty());
assert_eq!(postings.doc_freq(), 0); assert_eq!(postings.doc_freq(), 0);
@@ -375,19 +460,21 @@ mod tests {
} }
#[test] #[test]
fn test_block_segment_postings_skip2() { fn test_block_segment_postings_seek2() {
let mut docs = vec![0]; let mut docs = vec![0];
for i in 0..1300 { for i in 0..1300 {
docs.push((i * i / 100) + i); docs.push((i * i / 100) + i);
} }
let mut block_postings = build_block_postings(&docs[..]); let mut block_postings = build_block_postings(&docs[..]);
for i in vec![0, 424, 10000] { for i in vec![0, 424, 10000] {
block_postings.seek(i); block_postings.shallow_seek(i);
block_postings.load_block();
let docs = block_postings.docs(); let docs = block_postings.docs();
assert!(docs[0] <= i); assert!(docs[0] <= i);
assert!(docs.last().cloned().unwrap_or(0u32) >= i); assert!(docs.last().cloned().unwrap_or(0u32) >= i);
} }
block_postings.seek(100_000); block_postings.shallow_seek(100_000);
block_postings.load_block();
assert_eq!(block_postings.doc(COMPRESSION_BLOCK_SIZE - 1), TERMINATED); assert_eq!(block_postings.doc(COMPRESSION_BLOCK_SIZE - 1), TERMINATED);
} }

View File

@@ -108,9 +108,8 @@ impl BlockDecoder {
self.output.0[idx] self.output.0[idx]
} }
pub fn clear(&mut self) { pub fn fill(&mut self, val: u32) {
self.output_len = 0; self.output.0.iter_mut().for_each(|el| *el = val);
self.output.0.iter_mut().for_each(|el| *el = TERMINATED);
} }
} }
@@ -245,19 +244,6 @@ pub mod tests {
} }
} }
#[test]
fn test_clearing() {
let mut encoder = BlockEncoder::new();
let vals = (0u32..128u32).map(|i| i * 3).collect::<Vec<_>>();
let (num_bits, compressed) = encoder.compress_block_sorted(&vals[..], 0u32);
let mut decoder = BlockDecoder::default();
decoder.uncompress_block_sorted(compressed, 0u32, num_bits);
assert_eq!(decoder.output_len, 128);
assert_eq!(decoder.output_array(), &vals[..]);
decoder.clear();
assert!(decoder.output_array().is_empty());
}
#[test] #[test]
fn test_encode_unsorted_block_with_junk() { fn test_encode_unsorted_block_with_junk() {
let mut compressed: Vec<u8> = Vec::new(); let mut compressed: Vec<u8> = Vec::new();

View File

@@ -73,8 +73,10 @@ pub mod tests {
let mut segment = index.new_segment(); let mut segment = index.new_segment();
let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap(); let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap();
{ {
let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap(); let mut field_serializer = posting_serializer
field_serializer.new_term("abc".as_bytes()).unwrap(); .new_field(text_field, 120 * 4, None)
.unwrap();
field_serializer.new_term("abc".as_bytes(), 12u32).unwrap();
for doc_id in 0u32..120u32 { for doc_id in 0u32..120u32 {
let delta_positions = vec![1, 2, 3, 2]; let delta_positions = vec![1, 2, 3, 2];
field_serializer field_serializer

View File

@@ -1,5 +1,6 @@
use super::stacker::{Addr, MemoryArena, TermHashMap}; use super::stacker::{Addr, MemoryArena, TermHashMap};
use crate::fieldnorm::FieldNormReaders;
use crate::postings::recorder::{ use crate::postings::recorder::{
BufferLender, NothingRecorder, Recorder, TFAndPositionRecorder, TermFrequencyRecorder, BufferLender, NothingRecorder, Recorder, TFAndPositionRecorder, TermFrequencyRecorder,
}; };
@@ -128,6 +129,7 @@ impl MultiFieldPostingsWriter {
pub fn serialize( pub fn serialize(
&self, &self,
serializer: &mut InvertedIndexSerializer, serializer: &mut InvertedIndexSerializer,
fieldnorm_readers: FieldNormReaders,
) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> { ) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> = let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
self.term_index.iter().collect(); self.term_index.iter().collect();
@@ -161,8 +163,12 @@ impl MultiFieldPostingsWriter {
} }
let postings_writer = &self.per_field_postings_writers[field.field_id() as usize]; let postings_writer = &self.per_field_postings_writers[field.field_id() as usize];
let mut field_serializer = let fieldnorm_reader = fieldnorm_readers.get_field(field);
serializer.new_field(field, postings_writer.total_num_tokens())?; let mut field_serializer = serializer.new_field(
field,
postings_writer.total_num_tokens(),
fieldnorm_reader,
)?;
postings_writer.serialize( postings_writer.serialize(
&term_offsets[start..stop], &term_offsets[start..stop],
&mut field_serializer, &mut field_serializer,
@@ -297,7 +303,8 @@ impl<Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<Rec>
let mut buffer_lender = BufferLender::default(); let mut buffer_lender = BufferLender::default();
for &(term_bytes, addr, _) in term_addrs { for &(term_bytes, addr, _) in term_addrs {
let recorder: Rec = termdict_heap.read(addr); let recorder: Rec = termdict_heap.read(addr);
serializer.new_term(&term_bytes[4..])?; let term_doc_freq = recorder.term_doc_freq().unwrap_or(0u32);
serializer.new_term(&term_bytes[4..], term_doc_freq)?;
recorder.serialize(&mut buffer_lender, serializer, heap)?; recorder.serialize(&mut buffer_lender, serializer, heap)?;
serializer.close_term()?; serializer.close_term()?;
} }

View File

@@ -75,6 +75,8 @@ pub(crate) trait Recorder: Copy + 'static {
serializer: &mut FieldSerializer<'_>, serializer: &mut FieldSerializer<'_>,
heap: &MemoryArena, heap: &MemoryArena,
) -> io::Result<()>; ) -> io::Result<()>;
/// Returns the number of document containg this term.
fn term_doc_freq(&self) -> Option<u32>;
} }
/// Only records the doc ids /// Only records the doc ids
@@ -113,11 +115,16 @@ impl Recorder for NothingRecorder {
) -> io::Result<()> { ) -> io::Result<()> {
let buffer = buffer_lender.lend_u8(); let buffer = buffer_lender.lend_u8();
self.stack.read_to_end(heap, buffer); self.stack.read_to_end(heap, buffer);
// TODO avoid reading twice.
for doc in VInt32Reader::new(&buffer[..]) { for doc in VInt32Reader::new(&buffer[..]) {
serializer.write_doc(doc as u32, 0u32, &[][..])?; serializer.write_doc(doc as u32, 0u32, &[][..])?;
} }
Ok(()) Ok(())
} }
fn term_doc_freq(&self) -> Option<u32> {
None
}
} }
/// Recorder encoding document ids, and term frequencies /// Recorder encoding document ids, and term frequencies
@@ -126,6 +133,7 @@ pub struct TermFrequencyRecorder {
stack: ExpUnrolledLinkedList, stack: ExpUnrolledLinkedList,
current_doc: DocId, current_doc: DocId,
current_tf: u32, current_tf: u32,
term_doc_freq: u32,
} }
impl Recorder for TermFrequencyRecorder { impl Recorder for TermFrequencyRecorder {
@@ -134,6 +142,7 @@ impl Recorder for TermFrequencyRecorder {
stack: ExpUnrolledLinkedList::new(), stack: ExpUnrolledLinkedList::new(),
current_doc: u32::max_value(), current_doc: u32::max_value(),
current_tf: 0u32, current_tf: 0u32,
term_doc_freq: 0u32,
} }
} }
@@ -142,6 +151,7 @@ impl Recorder for TermFrequencyRecorder {
} }
fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) { fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
self.term_doc_freq += 1;
self.current_doc = doc; self.current_doc = doc;
let _ = write_u32_vint(doc, &mut self.stack.writer(heap)); let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
} }
@@ -172,6 +182,10 @@ impl Recorder for TermFrequencyRecorder {
Ok(()) Ok(())
} }
fn term_doc_freq(&self) -> Option<u32> {
Some(self.term_doc_freq)
}
} }
/// Recorder encoding term frequencies as well as positions. /// Recorder encoding term frequencies as well as positions.
@@ -179,12 +193,14 @@ impl Recorder for TermFrequencyRecorder {
pub struct TFAndPositionRecorder { pub struct TFAndPositionRecorder {
stack: ExpUnrolledLinkedList, stack: ExpUnrolledLinkedList,
current_doc: DocId, current_doc: DocId,
term_doc_freq: u32,
} }
impl Recorder for TFAndPositionRecorder { impl Recorder for TFAndPositionRecorder {
fn new() -> Self { fn new() -> Self {
TFAndPositionRecorder { TFAndPositionRecorder {
stack: ExpUnrolledLinkedList::new(), stack: ExpUnrolledLinkedList::new(),
current_doc: u32::max_value(), current_doc: u32::max_value(),
term_doc_freq: 0u32,
} }
} }
@@ -194,6 +210,7 @@ impl Recorder for TFAndPositionRecorder {
fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) { fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
self.current_doc = doc; self.current_doc = doc;
self.term_doc_freq += 1u32;
let _ = write_u32_vint(doc, &mut self.stack.writer(heap)); let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
} }
@@ -233,6 +250,10 @@ impl Recorder for TFAndPositionRecorder {
} }
Ok(()) Ok(())
} }
fn term_doc_freq(&self) -> Option<u32> {
Some(self.term_doc_freq)
}
} }
#[cfg(test)] #[cfg(test)]

View File

@@ -13,6 +13,7 @@ use crate::schema::IndexRecordOption;
use crate::DocId; use crate::DocId;
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::fieldnorm::FieldNormReader;
use crate::postings::BlockSegmentPostings; use crate::postings::BlockSegmentPostings;
/// `SegmentPostings` represents the inverted list or postings associated to /// `SegmentPostings` represents the inverted list or postings associated to
@@ -21,7 +22,7 @@ use crate::postings::BlockSegmentPostings;
/// As we iterate through the `SegmentPostings`, the frequencies are optionally decoded. /// As we iterate through the `SegmentPostings`, the frequencies are optionally decoded.
/// Positions on the other hand, are optionally entirely decoded upfront. /// Positions on the other hand, are optionally entirely decoded upfront.
pub struct SegmentPostings { pub struct SegmentPostings {
block_cursor: BlockSegmentPostings, pub(crate) block_cursor: BlockSegmentPostings,
cur: usize, cur: usize,
position_reader: Option<PositionReader>, position_reader: Option<PositionReader>,
block_searcher: BlockSearcher, block_searcher: BlockSearcher,
@@ -38,6 +39,12 @@ impl SegmentPostings {
} }
} }
/// Returns the overall number of documents in the block postings.
/// It does not take in account whether documents are deleted or not.
pub fn doc_freq(&self) -> u32 {
self.block_cursor.doc_freq()
}
/// Creates a segment postings object with the given documents /// Creates a segment postings object with the given documents
/// and no frequency encoded. /// and no frequency encoded.
/// ///
@@ -49,7 +56,8 @@ impl SegmentPostings {
pub fn create_from_docs(docs: &[u32]) -> SegmentPostings { pub fn create_from_docs(docs: &[u32]) -> SegmentPostings {
let mut buffer = Vec::new(); let mut buffer = Vec::new();
{ {
let mut postings_serializer = PostingsSerializer::new(&mut buffer, false, false); let mut postings_serializer = PostingsSerializer::new(&mut buffer, false, false, None);
postings_serializer.new_term(docs.len() as u32);
for &doc in docs { for &doc in docs {
postings_serializer.write_doc(doc, 1u32); postings_serializer.write_doc(doc, 1u32);
} }
@@ -66,6 +74,29 @@ impl SegmentPostings {
SegmentPostings::from_block_postings(block_segment_postings, None) SegmentPostings::from_block_postings(block_segment_postings, None)
} }
/// Helper functions to create `SegmentPostings` for tests.
pub fn create_from_docs_and_tfs(
doc_and_tfs: &[(u32, u32)],
fieldnorm_reader: Option<FieldNormReader>,
) -> crate::Result<SegmentPostings> {
let mut buffer = Vec::new();
let mut postings_serializer =
PostingsSerializer::new(&mut buffer, true, false, fieldnorm_reader);
postings_serializer.new_term(doc_and_tfs.len() as u32);
for &(doc, tf) in doc_and_tfs {
postings_serializer.write_doc(doc, tf);
}
postings_serializer
.close_term(doc_and_tfs.len() as u32)?;
let block_segment_postings = BlockSegmentPostings::from_data(
doc_and_tfs.len() as u32,
ReadOnlySource::from(buffer),
IndexRecordOption::WithFreqs,
IndexRecordOption::WithFreqs,
);
Ok(SegmentPostings::from_block_postings(block_segment_postings, None))
}
/// Reads a Segment postings from an &[u8] /// Reads a Segment postings from an &[u8]
/// ///
/// * `len` - number of document in the posting lists. /// * `len` - number of document in the posting lists.
@@ -90,6 +121,7 @@ impl DocSet for SegmentPostings {
// next needs to be called a first time to point to the correct element. // next needs to be called a first time to point to the correct element.
#[inline] #[inline]
fn advance(&mut self) -> DocId { fn advance(&mut self) -> DocId {
assert!(self.block_cursor.block_is_loaded());
if self.cur == COMPRESSION_BLOCK_SIZE - 1 { if self.cur == COMPRESSION_BLOCK_SIZE - 1 {
self.cur = 0; self.cur = 0;
self.block_cursor.advance(); self.block_cursor.advance();
@@ -141,7 +173,7 @@ impl DocSet for SegmentPostings {
impl HasLen for SegmentPostings { impl HasLen for SegmentPostings {
fn len(&self) -> usize { fn len(&self) -> usize {
self.block_cursor.doc_freq() self.block_cursor.doc_freq() as usize
} }
} }

View File

@@ -3,13 +3,16 @@ use crate::common::{BinarySerializable, VInt};
use crate::common::{CompositeWrite, CountingWriter}; use crate::common::{CompositeWrite, CountingWriter};
use crate::core::Segment; use crate::core::Segment;
use crate::directory::WritePtr; use crate::directory::WritePtr;
use crate::fieldnorm::FieldNormReader;
use crate::positions::PositionSerializer; use crate::positions::PositionSerializer;
use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE}; use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
use crate::postings::skip::SkipSerializer; use crate::postings::skip::SkipSerializer;
use crate::query::BM25Weight;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType}; use crate::schema::{Field, FieldEntry, FieldType};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal}; use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::DocId; use crate::DocId;
use std::cmp::Ordering;
use std::io::{self, Write}; use std::io::{self, Write};
/// `InvertedIndexSerializer` is in charge of serializing /// `InvertedIndexSerializer` is in charge of serializing
@@ -89,6 +92,7 @@ impl InvertedIndexSerializer {
&mut self, &mut self,
field: Field, field: Field,
total_num_tokens: u64, total_num_tokens: u64,
fieldnorm_reader: Option<FieldNormReader>,
) -> io::Result<FieldSerializer<'_>> { ) -> io::Result<FieldSerializer<'_>> {
let field_entry: &FieldEntry = self.schema.get_field_entry(field); let field_entry: &FieldEntry = self.schema.get_field_entry(field);
let term_dictionary_write = self.terms_write.for_field(field); let term_dictionary_write = self.terms_write.for_field(field);
@@ -103,6 +107,7 @@ impl InvertedIndexSerializer {
postings_write, postings_write,
positions_write, positions_write,
positionsidx_write, positionsidx_write,
fieldnorm_reader,
) )
} }
@@ -134,6 +139,7 @@ impl<'a> FieldSerializer<'a> {
postings_write: &'a mut CountingWriter<WritePtr>, postings_write: &'a mut CountingWriter<WritePtr>,
positions_write: &'a mut CountingWriter<WritePtr>, positions_write: &'a mut CountingWriter<WritePtr>,
positionsidx_write: &'a mut CountingWriter<WritePtr>, positionsidx_write: &'a mut CountingWriter<WritePtr>,
fieldnorm_reader: Option<FieldNormReader>,
) -> io::Result<FieldSerializer<'a>> { ) -> io::Result<FieldSerializer<'a>> {
let (term_freq_enabled, position_enabled): (bool, bool) = match field_type { let (term_freq_enabled, position_enabled): (bool, bool) = match field_type {
FieldType::Str(ref text_options) => { FieldType::Str(ref text_options) => {
@@ -147,8 +153,12 @@ impl<'a> FieldSerializer<'a> {
_ => (false, false), _ => (false, false),
}; };
let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?; let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
let postings_serializer = let postings_serializer = PostingsSerializer::new(
PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled); postings_write,
term_freq_enabled,
position_enabled,
fieldnorm_reader,
);
let positions_serializer_opt = if position_enabled { let positions_serializer_opt = if position_enabled {
Some(PositionSerializer::new(positions_write, positionsidx_write)) Some(PositionSerializer::new(positions_write, positionsidx_write))
} else { } else {
@@ -181,8 +191,8 @@ impl<'a> FieldSerializer<'a> {
/// Starts the postings for a new term. /// Starts the postings for a new term.
/// * term - the term. It needs to come after the previous term according /// * term - the term. It needs to come after the previous term according
/// to the lexicographical order. /// to the lexicographical order.
/// * doc_freq - return the number of document containing the term. /// * term_doc_freq - return the number of document containing the term.
pub fn new_term(&mut self, term: &[u8]) -> io::Result<TermOrdinal> { pub fn new_term(&mut self, term: &[u8], term_doc_freq: u32) -> io::Result<TermOrdinal> {
assert!( assert!(
!self.term_open, !self.term_open,
"Called new_term, while the previous term was not closed." "Called new_term, while the previous term was not closed."
@@ -193,6 +203,7 @@ impl<'a> FieldSerializer<'a> {
self.term_dictionary_builder.insert_key(term)?; self.term_dictionary_builder.insert_key(term)?;
let term_ordinal = self.num_terms; let term_ordinal = self.num_terms;
self.num_terms += 1; self.num_terms += 1;
self.postings_serializer.new_term(term_doc_freq);
Ok(term_ordinal) Ok(term_ordinal)
} }
@@ -306,6 +317,21 @@ pub struct PostingsSerializer<W: Write> {
termfreq_enabled: bool, termfreq_enabled: bool,
termfreq_sum_enabled: bool, termfreq_sum_enabled: bool,
fieldnorm_reader: Option<FieldNormReader>,
bm25_weight: Option<BM25Weight>,
num_docs: u32, // Number of docs in the segment
avg_fieldnorm: f32, // Average number of term in the field for that segment.
// this value is used to compute the block wand information.
}
fn get_avg_fieldnorm(fieldnorm_reader: &FieldNormReader) -> f32 {
let num_docs = fieldnorm_reader.num_docs();
let sum_fieldnorm: f32 = (0u32..num_docs)
.map(|doc| fieldnorm_reader.fieldnorm(doc) as f32)
.sum();
sum_fieldnorm / (num_docs as f32)
} }
impl<W: Write> PostingsSerializer<W> { impl<W: Write> PostingsSerializer<W> {
@@ -313,7 +339,16 @@ impl<W: Write> PostingsSerializer<W> {
write: W, write: W,
termfreq_enabled: bool, termfreq_enabled: bool,
termfreq_sum_enabled: bool, termfreq_sum_enabled: bool,
fieldnorm_reader: Option<FieldNormReader>,
) -> PostingsSerializer<W> { ) -> PostingsSerializer<W> {
let avg_fieldnorm: f32 = fieldnorm_reader
.as_ref()
.map(get_avg_fieldnorm)
.unwrap_or(0f32);
let num_docs = fieldnorm_reader
.as_ref()
.map(|fieldnorm_reader| fieldnorm_reader.num_docs())
.unwrap_or(0u32);
PostingsSerializer { PostingsSerializer {
output_write: CountingWriter::wrap(write), output_write: CountingWriter::wrap(write),
@@ -326,6 +361,23 @@ impl<W: Write> PostingsSerializer<W> {
last_doc_id_encoded: 0u32, last_doc_id_encoded: 0u32,
termfreq_enabled, termfreq_enabled,
termfreq_sum_enabled, termfreq_sum_enabled,
fieldnorm_reader,
bm25_weight: None,
num_docs,
avg_fieldnorm,
}
}
pub fn new_term(&mut self, term_doc_freq: u32) {
if self.termfreq_enabled && self.num_docs > 0 {
let bm25_weight = BM25Weight::for_one_term(
term_doc_freq as u64,
self.num_docs as u64,
self.avg_fieldnorm,
);
self.bm25_weight = Some(bm25_weight);
} }
} }
@@ -342,7 +394,6 @@ impl<W: Write> PostingsSerializer<W> {
self.postings_write.extend(block_encoded); self.postings_write.extend(block_encoded);
} }
if self.termfreq_enabled { if self.termfreq_enabled {
// encode the term_freqs
let (num_bits, block_encoded): (u8, &[u8]) = self let (num_bits, block_encoded): (u8, &[u8]) = self
.block_encoder .block_encoder
.compress_block_unsorted(&self.block.term_freqs()); .compress_block_unsorted(&self.block.term_freqs());
@@ -352,6 +403,32 @@ impl<W: Write> PostingsSerializer<W> {
let sum_freq = self.block.term_freqs().iter().cloned().sum(); let sum_freq = self.block.term_freqs().iter().cloned().sum();
self.skip_write.write_total_term_freq(sum_freq); self.skip_write.write_total_term_freq(sum_freq);
} }
let mut blockwand_params_opt = None;
if let Some(bm25_weight) = self.bm25_weight.as_ref() {
if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
let docs = self.block.doc_ids();
let term_freqs = self.block.term_freqs();
blockwand_params_opt = docs
.iter()
.cloned()
.map(|doc| fieldnorm_reader.fieldnorm_id(doc))
.zip(term_freqs.iter().cloned())
.max_by(
|(left_fieldnorm_id, left_term_freq),
(right_fieldnorm_id, right_term_freq)| {
let left_score =
bm25_weight.tf_factor(*left_fieldnorm_id, *left_term_freq);
let right_score =
bm25_weight.tf_factor(*right_fieldnorm_id, *right_term_freq);
left_score
.partial_cmp(&right_score)
.unwrap_or(Ordering::Equal)
},
);
}
}
let (fieldnorm_id, term_freq) = blockwand_params_opt.unwrap_or((0u8, 0u32));
self.skip_write.write_blockwand_max(fieldnorm_id, term_freq);
} }
self.block.clear(); self.block.clear();
} }
@@ -400,6 +477,7 @@ impl<W: Write> PostingsSerializer<W> {
} }
self.skip_write.clear(); self.skip_write.clear();
self.postings_write.clear(); self.postings_write.clear();
self.bm25_weight = None;
Ok(()) Ok(())
} }

View File

@@ -1,8 +1,9 @@
use crate::common::BinarySerializable; use crate::common::{BinarySerializable, VInt};
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE}; use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
use crate::query::BM25Weight;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::{DocId, TERMINATED}; use crate::{DocId, Score, TERMINATED};
use owned_read::OwnedRead; use owned_read::OwnedRead;
pub struct SkipSerializer { pub struct SkipSerializer {
@@ -40,6 +41,11 @@ impl SkipSerializer {
.expect("Should never fail"); .expect("Should never fail");
} }
pub fn write_blockwand_max(&mut self, fieldnorm_id: u8, term_freq: u32) {
self.buffer.push(fieldnorm_id);
VInt(term_freq as u64).serialize_into_vec(&mut self.buffer);
}
pub fn data(&self) -> &[u8] { pub fn data(&self) -> &[u8] {
&self.buffer[..] &self.buffer[..]
} }
@@ -63,19 +69,23 @@ pub(crate) struct SkipReader {
position_offset: u64, position_offset: u64,
} }
#[derive(Clone, Eq, PartialEq, Copy, Debug)] #[derive(Clone, Copy, Debug)]
pub(crate) enum BlockInfo { pub(crate) enum BlockInfo {
BitPacked { BitPacked {
doc_num_bits: u8, doc_num_bits: u8,
tf_num_bits: u8, tf_num_bits: u8,
tf_sum: u32, tf_sum: u32,
block_wand_fieldnorm_id: u8,
block_wand_term_freq: u32,
},
VInt {
num_docs: u32,
}, },
VInt(u32),
} }
impl Default for BlockInfo { impl Default for BlockInfo {
fn default() -> Self { fn default() -> Self {
BlockInfo::VInt(0) BlockInfo::VInt { num_docs: 0u32 }
} }
} }
@@ -90,7 +100,7 @@ impl SkipReader {
last_doc_in_previous_block: 0u32, last_doc_in_previous_block: 0u32,
owned_read: OwnedRead::new(data), owned_read: OwnedRead::new(data),
skip_info, skip_info,
block_info: BlockInfo::VInt(doc_freq), block_info: BlockInfo::VInt { num_docs: doc_freq },
byte_offset: 0, byte_offset: 0,
remaining_docs: doc_freq, remaining_docs: doc_freq,
position_offset: 0u64, position_offset: 0u64,
@@ -109,7 +119,7 @@ impl SkipReader {
}; };
self.last_doc_in_previous_block = 0u32; self.last_doc_in_previous_block = 0u32;
self.owned_read = OwnedRead::new(data); self.owned_read = OwnedRead::new(data);
self.block_info = BlockInfo::VInt(doc_freq); self.block_info = BlockInfo::VInt { num_docs: doc_freq };
self.byte_offset = 0; self.byte_offset = 0;
self.remaining_docs = doc_freq; self.remaining_docs = doc_freq;
self.position_offset = 0u64; self.position_offset = 0u64;
@@ -118,8 +128,17 @@ impl SkipReader {
} }
} }
#[cfg(test)] pub fn block_max_score(&self, bm25_weight: &BM25Weight) -> Option<Score> {
#[inline(always)] match self.block_info {
BlockInfo::BitPacked {
block_wand_fieldnorm_id,
block_wand_term_freq,
..
} => Some(bm25_weight.score(block_wand_fieldnorm_id, block_wand_term_freq)),
BlockInfo::VInt { .. } => None,
}
}
pub(crate) fn last_doc_in_block(&self) -> DocId { pub(crate) fn last_doc_in_block(&self) -> DocId {
self.last_doc_in_block self.last_doc_in_block
} }
@@ -143,25 +162,38 @@ impl SkipReader {
doc_num_bits, doc_num_bits,
tf_num_bits: 0, tf_num_bits: 0,
tf_sum: 0, tf_sum: 0,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0,
}; };
} }
IndexRecordOption::WithFreqs => { IndexRecordOption::WithFreqs => {
let tf_num_bits = self.owned_read.get(1); let tf_num_bits = self.owned_read.get(1);
let block_wand_fieldnorm_id = self.owned_read.get(2);
self.owned_read.advance(3);
let block_wand_term_freq =
VInt::deserialize_u64(&mut self.owned_read).unwrap() as u32;
self.block_info = BlockInfo::BitPacked { self.block_info = BlockInfo::BitPacked {
doc_num_bits, doc_num_bits,
tf_num_bits, tf_num_bits,
tf_sum: 0, tf_sum: 0,
block_wand_fieldnorm_id,
block_wand_term_freq,
}; };
self.owned_read.advance(2);
} }
IndexRecordOption::WithFreqsAndPositions => { IndexRecordOption::WithFreqsAndPositions => {
let tf_num_bits = self.owned_read.get(1); let tf_num_bits = self.owned_read.get(1);
self.owned_read.advance(2); self.owned_read.advance(2);
let tf_sum = u32::deserialize(&mut self.owned_read).expect("Failed reading tf_sum"); let tf_sum = u32::deserialize(&mut self.owned_read).expect("Failed reading tf_sum");
let block_wand_fieldnorm_id = self.owned_read.get(0);
self.owned_read.advance(1);
let block_wand_term_freq =
VInt::deserialize_u64(&mut self.owned_read).unwrap() as u32;
self.block_info = BlockInfo::BitPacked { self.block_info = BlockInfo::BitPacked {
doc_num_bits, doc_num_bits,
tf_num_bits, tf_num_bits,
tf_sum, tf_sum,
block_wand_fieldnorm_id,
block_wand_term_freq,
}; };
} }
} }
@@ -176,7 +208,7 @@ impl SkipReader {
/// If the target is larger than all documents, the skip_reader /// If the target is larger than all documents, the skip_reader
/// then advance to the last Variable In block. /// then advance to the last Variable In block.
pub fn seek(&mut self, target: DocId) { pub fn seek(&mut self, target: DocId) {
while self.last_doc_in_block < target { while self.last_doc_in_block() < target {
self.advance(); self.advance();
} }
} }
@@ -187,13 +219,14 @@ impl SkipReader {
doc_num_bits, doc_num_bits,
tf_num_bits, tf_num_bits,
tf_sum, tf_sum,
..
} => { } => {
self.remaining_docs -= COMPRESSION_BLOCK_SIZE as u32; self.remaining_docs -= COMPRESSION_BLOCK_SIZE as u32;
self.byte_offset += compressed_block_size(doc_num_bits + tf_num_bits); self.byte_offset += compressed_block_size(doc_num_bits + tf_num_bits);
self.position_offset += tf_sum as u64; self.position_offset += tf_sum as u64;
} }
BlockInfo::VInt(num_vint_docs) => { BlockInfo::VInt { num_docs} => {
debug_assert_eq!(num_vint_docs, self.remaining_docs); debug_assert_eq!(num_docs, self.remaining_docs);
self.remaining_docs = 0; self.remaining_docs = 0;
self.byte_offset = std::usize::MAX; self.byte_offset = std::usize::MAX;
} }
@@ -203,7 +236,7 @@ impl SkipReader {
self.read_block_info(); self.read_block_info();
} else { } else {
self.last_doc_in_block = TERMINATED; self.last_doc_in_block = TERMINATED;
self.block_info = BlockInfo::VInt(self.remaining_docs); self.block_info = BlockInfo::VInt { num_docs: self.remaining_docs };
} }
} }
} }
@@ -223,8 +256,10 @@ mod tests {
let mut skip_serializer = SkipSerializer::new(); let mut skip_serializer = SkipSerializer::new();
skip_serializer.write_doc(1u32, 2u8); skip_serializer.write_doc(1u32, 2u8);
skip_serializer.write_term_freq(3u8); skip_serializer.write_term_freq(3u8);
skip_serializer.write_blockwand_max(13u8, 3u32);
skip_serializer.write_doc(5u32, 5u8); skip_serializer.write_doc(5u32, 5u8);
skip_serializer.write_term_freq(2u8); skip_serializer.write_term_freq(2u8);
skip_serializer.write_blockwand_max(8u8, 2u32);
skip_serializer.data().to_owned() skip_serializer.data().to_owned()
}; };
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32; let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
@@ -234,30 +269,34 @@ mod tests {
IndexRecordOption::WithFreqs, IndexRecordOption::WithFreqs,
); );
assert_eq!(skip_reader.last_doc_in_block(), 1u32); assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!( assert!(matches!(
skip_reader.block_info(), skip_reader.block_info,
BlockInfo::BitPacked { BlockInfo::BitPacked {
doc_num_bits: 2u8, doc_num_bits: 2u8,
tf_num_bits: 3u8, tf_num_bits: 3u8,
tf_sum: 0 tf_sum: 0,
block_wand_fieldnorm_id: 13,
block_wand_term_freq: 3
} }
); ));
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.last_doc_in_block(), 5u32); assert_eq!(skip_reader.last_doc_in_block(), 5u32);
assert_eq!( assert!(matches!(
skip_reader.block_info(), skip_reader.block_info(),
BlockInfo::BitPacked { BlockInfo::BitPacked {
doc_num_bits: 5u8, doc_num_bits: 5u8,
tf_num_bits: 2u8, tf_num_bits: 2u8,
tf_sum: 0 tf_sum: 0,
block_wand_fieldnorm_id: 8,
block_wand_term_freq: 2
} }
); ));
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32)); assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 }));
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32)); assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32)); assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
} }
#[test] #[test]
@@ -275,30 +314,34 @@ mod tests {
IndexRecordOption::Basic, IndexRecordOption::Basic,
); );
assert_eq!(skip_reader.last_doc_in_block(), 1u32); assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!( assert!(matches!(
skip_reader.block_info(), skip_reader.block_info(),
BlockInfo::BitPacked { BlockInfo::BitPacked {
doc_num_bits: 2u8, doc_num_bits: 2u8,
tf_num_bits: 0, tf_num_bits: 0,
tf_sum: 0u32 tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
} }
); ));
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.last_doc_in_block(), 5u32); assert_eq!(skip_reader.last_doc_in_block(), 5u32);
assert_eq!( assert!(matches!(
skip_reader.block_info(), skip_reader.block_info(),
BlockInfo::BitPacked { BlockInfo::BitPacked {
doc_num_bits: 5u8, doc_num_bits: 5u8,
tf_num_bits: 0, tf_num_bits: 0,
tf_sum: 0u32 tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
} }
); ));
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32)); assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 }));
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32)); assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32)); assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
} }
#[test] #[test]
@@ -315,15 +358,17 @@ mod tests {
IndexRecordOption::Basic, IndexRecordOption::Basic,
); );
assert_eq!(skip_reader.last_doc_in_block(), 1u32); assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!( assert!(matches!(
skip_reader.block_info(), skip_reader.block_info(),
BlockInfo::BitPacked { BlockInfo::BitPacked {
doc_num_bits: 2u8, doc_num_bits: 2u8,
tf_num_bits: 0, tf_num_bits: 0,
tf_sum: 0u32 tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
} }
); ));
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32)); assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
} }
} }

View File

@@ -3,11 +3,14 @@ use crate::query::Explanation;
use crate::Score; use crate::Score;
use crate::Searcher; use crate::Searcher;
use crate::Term; use crate::Term;
use serde::Deserialize;
use serde::Serialize;
const K1: f32 = 1.2; const K1: f32 = 1.2;
const B: f32 = 0.75; const B: f32 = 0.75;
fn idf(doc_freq: u64, doc_count: u64) -> f32 { fn idf(doc_freq: u64, doc_count: u64) -> f32 {
assert!(doc_count >= doc_freq, "{} >= {}", doc_count, doc_freq);
let x = ((doc_count - doc_freq) as f32 + 0.5) / (doc_freq as f32 + 0.5); let x = ((doc_count - doc_freq) as f32 + 0.5) / (doc_freq as f32 + 0.5);
(1f32 + x).ln() (1f32 + x).ln()
} }
@@ -25,6 +28,12 @@ fn compute_tf_cache(average_fieldnorm: f32) -> [f32; 256] {
cache cache
} }
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct BM25Params {
pub idf: f32,
pub avg_fieldnorm: f32,
}
pub struct BM25Weight { pub struct BM25Weight {
idf_explain: Explanation, idf_explain: Explanation,
weight: f32, weight: f32,
@@ -62,17 +71,9 @@ impl BM25Weight {
} }
let average_fieldnorm = total_num_tokens as f32 / total_num_docs as f32; let average_fieldnorm = total_num_tokens as f32 / total_num_docs as f32;
let mut idf_explain: Explanation;
if terms.len() == 1 { if terms.len() == 1 {
let term_doc_freq = searcher.doc_freq(&terms[0]); let term_doc_freq = searcher.doc_freq(&terms[0]);
let idf = idf(term_doc_freq, total_num_docs); BM25Weight::for_one_term(term_doc_freq, total_num_docs, average_fieldnorm)
idf_explain =
Explanation::new("idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))", idf);
idf_explain.add_const(
"n, number of docs containing this term",
term_doc_freq as f32,
);
idf_explain.add_const("N, total number of docs", total_num_docs as f32);
} else { } else {
let idf = terms let idf = terms
.iter() .iter()
@@ -81,9 +82,21 @@ impl BM25Weight {
idf(term_doc_freq, total_num_docs) idf(term_doc_freq, total_num_docs)
}) })
.sum::<f32>(); .sum::<f32>();
idf_explain = Explanation::new("idf", idf); let idf_explain = Explanation::new("idf", idf);
BM25Weight::new(idf_explain, average_fieldnorm)
} }
BM25Weight::new(idf_explain, average_fieldnorm) }
pub fn for_one_term(term_doc_freq: u64, total_num_docs: u64, avg_fieldnorm: f32) -> BM25Weight {
let idf = idf(term_doc_freq, total_num_docs);
let mut idf_explain =
Explanation::new("idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))", idf);
idf_explain.add_const(
"n, number of docs containing this term",
term_doc_freq as f32,
);
idf_explain.add_const("N, total number of docs", total_num_docs as f32);
BM25Weight::new(idf_explain, avg_fieldnorm)
} }
fn new(idf_explain: Explanation, average_fieldnorm: f32) -> BM25Weight { fn new(idf_explain: Explanation, average_fieldnorm: f32) -> BM25Weight {
@@ -98,15 +111,23 @@ impl BM25Weight {
#[inline(always)] #[inline(always)]
pub fn score(&self, fieldnorm_id: u8, term_freq: u32) -> Score { pub fn score(&self, fieldnorm_id: u8, term_freq: u32) -> Score {
let norm = self.cache[fieldnorm_id as usize]; self.weight * self.tf_factor(fieldnorm_id, term_freq)
}
pub fn max_score(&self) -> Score {
self.score(255u8, 2_013_265_944)
}
#[inline(always)]
pub(crate) fn tf_factor(&self, fieldnorm_id: u8, term_freq: u32) -> f32 {
let term_freq = term_freq as f32; let term_freq = term_freq as f32;
self.weight * term_freq / (term_freq + norm) let norm = self.cache[fieldnorm_id as usize];
term_freq / (term_freq + norm)
} }
pub fn explain(&self, fieldnorm_id: u8, term_freq: u32) -> Explanation { pub fn explain(&self, fieldnorm_id: u8, term_freq: u32) -> Explanation {
// The explain format is directly copied from Lucene's. // The explain format is directly copied from Lucene's.
// (So, Kudos to Lucene) // (So, Kudos to Lucene)
let score = self.score(fieldnorm_id, term_freq); let score = self.score(fieldnorm_id, term_freq);
let norm = self.cache[fieldnorm_id as usize]; let norm = self.cache[fieldnorm_id as usize];
@@ -139,10 +160,10 @@ impl BM25Weight {
mod tests { mod tests {
use super::idf; use super::idf;
use crate::tests::assert_nearly_equals; use crate::assert_nearly_equals;
#[test] #[test]
fn test_idf() { fn test_idf() {
assert_nearly_equals(idf(1, 2), 0.6931472); assert_nearly_equals!(idf(1, 2), std::f32::consts::LN_2);
} }
} }

View File

@@ -0,0 +1,206 @@
use crate::query::term_query::TermScorer;
use crate::query::Scorer;
use crate::{DocId, DocSet, Score, TERMINATED};
use std::ops::DerefMut;
use std::ops::Deref;
/// Takes term scorers sorted by their current `doc()` and a score threshold.
///
/// Returns `None` if no document can possibly exceed the threshold.
/// Otherwise returns `Some((before_pivot_len, pivot_len, pivot_doc))` where:
/// - `pivot_doc`: the lowest document with a chance of exceeding (>) the threshold score.
/// - `before_pivot_len`: number of term scorers such that `term_scorer.doc() < pivot_doc`.
/// - `pivot_len`: number of term scorers such that `term_scorer.doc() <= pivot_doc`.
///
/// The invariant `before_pivot_len < pivot_len` always holds on `Some`.
fn find_pivot_doc(
    term_scorers: &[TermScorerWithMaxScore],
    threshold: f32,
) -> Option<(usize, usize, DocId)> {
    let mut upper_bound = 0.0f32;
    let mut before_pivot_len = 0;
    let mut pivot_doc = TERMINATED;
    // Accumulate max scores in doc order until the sum could beat the threshold.
    for term_scorer in term_scorers {
        upper_bound += term_scorer.max_score;
        if upper_bound > threshold {
            pivot_doc = term_scorer.doc();
            break;
        }
        before_pivot_len += 1;
    }
    if pivot_doc == TERMINATED {
        return None;
    }
    // `before_pivot_len` is currently the ordinal of the pivot scorer; turn it into a len.
    let mut pivot_len = before_pivot_len + 1;
    // Other scorers right after the pivot may be positioned on the very same document.
    while pivot_len < term_scorers.len() && term_scorers[pivot_len].doc() == pivot_doc {
        pivot_len += 1;
    }
    Some((before_pivot_len, pivot_len, pivot_doc))
}
/// A borrowed term scorer bundled with its precomputed global maximum score,
/// so that pivot selection does not repeatedly recompute it.
struct TermScorerWithMaxScore<'a> {
    scorer: &'a mut TermScorer,
    max_score: f32,
}

impl<'a> From<&'a mut TermScorer> for TermScorerWithMaxScore<'a> {
    fn from(scorer: &'a mut TermScorer) -> Self {
        TermScorerWithMaxScore {
            max_score: scorer.max_score(),
            scorer,
        }
    }
}

impl<'a> Deref for TermScorerWithMaxScore<'a> {
    type Target = TermScorer;

    fn deref(&self) -> &TermScorer {
        self.scorer
    }
}

impl<'a> DerefMut for TermScorerWithMaxScore<'a> {
    fn deref_mut(&mut self) -> &mut TermScorer {
        self.scorer
    }
}
// Before and after calling this method, scorers need to be sorted by their `.doc()`.
//
// Called when the block-max upper bound over the pivot candidates could not beat
// the threshold: advances a single, well-chosen scorer just past the point where
// the situation may change.
fn block_max_was_too_low_advance_one_scorer(
    scorers: &mut Vec<TermScorerWithMaxScore>,
    pivot_len: usize,
) {
    // Among the pivot candidates, pick the scorer whose current block ends first
    // (ties resolved in favor of the lowest ordinal, as in the reverse scan below).
    let mut ord_to_seek = pivot_len - 1;
    let mut doc_to_seek_after = scorers[ord_to_seek].doc();
    for ord in (0..pivot_len - 1).rev() {
        let end_of_block = scorers[ord].last_doc_in_block();
        if end_of_block <= doc_to_seek_after {
            doc_to_seek_after = end_of_block;
            ord_to_seek = ord;
        }
    }
    // A scorer beyond the pivot may be positioned even earlier: never seek past it.
    if let Some(min_doc) = scorers[pivot_len..].iter().map(|scorer| scorer.doc()).min() {
        doc_to_seek_after = doc_to_seek_after.min(min_doc);
    }
    scorers[ord_to_seek].seek(doc_to_seek_after + 1);
    restore_ordering(scorers, ord_to_seek);
}
// Restores the sort-by-doc invariant of `term_scorers`, assuming every element is
// already in order except `term_scorers[ord]`, which may be in advance compared to
// its rank. The element is bubbled up towards the end until the list is sorted again.
fn restore_ordering(term_scorers: &mut Vec<TermScorerWithMaxScore>, ord: usize) {
    let doc = term_scorers[ord].doc();
    let mut i = ord;
    while i + 1 < term_scorers.len() && term_scorers[i + 1].doc() < doc {
        term_scorers.swap(i + 1, i);
        i += 1;
    }
}
// Attempts to advance all scorers in `term_scorers[..before_pivot_len]` to the pivot.
//
// If this works, returns true.
// If this fails (i.e. one of the term scorers does not contain `pivot_doc` and seek
// goes past the pivot), reorders the term scorers to ensure the list is still sorted
// and returns false.
// If a term scorer reaches TERMINATED in the process, it is removed before returning
// false.
fn align_scorers(
    term_scorers: &mut Vec<TermScorerWithMaxScore>,
    pivot_doc: DocId,
    before_pivot_len: usize,
) -> bool {
    debug_assert_ne!(pivot_doc, TERMINATED);
    // Seek in reverse order: the scorers closest to the pivot move the least.
    for i in (0..before_pivot_len).rev() {
        let new_doc = term_scorers[i].seek(pivot_doc);
        if new_doc != pivot_doc {
            if new_doc == TERMINATED {
                // This scorer is exhausted: drop it entirely.
                term_scorers.swap_remove(i);
            }
            // We went past the pivot.
            // We just go through the outer loop mechanic. (Note that the pivot is
            // still a possible candidate.)
            //
            // Termination is still guaranteed since we can only consider the same
            // pivot at most term_scorers.len() - 1 times.
            restore_ordering(term_scorers, i);
            return false;
        }
    }
    // Idiomatic Rust: the final expression is the return value (no trailing `return`).
    true
}
// Assumes `term_scorers[..pivot_len]` are all positioned on the same doc (pivot_doc).
// Advances `term_scorers[..pivot_len]`, removes the scorers that reached TERMINATED,
// and restores the ordering of `term_scorers` by `.doc()`.
fn advance_all_scorers_on_pivot(term_scorers: &mut Vec<TermScorerWithMaxScore>, pivot_len: usize) {
    // Advance the pivot scorers first, then remove terminated ones.
    //
    // NOTE: doing `swap_remove` inside a loop counted to `pivot_len` (as a previous
    // version did) is incorrect: when a pivot scorer terminates and the vec holds
    // more than `pivot_len` scorers, the tail (non-pivot) scorer swapped into its
    // slot gets advanced on the next iteration — silently skipping its current doc —
    // while the pivot scorer shifted past the counter is never advanced and can be
    // re-scored on the next pivot selection.
    for term_scorer in term_scorers[..pivot_len].iter_mut() {
        term_scorer.advance();
    }
    term_scorers.retain(|term_scorer| term_scorer.doc() != TERMINATED);
    term_scorers.sort_by_key(|scorer| scorer.doc());
}
/// Top-k union driver implementing the Block-Max WAND algorithm.
///
/// * `scorers` - the term scorers of the union.
/// * `threshold` - the initial score a document must exceed (>) to be reported.
/// * `callback` - invoked with every `(doc, score)` pair that beats the current
///   threshold; its return value becomes the new threshold from that point on.
pub fn block_wand(
    mut scorers: Vec<TermScorer>,
    mut threshold: f32,
    callback: &mut dyn FnMut(u32, Score) -> Score,
) {
    // Wrap each scorer with its precomputed max score and sort by current doc.
    let mut scorers: Vec<TermScorerWithMaxScore> = scorers.iter_mut().map(TermScorerWithMaxScore::from).collect();
    scorers.sort_by_key(|scorer| scorer.doc());
    loop {
        // At this point we need to ensure that the scorers are sorted!
        if let Some((before_pivot_len, pivot_len, pivot_doc)) = find_pivot_doc(&scorers[..], threshold) {
            debug_assert_ne!(pivot_doc, TERMINATED);
            debug_assert!(before_pivot_len < pivot_len);
            // Tighter upper bound than the global max: sum of per-block maxima
            // for the blocks containing the pivot.
            let block_max_score_upperbound: Score = scorers[..pivot_len].iter_mut()
                .map(|scorer| {
                    scorer.shallow_seek(pivot_doc);
                    scorer.block_max_score()
                })
                .sum();
            // Beware: after a shallow seek, skip readers can be in advance compared to
            // the segment posting lists.
            //
            // `block_segment_postings.load_block()` needs to be called separately.
            if block_max_score_upperbound <= threshold {
                // Block max condition was not reached.
                // We could get away with simply advancing the scorers to DocId + 1 but it would
                // be inefficient. The optimization requires proper explanation and was
                // isolated in a different function.
                block_max_was_too_low_advance_one_scorer(&mut scorers, pivot_len);
                continue;
            }
            // Block max condition is observed.
            //
            // Let's try and advance all scorers before the pivot to the pivot.
            if !align_scorers(&mut scorers, pivot_doc, before_pivot_len) {
                // At least one of the scorers does not contain the pivot.
                //
                // Let's stop scoring this pivot and go through the pivot selection again.
                // Note that the current pivot is not necessarily a bad candidate and it
                // may be picked again.
                continue;
            }
            // At this point, all scorers in [..pivot_len] are positioned on the pivot doc.
            let score = scorers[..pivot_len]
                .iter_mut()
                .map(|scorer| scorer.score())
                .sum();
            if score > threshold {
                // The callback returns the (possibly raised) threshold to use next.
                threshold = callback(pivot_doc, score);
            }
            // Let's advance all of the scorers that are currently positioned on the pivot.
            advance_all_scorers_on_pivot(&mut scorers, pivot_len);
        } else {
            // No document left that can beat the threshold: we are done.
            return;
        }
    }
}

View File

@@ -1,4 +1,5 @@
use crate::core::SegmentReader; use crate::core::SegmentReader;
use crate::postings::FreqReadingOption;
use crate::query::explanation::does_not_match; use crate::query::explanation::does_not_match;
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner}; use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner};
use crate::query::term_query::TermScorer; use crate::query::term_query::TermScorer;
@@ -14,12 +15,12 @@ use crate::query::{intersect_scorers, Explanation};
use crate::{DocId, Score}; use crate::{DocId, Score};
use std::collections::HashMap; use std::collections::HashMap;
enum SpecializedScorer<TScoreCombiner: ScoreCombiner> { enum SpecializedScorer {
TermUnion(Union<TermScorer, TScoreCombiner>), TermUnion(Vec<TermScorer>),
Other(Box<dyn Scorer>), Other(Box<dyn Scorer>),
} }
fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> SpecializedScorer<TScoreCombiner> fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> SpecializedScorer
where where
TScoreCombiner: ScoreCombiner, TScoreCombiner: ScoreCombiner,
{ {
@@ -35,20 +36,29 @@ where
.into_iter() .into_iter()
.map(|scorer| *(scorer.downcast::<TermScorer>().map_err(|_| ()).unwrap())) .map(|scorer| *(scorer.downcast::<TermScorer>().map_err(|_| ()).unwrap()))
.collect(); .collect();
return SpecializedScorer::TermUnion(Union::<TermScorer, TScoreCombiner>::from( if scorers
scorers, .iter()
)); .all(|scorer| scorer.freq_reading_option() == FreqReadingOption::ReadFreq)
{
// Block wand is only available iff we read frequencies.
return SpecializedScorer::TermUnion(scorers);
} else {
return SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(
scorers,
)));
}
} }
} }
SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(scorers))) SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(scorers)))
} }
impl<TScoreCombiner: ScoreCombiner> Into<Box<dyn Scorer>> for SpecializedScorer<TScoreCombiner> { fn into_box_scorer<TScoreCombiner: ScoreCombiner>(scorer: SpecializedScorer) -> Box<dyn Scorer> {
fn into(self) -> Box<dyn Scorer> { match scorer {
match self { SpecializedScorer::TermUnion(term_scorers) => {
Self::TermUnion(union) => Box::new(union), let union_scorer = Union::<TermScorer, TScoreCombiner>::from(term_scorers);
Self::Other(scorer) => scorer, Box::new(union_scorer)
} }
SpecializedScorer::Other(scorer) => scorer,
} }
} }
@@ -85,46 +95,47 @@ impl BooleanWeight {
&self, &self,
reader: &SegmentReader, reader: &SegmentReader,
boost: f32, boost: f32,
) -> crate::Result<SpecializedScorer<TScoreCombiner>> { ) -> crate::Result<SpecializedScorer> {
let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?; let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;
let should_scorer_opt: Option<SpecializedScorer<TScoreCombiner>> = per_occur_scorers let should_scorer_opt: Option<SpecializedScorer> = per_occur_scorers
.remove(&Occur::Should) .remove(&Occur::Should)
.map(scorer_union::<TScoreCombiner>); .map(scorer_union::<TScoreCombiner>);
let exclude_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers let exclude_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::MustNot) .remove(&Occur::MustNot)
.map(scorer_union::<DoNothingCombiner>) .map(scorer_union::<DoNothingCombiner>)
.map(Into::into); .map(into_box_scorer::<DoNothingCombiner>);
let must_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers let must_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::Must) .remove(&Occur::Must)
.map(intersect_scorers); .map(intersect_scorers);
let positive_scorer: SpecializedScorer<TScoreCombiner> = let positive_scorer: SpecializedScorer = match (should_scorer_opt, must_scorer_opt) {
match (should_scorer_opt, must_scorer_opt) { (Some(should_scorer), Some(must_scorer)) => {
(Some(should_scorer), Some(must_scorer)) => { if self.scoring_enabled {
if self.scoring_enabled { SpecializedScorer::Other(Box::new(RequiredOptionalScorer::<
SpecializedScorer::Other(Box::new(RequiredOptionalScorer::< Box<dyn Scorer>,
Box<dyn Scorer>, Box<dyn Scorer>,
Box<dyn Scorer>, TScoreCombiner,
TScoreCombiner, >::new(
>::new( must_scorer,
must_scorer, should_scorer.into() into_box_scorer::<TScoreCombiner>(should_scorer),
))) )))
} else { } else {
SpecializedScorer::Other(must_scorer) SpecializedScorer::Other(must_scorer)
}
} }
(None, Some(must_scorer)) => SpecializedScorer::Other(must_scorer), }
(Some(should_scorer), None) => should_scorer, (None, Some(must_scorer)) => SpecializedScorer::Other(must_scorer),
(None, None) => { (Some(should_scorer), None) => should_scorer,
return Ok(SpecializedScorer::Other(Box::new(EmptyScorer))); (None, None) => {
} return Ok(SpecializedScorer::Other(Box::new(EmptyScorer)));
}; }
};
if let Some(exclude_scorer) = exclude_scorer_opt { if let Some(exclude_scorer) = exclude_scorer_opt {
let positive_scorer_boxed: Box<dyn Scorer> = positive_scorer.into(); let positive_scorer_boxed: Box<dyn Scorer> =
into_box_scorer::<TScoreCombiner>(positive_scorer);
Ok(SpecializedScorer::Other(Box::new(Exclude::new( Ok(SpecializedScorer::Other(Box::new(Exclude::new(
positive_scorer_boxed, positive_scorer_boxed,
exclude_scorer, exclude_scorer,
@@ -148,10 +159,12 @@ impl Weight for BooleanWeight {
} }
} else if self.scoring_enabled { } else if self.scoring_enabled {
self.complex_scorer::<SumWithCoordsCombiner>(reader, boost) self.complex_scorer::<SumWithCoordsCombiner>(reader, boost)
.map(Into::into) .map(|specialized_scorer| {
into_box_scorer::<SumWithCoordsCombiner>(specialized_scorer)
})
} else { } else {
self.complex_scorer::<DoNothingCombiner>(reader, boost) self.complex_scorer::<DoNothingCombiner>(reader, boost)
.map(Into::into) .map(into_box_scorer::<DoNothingCombiner>)
} }
} }
@@ -182,7 +195,9 @@ impl Weight for BooleanWeight {
) -> crate::Result<()> { ) -> crate::Result<()> {
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?; let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
match scorer { match scorer {
SpecializedScorer::TermUnion(mut union_scorer) => { SpecializedScorer::TermUnion(term_scorers) => {
let mut union_scorer =
Union::<TermScorer, SumWithCoordsCombiner>::from(term_scorers);
for_each_scorer(&mut union_scorer, callback); for_each_scorer(&mut union_scorer, callback);
} }
SpecializedScorer::Other(mut scorer) => { SpecializedScorer::Other(mut scorer) => {
@@ -210,8 +225,8 @@ impl Weight for BooleanWeight {
) -> crate::Result<()> { ) -> crate::Result<()> {
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?; let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
match scorer { match scorer {
SpecializedScorer::TermUnion(mut union_scorer) => { SpecializedScorer::TermUnion(term_scorers) => {
for_each_pruning_scorer(&mut union_scorer, threshold, callback); super::block_wand(term_scorers, threshold, callback);
} }
SpecializedScorer::Other(mut scorer) => { SpecializedScorer::Other(mut scorer) => {
for_each_pruning_scorer(scorer.as_mut(), threshold, callback); for_each_pruning_scorer(scorer.as_mut(), threshold, callback);

View File

@@ -1,12 +1,15 @@
mod block_wand;
mod boolean_query; mod boolean_query;
mod boolean_weight; mod boolean_weight;
pub(crate) use self::block_wand::block_wand;
pub use self::boolean_query::BooleanQuery; pub use self::boolean_query::BooleanQuery;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::assert_nearly_equals;
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE; use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
use crate::collector::TopDocs; use crate::collector::TopDocs;
use crate::query::score_combiner::SumWithCoordsCombiner; use crate::query::score_combiner::SumWithCoordsCombiner;
@@ -19,7 +22,6 @@ mod tests {
use crate::query::Scorer; use crate::query::Scorer;
use crate::query::TermQuery; use crate::query::TermQuery;
use crate::schema::*; use crate::schema::*;
use crate::tests::assert_nearly_equals;
use crate::Index; use crate::Index;
use crate::{DocAddress, DocId, Score}; use crate::{DocAddress, DocId, Score};
@@ -256,14 +258,14 @@ mod tests {
.scorer(searcher.segment_reader(0u32), 1.0f32) .scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap(); .unwrap();
assert_eq!(boolean_scorer.doc(), 0u32); assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals(boolean_scorer.score(), 0.84163445f32); assert_nearly_equals!(boolean_scorer.score(), 0.84163445f32);
} }
{ {
let mut boolean_scorer = boolean_weight let mut boolean_scorer = boolean_weight
.scorer(searcher.segment_reader(0u32), 2.0f32) .scorer(searcher.segment_reader(0u32), 2.0f32)
.unwrap(); .unwrap();
assert_eq!(boolean_scorer.doc(), 0u32); assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals(boolean_scorer.score(), 1.6832689f32); assert_nearly_equals!(boolean_scorer.score(), 1.6832689f32);
} }
} }

View File

@@ -1,5 +1,6 @@
use crate::{DocId, TantivyError}; use crate::{DocId, TantivyError};
use serde::Serialize; use serde::Serialize;
use std::fmt;
pub(crate) fn does_not_match(doc: DocId) -> TantivyError { pub(crate) fn does_not_match(doc: DocId) -> TantivyError {
TantivyError::InvalidArgument(format!("Document #({}) does not match", doc)) TantivyError::InvalidArgument(format!("Document #({}) does not match", doc))
@@ -18,6 +19,12 @@ pub struct Explanation {
details: Vec<Explanation>, details: Vec<Explanation>,
} }
impl fmt::Debug for Explanation {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Explanation({})", self.to_pretty_json())
}
}
impl Explanation { impl Explanation {
/// Creates a new explanation object. /// Creates a new explanation object.
pub fn new<T: ToString>(description: T, value: f32) -> Explanation { pub fn new<T: ToString>(description: T, value: f32) -> Explanation {

View File

@@ -163,10 +163,10 @@ impl Query for FuzzyTermQuery {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::FuzzyTermQuery; use super::FuzzyTermQuery;
use crate::assert_nearly_equals;
use crate::collector::TopDocs; use crate::collector::TopDocs;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::TEXT; use crate::schema::TEXT;
use crate::tests::assert_nearly_equals;
use crate::Index; use crate::Index;
use crate::Term; use crate::Term;
@@ -199,7 +199,7 @@ mod test {
.unwrap(); .unwrap();
assert_eq!(top_docs.len(), 1, "Expected only 1 document"); assert_eq!(top_docs.len(), 1, "Expected only 1 document");
let (score, _) = top_docs[0]; let (score, _) = top_docs[0];
assert_nearly_equals(1f32, score); assert_nearly_equals!(1f32, score);
} }
// fails because non-prefix Levenshtein distance is more than 1 (add 'a' and 'n') // fails because non-prefix Levenshtein distance is more than 1 (add 'a' and 'n')
@@ -223,7 +223,7 @@ mod test {
.unwrap(); .unwrap();
assert_eq!(top_docs.len(), 1, "Expected only 1 document"); assert_eq!(top_docs.len(), 1, "Expected only 1 document");
let (score, _) = top_docs[0]; let (score, _) = top_docs[0];
assert_nearly_equals(1f32, score); assert_nearly_equals!(1f32, score);
} }
} }
} }

View File

@@ -119,7 +119,6 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
continue 'outer; continue 'outer;
} }
} }
debug_assert_eq!(candidate, self.left.doc()); debug_assert_eq!(candidate, self.left.doc());
debug_assert_eq!(candidate, self.right.doc()); debug_assert_eq!(candidate, self.right.doc());
debug_assert!(self.others.iter().all(|docset| docset.doc() == candidate)); debug_assert!(self.others.iter().all(|docset| docset.doc() == candidate));

View File

@@ -26,6 +26,7 @@ mod weight;
mod vec_docset; mod vec_docset;
pub(crate) mod score_combiner; pub(crate) mod score_combiner;
pub(crate) use self::bm25::BM25Weight;
pub use self::intersection::Intersection; pub use self::intersection::Intersection;
pub use self::union::Union; pub use self::union::Union;

View File

@@ -10,11 +10,11 @@ pub use self::phrase_weight::PhraseWeight;
pub mod tests { pub mod tests {
use super::*; use super::*;
use crate::assert_nearly_equals;
use crate::collector::tests::{TEST_COLLECTOR_WITHOUT_SCORE, TEST_COLLECTOR_WITH_SCORE}; use crate::collector::tests::{TEST_COLLECTOR_WITHOUT_SCORE, TEST_COLLECTOR_WITH_SCORE};
use crate::core::Index; use crate::core::Index;
use crate::query::Weight; use crate::query::Weight;
use crate::schema::{Schema, Term, TEXT}; use crate::schema::{Schema, Term, TEXT};
use crate::tests::assert_nearly_equals;
use crate::DocId; use crate::DocId;
use crate::{DocAddress, TERMINATED}; use crate::{DocAddress, TERMINATED};
@@ -175,8 +175,8 @@ pub mod tests {
.to_vec() .to_vec()
}; };
let scores = test_query(vec!["a", "b"]); let scores = test_query(vec!["a", "b"]);
assert_nearly_equals(scores[0], 0.40618482); assert_nearly_equals!(scores[0], 0.40618482);
assert_nearly_equals(scores[1], 0.46844664); assert_nearly_equals!(scores[1], 0.46844664);
} }
#[test] // motivated by #234 #[test] // motivated by #234

View File

@@ -89,10 +89,10 @@ impl Query for RegexQuery {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::RegexQuery; use super::RegexQuery;
use crate::assert_nearly_equals;
use crate::collector::TopDocs; use crate::collector::TopDocs;
use crate::schema::TEXT; use crate::schema::TEXT;
use crate::schema::{Field, Schema}; use crate::schema::{Field, Schema};
use crate::tests::assert_nearly_equals;
use crate::{Index, IndexReader}; use crate::{Index, IndexReader};
use std::sync::Arc; use std::sync::Arc;
use tantivy_fst::Regex; use tantivy_fst::Regex;
@@ -129,7 +129,7 @@ mod test {
.unwrap(); .unwrap();
assert_eq!(scored_docs.len(), 1, "Expected only 1 document"); assert_eq!(scored_docs.len(), 1, "Expected only 1 document");
let (score, _) = scored_docs[0]; let (score, _) = scored_docs[0];
assert_nearly_equals(1f32, score); assert_nearly_equals!(1f32, score);
} }
let top_docs = searcher let top_docs = searcher
.search(&query_matching_zero, &TopDocs::with_limit(2)) .search(&query_matching_zero, &TopDocs::with_limit(2))

View File

@@ -9,14 +9,13 @@ pub use self::term_weight::TermWeight;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::assert_nearly_equals;
use crate::collector::TopDocs; use crate::collector::TopDocs;
use crate::docset::DocSet; use crate::docset::DocSet;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE; use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::query::{Query, QueryParser, Scorer, TermQuery}; use crate::query::{Query, QueryParser, Scorer, TermQuery};
use crate::schema::{Field, IndexRecordOption, Schema, STRING, TEXT}; use crate::schema::{Field, IndexRecordOption, Schema, STRING, TEXT};
use crate::tests::assert_nearly_equals; use crate::{Term, Index, TERMINATED};
use crate::Term;
use crate::{Index, TERMINATED};
#[test] #[test]
pub fn test_term_query_no_freq() { pub fn test_term_query_no_freq() {
@@ -105,7 +104,7 @@ mod tests {
.unwrap(); .unwrap();
assert_eq!(topdocs.len(), 1); assert_eq!(topdocs.len(), 1);
let (score, _) = topdocs[0]; let (score, _) = topdocs[0];
assert_nearly_equals(0.77802235, score); assert_nearly_equals!(0.77802235, score);
} }
{ {
let term = Term::from_field_text(left_field, "left1"); let term = Term::from_field_text(left_field, "left1");
@@ -115,9 +114,9 @@ mod tests {
.unwrap(); .unwrap();
assert_eq!(top_docs.len(), 2); assert_eq!(top_docs.len(), 2);
let (score1, _) = top_docs[0]; let (score1, _) = top_docs[0];
assert_nearly_equals(0.27101856, score1); assert_nearly_equals!(0.27101856, score1);
let (score2, _) = top_docs[1]; let (score2, _) = top_docs[1];
assert_nearly_equals(0.13736556, score2); assert_nearly_equals!(0.13736556, score2);
} }
{ {
let query_parser = QueryParser::for_index(&index, vec![]); let query_parser = QueryParser::for_index(&index, vec![]);
@@ -125,9 +124,9 @@ mod tests {
let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap(); let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
assert_eq!(top_docs.len(), 2); assert_eq!(top_docs.len(), 2);
let (score1, _) = top_docs[0]; let (score1, _) = top_docs[0];
assert_nearly_equals(0.9153879, score1); assert_nearly_equals!(0.9153879, score1);
let (score2, _) = top_docs[1]; let (score2, _) = top_docs[1];
assert_nearly_equals(0.27101856, score2); assert_nearly_equals!(0.27101856, score2);
} }
} }

View File

@@ -4,8 +4,8 @@ use crate::DocId;
use crate::Score; use crate::Score;
use crate::fieldnorm::FieldNormReader; use crate::fieldnorm::FieldNormReader;
use crate::postings::Postings;
use crate::postings::SegmentPostings; use crate::postings::SegmentPostings;
use crate::postings::{FreqReadingOption, Postings};
use crate::query::bm25::BM25Weight; use crate::query::bm25::BM25Weight;
pub struct TermScorer { pub struct TermScorer {
@@ -26,13 +26,70 @@ impl TermScorer {
similarity_weight, similarity_weight,
} }
} }
}
impl TermScorer { pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
self.postings.block_cursor.shallow_seek(target_doc)
}
#[cfg(test)]
pub fn create_for_test(
doc_and_tfs: &[(DocId, u32)],
fieldnorm_vals: &[u32],
similarity_weight: BM25Weight,
) -> crate::Result<TermScorer> {
assert!(!doc_and_tfs.is_empty());
assert!(doc_and_tfs.len() <= fieldnorm_vals.len());
let doc_freq = doc_and_tfs.len();
let max_doc = doc_and_tfs.last().unwrap().0 + 1;
let mut fieldnorms: Vec<u32> = std::iter::repeat(1).take(max_doc as usize).collect();
for i in 0..doc_freq {
let doc = doc_and_tfs[i].0;
let fieldnorm = fieldnorm_vals[i];
fieldnorms[doc as usize] = fieldnorm;
}
let fieldnorm_reader = FieldNormReader::from(&fieldnorms[..]);
let segment_postings =
SegmentPostings::create_from_docs_and_tfs(doc_and_tfs, Some(fieldnorm_reader.clone()))?;
Ok(TermScorer::new(segment_postings, fieldnorm_reader, similarity_weight))
}
/// See `FreqReadingOption`.
pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
self.postings.block_cursor.freq_reading_option()
}
/// Returns the maximum score for the current block.
///
/// In some rare case, the result may not be exact. In this case a lower value is returned,
/// (and may lead us to return a lesser document).
///
/// At index time, we store the (fieldnorm_id, term frequency) pair that maximizes the
/// score assuming the average fieldnorm computed on this segment.
///
/// Though extremely rare, it is theoretically possible that the actual average fieldnorm
/// is different enough from the current segment average fieldnorm that the maximum over a
/// specific is achieved on a different document.
///
/// (The result is on the other hand guaranteed to be correct if there is only one segment).
pub fn block_max_score(&mut self) -> Score {
self.postings
.block_cursor
.block_max_score(&self.fieldnorm_reader, &self.similarity_weight)
}
pub fn term_freq(&self) -> u32 { pub fn term_freq(&self) -> u32 {
self.postings.term_freq() self.postings.term_freq()
} }
pub fn doc_freq(&self) -> usize {
self.postings.doc_freq() as usize
}
pub fn fieldnorm_id(&self) -> u8 { pub fn fieldnorm_id(&self) -> u8 {
self.fieldnorm_reader.fieldnorm_id(self.doc()) self.fieldnorm_reader.fieldnorm_id(self.doc())
} }
@@ -42,6 +99,14 @@ impl TermScorer {
let term_freq = self.term_freq(); let term_freq = self.term_freq();
self.similarity_weight.explain(fieldnorm_id, term_freq) self.similarity_weight.explain(fieldnorm_id, term_freq)
} }
pub fn max_score(&self) -> f32 {
self.similarity_weight.max_score()
}
pub fn last_doc_in_block(&self) -> DocId {
self.postings.block_cursor.skip_reader.last_doc_in_block()
}
} }
impl DocSet for TermScorer { impl DocSet for TermScorer {
@@ -69,3 +134,99 @@ impl Scorer for TermScorer {
self.similarity_weight.score(fieldnorm_id, term_freq) self.similarity_weight.score(fieldnorm_id, term_freq)
} }
} }
#[cfg(test)]
mod tests {
use crate::assert_nearly_equals;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::query::term_query::TermScorer;
use crate::query::{BM25Weight, Scorer};
use crate::{DocId, DocSet, TERMINATED};
use proptest::prelude::*;
#[test]
fn test_term_scorer_max_score() -> crate::Result<()> {
let bm25_weight = BM25Weight::for_one_term(3, 6, 10f32);
let mut term_scorer =
TermScorer::create_for_test(&[(2, 3), (3, 12), (7, 8)], &[10, 12, 100], bm25_weight)?;
let max_scorer = term_scorer.max_score();
assert_eq!(max_scorer, 1.3990127f32);
assert_eq!(term_scorer.doc(), 2);
assert_eq!(term_scorer.term_freq(), 3);
assert_nearly_equals!(term_scorer.block_max_score(), 1.3676447f32);
assert_nearly_equals!(term_scorer.score(), 1.0892314f32);
assert_eq!(term_scorer.advance(), 3);
assert_eq!(term_scorer.doc(), 3);
assert_eq!(term_scorer.term_freq(), 12);
assert_nearly_equals!(term_scorer.score(), 1.3676447f32);
assert_eq!(term_scorer.advance(), 7);
assert_eq!(term_scorer.doc(), 7);
assert_eq!(term_scorer.term_freq(), 8);
assert_nearly_equals!(term_scorer.score(), 0.72015285f32);
assert_eq!(term_scorer.advance(), TERMINATED);
Ok(())
}
#[test]
fn test_term_scorer_shallow_advance() -> crate::Result<()> {
let bm25_weight = BM25Weight::for_one_term(300, 1024, 10f32);
let mut doc_and_tfs = vec![];
for i in 0u32..300u32 {
let doc = i * 10;
doc_and_tfs.push((doc, 1u32 + doc % 3u32));
}
let fieldnorms: Vec<u32> = std::iter::repeat(10u32).take(1024).collect();
let mut term_scorer =
TermScorer::create_for_test(&doc_and_tfs, &fieldnorms, bm25_weight)?;
assert_eq!(term_scorer.doc(), 0u32);
term_scorer.shallow_seek(1289);
assert_eq!(term_scorer.doc(), 0u32);
term_scorer.seek(1289);
assert_eq!(term_scorer.doc(), 1290);
Ok(())
}
proptest! {
#[test]
fn test_term_scorer_block_max_score(term_freqs_fieldnorms in proptest::collection::vec((1u32..10u32, 0u32..100u32), 80..300)) {
let term_doc_freq = term_freqs_fieldnorms.len();
let doc_tfs: Vec<(u32, u32)> = term_freqs_fieldnorms.iter()
.cloned()
.enumerate()
.map(|(doc, (tf, _))| (doc as u32, tf))
.collect();
let mut fieldnorms: Vec<u32> = vec![];
for i in 0..term_doc_freq {
let (tf, num_extra_terms) = term_freqs_fieldnorms[i];
fieldnorms.push(tf + num_extra_terms);
}
let average_fieldnorm = fieldnorms
.iter()
.cloned()
.sum::<u32>() as f32 / term_doc_freq as f32;
// Average fieldnorm is over the entire index,
// not necessarily the docs that are in the posting list.
// For this reason we multiply by 1.1 to make a realistic value.
let bm25_weight = BM25Weight::for_one_term(term_doc_freq as u64,
term_doc_freq as u64 * 10u64,
average_fieldnorm);
let mut term_scorer =
TermScorer::create_for_test(&doc_tfs[..], &fieldnorms[..], bm25_weight).unwrap();
let docs: Vec<DocId> = (0..term_doc_freq).map(|doc| doc as DocId).collect();
for block in docs.chunks(COMPRESSION_BLOCK_SIZE) {
let block_max_score = term_scorer.block_max_score();
let mut block_max_score_computed = 0.0f32;
for &doc in block {
assert_eq!(term_scorer.doc(), doc);
block_max_score_computed = block_max_score_computed.max(term_scorer.score());
term_scorer.advance();
}
assert_nearly_equals!(block_max_score_computed, block_max_score);
}
}
}
}

View File

@@ -20,12 +20,12 @@ pub struct TermWeight {
impl Weight for TermWeight { impl Weight for TermWeight {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> { fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
let term_scorer = self.scorer_specialized(reader, boost)?; let term_scorer = self.specialized_scorer(reader, boost)?;
Ok(Box::new(term_scorer)) Ok(Box::new(term_scorer))
} }
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> { fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.scorer_specialized(reader, 1.0f32)?; let mut scorer = self.specialized_scorer(reader, 1.0f32)?;
if scorer.seek(doc) != doc { if scorer.seek(doc) != doc {
return Err(does_not_match(doc)); return Err(does_not_match(doc));
} }
@@ -52,7 +52,7 @@ impl Weight for TermWeight {
reader: &SegmentReader, reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score), callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> { ) -> crate::Result<()> {
let mut scorer = self.scorer_specialized(reader, 1.0f32)?; let mut scorer = self.specialized_scorer(reader, 1.0f32)?;
for_each_scorer(&mut scorer, callback); for_each_scorer(&mut scorer, callback);
Ok(()) Ok(())
} }
@@ -92,7 +92,7 @@ impl TermWeight {
} }
} }
fn scorer_specialized(&self, reader: &SegmentReader, boost: f32) -> Result<TermScorer> { pub fn specialized_scorer(&self, reader: &SegmentReader, boost: f32) -> Result<TermScorer> {
let field = self.term.field(); let field = self.term.field();
let inverted_index = reader.inverted_index(field); let inverted_index = reader.inverted_index(field);
let fieldnorm_reader = reader.get_fieldnorms_reader(field); let fieldnorm_reader = reader.get_fieldnorms_reader(field);

View File

@@ -14,7 +14,7 @@ use std::fmt;
/// - a field name /// - a field name
/// - a field type, itself wrapping up options describing /// - a field type, itself wrapping up options describing
/// how the field should be indexed. /// how the field should be indexed.
#[derive(Clone, Debug, Eq, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub struct FieldEntry { pub struct FieldEntry {
name: String, name: String,
field_type: FieldType, field_type: FieldType,

View File

@@ -48,7 +48,7 @@ pub enum Type {
/// A `FieldType` describes the type (text, u64) of a field as well as /// A `FieldType` describes the type (text, u64) of a field as well as
/// how it should be handled by tantivy. /// how it should be handled by tantivy.
#[derive(Clone, Debug, Eq, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub enum FieldType { pub enum FieldType {
/// String field type configuration /// String field type configuration
Str(TextOptions), Str(TextOptions),

View File

@@ -6,7 +6,7 @@ use std::borrow::Cow;
use std::ops::BitOr; use std::ops::BitOr;
/// Define how a text field should be handled by tantivy. /// Define how a text field should be handled by tantivy.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TextOptions { pub struct TextOptions {
indexing: Option<TextFieldIndexing>, indexing: Option<TextFieldIndexing>,
stored: bool, stored: bool,
@@ -51,7 +51,7 @@ impl Default for TextOptions {
/// - the amount of information that should be stored about the presence of a term in a document. /// - the amount of information that should be stored about the presence of a term in a document.
/// Essentially, should we store the term frequency and/or the positions (See [`IndexRecordOption`](./enum.IndexRecordOption.html)). /// Essentially, should we store the term frequency and/or the positions (See [`IndexRecordOption`](./enum.IndexRecordOption.html)).
/// - the name of the `Tokenizer` that should be used to process the field. /// - the name of the `Tokenizer` that should be used to process the field.
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct TextFieldIndexing { pub struct TextFieldIndexing {
record: IndexRecordOption, record: IndexRecordOption,
tokenizer: Cow<'static, str>, tokenizer: Cow<'static, str>,