Compare commits

11 Commits

Author SHA1 Message Date
Paul Masurel
7720d21265 Closes #896 - Facet reader related
Bugfix. Acquiring a facet reader on a segment that does not contain any
doc with this facet returns `None`.
2020-10-01 20:25:28 +09:00
Paul Masurel
c339b05789 Bumped version and edited changelog 2020-09-19 21:13:19 +09:00
Paul Masurel
2d3c657f9d Added Send Sync to collectors. 2020-09-19 21:04:44 +09:00
Paul Masurel
07f9b828ae Added Send and Sync to the Query trait. 2020-09-19 21:04:29 +09:00
Paul Masurel
3dd0322f4c Bumped version 2020-08-19 22:41:48 +09:00
Paul Masurel
2481c87be8 Block wand (#856) 2020-08-19 22:36:36 +09:00
Paul Masurel
b6a664b5f8 cargo fmt 2020-08-16 12:40:50 +09:00
lyj
25b666a7c9 Update occur.rs (#862) 2020-08-16 10:49:55 +09:00
Paul Masurel
9b41912e66 Bugfix (#861) 2020-08-12 16:06:24 +09:00
Paul Masurel
8e74bb98b5 Added field norm readers (#854) 2020-07-20 13:05:05 +09:00
Paul Masurel
6db8bb49d6 Assert nearly equals macro (#853)
* Assert nearly equals macro

* Renamed specialized_scorer in TermScorer
2020-07-17 16:40:41 +09:00
65 changed files with 1197 additions and 767 deletions

.gitignore
View File

@@ -1,4 +1,5 @@
tantivy.iml
proptest-regressions
*.swp
target
target/debug

View File

@@ -1,5 +1,23 @@
Tantivy 0.13.2
======================
Bugfix: acquiring a facet reader on a segment that does not contain any
doc with this facet used to return `None`; it now returns a facet reader
over an empty term dictionary. (#896)
Tantivy 0.13.1
======================
Made `Query` and `Collector` `Send + Sync`.
Updated misc dependency versions.
Tantivy 0.13.0
======================
Tantivy 0.13 introduces a change in the index format that will require
you to reindex your index (BlockWAND information is added in the skiplist).
The index size increase is minor as this information is only added for
full blocks.
If you have a massive index for which reindexing is not an option, please contact me
so that we can discuss possible solutions.
- Bugfix in `FuzzyTermQuery` not matching terms by prefix when it should (@Peachball)
- Relaxed constraints on the custom/tweak score functions. At the segment level, they can be mutable, and they are not required to be `Sync + Send`.
- `MMapDirectory::open` does not return a `Result` anymore.
@@ -17,6 +35,8 @@ while doc != TERMINATED {
The change made it possible to greatly simplify a lot of the docset's code.
- Misc internal optimization and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
- Added an offset option to the Top(.*)Collectors. (@robyoung)
- Added Block WAND. Performance on TOP-K on term-unions should be greatly increased. (@fulmicoton, and special thanks
to the PISA team for answering all my questions!)
Tantivy 0.12.0
======================
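To make the 0.13.2 entry concrete: a segment that contains no document with the facet now still hands back a facet reader, and the ordinal list for such docs simply comes back empty. A minimal sketch, assuming `searcher` and `facet_field` come from an index built as in the new `facet_reader` unit tests further down in this diff:

```rust
// Sketch only: `searcher` and `facet_field` are illustrative and assumed
// to come from an index built as in the facet_reader tests below.
let facet_reader = searcher
    .segment_reader(0u32)
    .facet_reader(facet_field)
    .unwrap(); // 0.13.2: no longer `None` when no doc carries the facet
let mut facet_ords = Vec::new();
facet_reader.facet_ords(0u32, &mut facet_ords);
assert!(facet_ords.is_empty()); // this doc has no facet value
```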

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.12.0"
version = "0.13.2"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -13,21 +13,21 @@ keywords = ["search", "information", "retrieval"]
edition = "2018"
[dependencies]
base64 = "0.12.0"
byteorder = "1.0"
crc32fast = "1.2.0"
once_cell = "1.0"
regex ={version = "1.3.0", default-features = false, features = ["std"]}
base64 = "0.12"
byteorder = "1"
crc32fast = "1"
once_cell = "1"
regex ={version = "1", default-features = false, features = ["std"]}
tantivy-fst = "0.3"
memmap = {version = "0.7", optional=true}
lz4 = {version="1.20", optional=true}
lz4 = {version="1", optional=true}
snap = "1"
atomicwrites = {version="0.2.2", optional=true}
tempfile = "3.0"
atomicwrites = {version="0.2", optional=true}
tempfile = "3"
log = "0.4"
serde = {version="1.0", features=["derive"]}
serde_json = "1.0"
num_cpus = "1.2"
serde = {version="1", features=["derive"]}
serde_json = "1"
num_cpus = "1"
fs2={version="0.4", optional=true}
levenshtein_automata = "0.2"
notify = {version="4", optional=true}
@@ -35,20 +35,20 @@ uuid = { version = "0.8", features = ["v4", "serde"] }
crossbeam = "0.7"
futures = {version = "0.3", features=["thread-pool"] }
owning_ref = "0.4"
stable_deref_trait = "1.0.0"
rust-stemmers = "1.2"
downcast-rs = { version="1.0" }
stable_deref_trait = "1"
rust-stemmers = "1"
downcast-rs = "1"
tantivy-query-grammar = { version="0.13", path="./query-grammar" }
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.4"
fnv = "1.0.6"
fnv = "1"
owned-read = "0.4"
failure = "0.1"
htmlescape = "0.3.1"
htmlescape = "0.3"
fail = "0.4"
murmurhash32 = "0.2"
chrono = "0.4"
smallvec = "1.0"
smallvec = "1"
rayon = "1"
[target.'cfg(windows)'.dependencies]
@@ -80,6 +80,7 @@ lz4-compression = ["lz4"]
failpoints = ["fail/failpoints"]
unstable = [] # useful for benches.
wasm-bindgen = ["uuid/wasm-bindgen"]
scoref64 = [] # scores are f64 instead of f32. was introduced to debug blockwand.
[workspace]
members = ["query-grammar"]

View File

@@ -14,7 +14,7 @@ use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, SegmentReader, TantivyError};
use tantivy::{doc, Index, Score, SegmentReader, TantivyError};
#[derive(Default)]
struct Stats {
@@ -114,7 +114,7 @@ struct StatsSegmentCollector {
impl SegmentCollector for StatsSegmentCollector {
type Fruit = Option<Stats>;
fn collect(&mut self, doc: u32, _score: f32) {
fn collect(&mut self, doc: u32, _score: Score) {
let value = self.fast_field_reader.get(doc) as f64;
self.stats.count += 1;
self.stats.sum += value;

View File

@@ -56,7 +56,7 @@ fn main() -> tantivy::Result<()> {
);
let top_docs_by_custom_score =
TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
let mut ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
let ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
let facet_dict = ingredient_reader.facet_dict();
let query_ords: HashSet<u64> = facets

View File

@@ -31,22 +31,12 @@ impl Occur {
/// Compose two occur values.
pub fn compose(left: Occur, right: Occur) -> Occur {
match left {
Occur::Should => right,
Occur::Must => {
if right == Occur::MustNot {
Occur::MustNot
} else {
Occur::Must
}
}
Occur::MustNot => {
if right == Occur::MustNot {
Occur::Must
} else {
Occur::MustNot
}
}
match (left, right) {
(Occur::Should, _) => right,
(Occur::Must, Occur::MustNot) => Occur::MustNot,
(Occur::Must, _) => Occur::Must,
(Occur::MustNot, Occur::MustNot) => Occur::Must,
(Occur::MustNot, _) => Occur::MustNot,
}
}
}
@@ -56,3 +46,27 @@ impl fmt::Display for Occur {
f.write_char(self.to_char())
}
}
#[cfg(test)]
mod test {
use crate::Occur;
#[test]
fn test_occur_compose() {
assert_eq!(Occur::compose(Occur::Should, Occur::Should), Occur::Should);
assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
assert_eq!(
Occur::compose(Occur::Should, Occur::MustNot),
Occur::MustNot
);
assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
assert_eq!(Occur::compose(Occur::Must, Occur::Must), Occur::Must);
assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
assert_eq!(
Occur::compose(Occur::MustNot, Occur::Should),
Occur::MustNot
);
assert_eq!(Occur::compose(Occur::MustNot, Occur::Must), Occur::MustNot);
assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
}
}

View File

@@ -180,7 +180,7 @@ fn occur_leaf<'a>() -> impl Parser<&'a str, Output = (Option<Occur>, UserInputAS
(optional(occur_symbol()), boosted_leaf())
}
fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f32> {
fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
(many1(digit()), optional((char('.'), many1(digit())))).map(
|(int_part, decimal_part_opt): (String, Option<(char, String)>)| {
let mut float_str = int_part;
@@ -188,18 +188,18 @@ fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f32> {
float_str.push(chr);
float_str.push_str(&decimal_str);
}
float_str.parse::<f32>().unwrap()
float_str.parse::<f64>().unwrap()
},
)
}
fn boost<'a>() -> impl Parser<&'a str, Output = f32> {
fn boost<'a>() -> impl Parser<&'a str, Output = f64> {
(char('^'), positive_float_number()).map(|(_, boost)| boost)
}
fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
(leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
Some(boost) if (boost - 1.0).abs() > std::f32::EPSILON => {
Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
UserInputAST::Boost(Box::new(leaf), boost)
}
_ => leaf,
@@ -282,11 +282,11 @@ mod test {
use super::*;
use combine::parser::Parser;
pub fn nearly_equals(a: f32, b: f32) -> bool {
pub fn nearly_equals(a: f64, b: f64) -> bool {
(a - b).abs() < 0.0005 * (a + b).abs()
}
fn assert_nearly_equals(expected: f32, val: f32) {
fn assert_nearly_equals(expected: f64, val: f64) {
assert!(
nearly_equals(val, expected),
"Got {}, expected {}.",
@@ -303,7 +303,7 @@ mod test {
#[test]
fn test_positive_float_number() {
fn valid_parse(float_str: &str, expected_val: f32, expected_remaining: &str) {
fn valid_parse(float_str: &str, expected_val: f64, expected_remaining: &str) {
let (val, remaining) = positive_float_number().parse(float_str).unwrap();
assert_eq!(remaining, expected_remaining);
assert_nearly_equals(val, expected_val);
@@ -311,9 +311,9 @@ mod test {
fn error_parse(float_str: &str) {
assert!(positive_float_number().parse(float_str).is_err());
}
valid_parse("1.0", 1.0f32, "");
valid_parse("1", 1.0f32, "");
valid_parse("0.234234 aaa", 0.234234f32, " aaa");
valid_parse("1.0", 1.0, "");
valid_parse("1", 1.0, "");
valid_parse("0.234234 aaa", 0.234234f64, " aaa");
error_parse(".3332");
error_parse("1.");
error_parse("-1.");

View File

@@ -87,7 +87,7 @@ impl UserInputBound {
pub enum UserInputAST {
Clause(Vec<(Option<Occur>, UserInputAST)>),
Leaf(Box<UserInputLeaf>),
Boost(Box<UserInputAST>, f32),
Boost(Box<UserInputAST>, f64),
}
impl UserInputAST {

View File

@@ -96,18 +96,18 @@ mod tests {
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1f32);
count_collector.collect(0u32, 1.0);
assert_eq!(count_collector.harvest(), 1);
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1f32);
count_collector.collect(0u32, 1.0);
assert_eq!(count_collector.harvest(), 1);
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1f32);
count_collector.collect(1u32, 1f32);
count_collector.collect(0u32, 1.0);
count_collector.collect(1u32, 1.0);
assert_eq!(count_collector.harvest(), 2);
}
}

View File

@@ -46,7 +46,7 @@ pub trait CustomScorer<TScore>: Sync {
impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
where
TCustomScorer: CustomScorer<TScore>,
TCustomScorer: CustomScorer<TScore> + Send + Sync,
TScore: 'static + PartialOrd + Clone + Send + Sync,
{
type Fruit = Vec<(TScore, DocAddress)>;

View File

@@ -133,7 +133,7 @@ impl<T> Fruit for T where T: Send + downcast_rs::Downcast {}
/// The collection logic itself is in the `SegmentCollector`.
///
/// Segments are not guaranteed to be visited in any specific order.
pub trait Collector: Sync {
pub trait Collector: Sync + Send {
/// `Fruit` is the type for the result of our collection.
/// e.g. `usize` for the `Count` collector.
type Fruit: Fruit;
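Because `Collector` (and `Query`) are now `Send + Sync`, an entire search can be shipped to another thread. A minimal sketch, assuming an already-built `index` and a parsed, boxed `query` (both illustrative names, not part of this diff):

```rust
// Sketch only: the collector, the query and the searcher may now all
// cross thread boundaries.
use tantivy::collector::Count;

let searcher = index.reader()?.searcher();
let handle = std::thread::spawn(move || searcher.search(&query, &Count));
let num_hits = handle.join().unwrap()?;
```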

View File

@@ -55,7 +55,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
type Fruit = Box<dyn Fruit>;
fn collect(&mut self, doc: u32, score: f32) {
fn collect(&mut self, doc: u32, score: Score) {
self.as_mut().collect(doc, score);
}
@@ -65,7 +65,7 @@ impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
}
pub trait BoxableSegmentCollector {
fn collect(&mut self, doc: u32, score: f32);
fn collect(&mut self, doc: u32, score: Score);
fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit>;
}
@@ -74,7 +74,7 @@ pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegment
impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
for SegmentCollectorWrapper<TSegmentCollector>
{
fn collect(&mut self, doc: u32, score: f32) {
fn collect(&mut self, doc: u32, score: Score) {
self.0.collect(doc, score);
}

View File

@@ -206,7 +206,7 @@ impl Collector for BytesFastFieldTestCollector {
impl SegmentCollector for BytesFastFieldSegmentCollector {
type Fruit = Vec<u8>;
fn collect(&mut self, doc: u32, _score: f32) {
fn collect(&mut self, doc: u32, _score: Score) {
let data = self.reader.get_bytes(doc);
self.vals.extend(data);
}

View File

@@ -52,8 +52,8 @@ use std::fmt;
/// let query = query_parser.parse_query("diary").unwrap();
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
///
/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
/// assert_eq!(top_docs[0].1, DocAddress(0, 1));
/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
/// ```
pub struct TopDocs(TopCollector<Score>);
@@ -139,8 +139,8 @@ impl TopDocs {
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2).and_offset(1)).unwrap();
///
/// assert_eq!(top_docs.len(), 2);
/// assert_eq!(&top_docs[0], &(0.5204813, DocAddress(0, 4)));
/// assert_eq!(&top_docs[1], &(0.4793185, DocAddress(0, 3)));
/// assert_eq!(top_docs[0].1, DocAddress(0, 4));
/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
/// ```
pub fn and_offset(self, offset: usize) -> TopDocs {
TopDocs(self.0.and_offset(offset))
@@ -303,7 +303,7 @@ impl TopDocs {
/// let popularity: u64 = popularity_reader.get(doc);
/// // Well.. For the sake of the example we use a simple logarithm
/// // function.
/// let popularity_boost_score = ((2u64 + popularity) as f32).log2();
/// let popularity_boost_score = ((2u64 + popularity) as Score).log2();
/// popularity_boost_score * original_score
/// }
/// });
@@ -324,7 +324,7 @@ impl TopDocs {
where
TScore: 'static + Send + Sync + Clone + PartialOrd,
TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker>,
TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker> + Send + Sync,
{
TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore())
}
@@ -438,7 +438,7 @@ impl TopDocs {
where
TScore: 'static + Send + Sync + Clone + PartialOrd,
TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer>,
TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer> + Send + Sync,
{
CustomScoreTopCollector::new(custom_score, self.0.into_tscore())
}
@@ -479,7 +479,7 @@ impl Collector for TopDocs {
let mut heap: BinaryHeap<ComparableDoc<Score, DocId>> = BinaryHeap::with_capacity(heap_len);
if let Some(delete_bitset) = reader.delete_bitset() {
let mut threshold = f32::MIN;
let mut threshold = Score::MIN;
weight.for_each_pruning(threshold, reader, &mut |doc, score| {
if delete_bitset.is_deleted(doc) {
return threshold;
@@ -491,16 +491,16 @@ impl Collector for TopDocs {
if heap.len() < heap_len {
heap.push(heap_item);
if heap.len() == heap_len {
threshold = heap.peek().map(|el| el.feature).unwrap_or(f32::MIN);
threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
}
return threshold;
}
*heap.peek_mut().unwrap() = heap_item;
threshold = heap.peek().map(|el| el.feature).unwrap_or(std::f32::MIN);
threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
threshold
})?;
} else {
weight.for_each_pruning(f32::MIN, reader, &mut |doc, score| {
weight.for_each_pruning(Score::MIN, reader, &mut |doc, score| {
let heap_item = ComparableDoc {
feature: score,
doc,
@@ -509,17 +509,17 @@ impl Collector for TopDocs {
heap.push(heap_item);
// TODO the threshold is suboptimal for heap.len == heap_len
if heap.len() == heap_len {
return heap.peek().map(|el| el.feature).unwrap_or(f32::MIN);
return heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
} else {
return f32::MIN;
return Score::MIN;
}
}
*heap.peek_mut().unwrap() = heap_item;
heap.peek().map(|el| el.feature).unwrap_or(std::f32::MIN)
heap.peek().map(|el| el.feature).unwrap_or(Score::MIN)
})?;
}
let fruit: Vec<(Score, DocAddress)> = heap
let fruit = heap
.into_sorted_vec()
.into_iter()
.map(|cid| (cid.feature, DocAddress(segment_ord, cid.doc)))
@@ -570,6 +570,13 @@ mod tests {
index
}
fn assert_results_equals(results: &[(Score, DocAddress)], expected: &[(Score, DocAddress)]) {
for (result, expected) in results.iter().zip(expected.iter()) {
assert_eq!(result.1, expected.1);
crate::assert_nearly_equals!(result.0, expected.0);
}
}
#[test]
fn test_top_collector_not_at_capacity() {
let index = make_index();
@@ -582,13 +589,13 @@ mod tests {
.searcher()
.search(&text_query, &TopDocs::with_limit(4))
.unwrap();
assert_eq!(
score_docs,
vec![
assert_results_equals(
&score_docs,
&[
(0.81221175, DocAddress(0u32, 1)),
(0.5376842, DocAddress(0u32, 2)),
(0.48527452, DocAddress(0, 0))
]
(0.48527452, DocAddress(0, 0)),
],
);
}
@@ -604,7 +611,7 @@ mod tests {
.searcher()
.search(&text_query, &TopDocs::with_limit(4).and_offset(2))
.unwrap();
assert_eq!(score_docs, vec![(0.48527452, DocAddress(0, 0))]);
assert_results_equals(&score_docs[..], &[(0.48527452, DocAddress(0, 0))]);
}
#[test]
@@ -619,12 +626,12 @@ mod tests {
.searcher()
.search(&text_query, &TopDocs::with_limit(2))
.unwrap();
assert_eq!(
score_docs,
vec![
assert_results_equals(
&score_docs,
&[
(0.81221175, DocAddress(0u32, 1)),
(0.5376842, DocAddress(0u32, 2)),
]
],
);
}
@@ -640,12 +647,12 @@ mod tests {
.searcher()
.search(&text_query, &TopDocs::with_limit(2).and_offset(1))
.unwrap();
assert_eq!(
score_docs,
vec![
assert_results_equals(
&score_docs[..],
&[
(0.5376842, DocAddress(0u32, 2)),
(0.48527452, DocAddress(0, 0))
]
(0.48527452, DocAddress(0, 0)),
],
);
}
@@ -706,8 +713,8 @@ mod tests {
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
assert_eq!(
top_docs,
vec![
&top_docs[..],
&[
(64, DocAddress(0, 1)),
(16, DocAddress(0, 2)),
(12, DocAddress(0, 0))

View File

@@ -49,7 +49,7 @@ pub trait ScoreTweaker<TScore>: Sync {
impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
where
TScoreTweaker: ScoreTweaker<TScore>,
TScoreTweaker: ScoreTweaker<TScore> + Send + Sync,
TScore: 'static + PartialOrd + Clone + Send + Sync,
{
type Fruit = Vec<(TScore, DocAddress)>;

View File

@@ -10,7 +10,9 @@ pub(crate) use self::bitset::TinySet;
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
pub use self::counting_writer::CountingWriter;
pub use self::serialize::{BinarySerializable, FixedSize};
pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
pub use self::vint::{
read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt,
};
pub use byteorder::LittleEndian as Endianness;
/// Segment's max doc must be `< MAX_DOC_LIMIT`.

View File

@@ -10,7 +10,7 @@ pub struct VInt(pub u64);
const STOP_BIT: u8 = 128;
pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
const START_2: u64 = 1 << 7;
const START_3: u64 = 1 << 14;
const START_4: u64 = 1 << 21;
@@ -29,7 +29,7 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
let val = u64::from(val);
const STOP_BIT: u64 = 128u64;
match val {
let (res, num_bytes) = match val {
0..=STOP_1 => (val | STOP_BIT, 1),
START_2..=STOP_2 => (
(val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
@@ -56,7 +56,9 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
| (STOP_BIT << (8 * 4)),
5,
),
}
};
LittleEndian::write_u64(&mut buf[..], res);
&buf[0..num_bytes]
}
/// Returns the number of bytes covered by a
@@ -85,23 +87,26 @@ fn vint_len(data: &[u8]) -> usize {
/// If the buffer does not start by a valid
/// vint payload
pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
let vlen = vint_len(*data);
let (result, vlen) = read_u32_vint_no_advance(*data);
*data = &data[vlen..];
result
}
pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
let vlen = vint_len(data);
let mut result = 0u32;
let mut shift = 0u64;
for &b in &data[..vlen] {
result |= u32::from(b & 127u8) << shift;
shift += 7;
}
*data = &data[vlen..];
result
(result, vlen)
}
/// Write a `u32` as a vint payload.
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
let (val, num_bytes) = serialize_vint_u32(val);
let mut buffer = [0u8; 8];
LittleEndian::write_u64(&mut buffer, val);
writer.write_all(&buffer[..num_bytes])
let mut buf = [0u8; 8];
let data = serialize_vint_u32(val, &mut buf);
writer.write_all(&data)
}
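Taken together, the two new signatures compose into a simple round trip; a sanity-check sketch using only the functions from this file:

```rust
// Sketch: serialize into a caller-provided buffer, then decode without
// advancing an input slice.
let mut buf = [0u8; 8];
let bytes: &[u8] = serialize_vint_u32(300u32, &mut buf);
let (decoded, num_bytes) = read_u32_vint_no_advance(bytes);
assert_eq!(decoded, 300u32);
assert_eq!(num_bytes, bytes.len());
```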
impl VInt {
@@ -172,7 +177,6 @@ mod tests {
use super::serialize_vint_u32;
use super::VInt;
use crate::common::BinarySerializable;
use byteorder::{ByteOrder, LittleEndian};
fn aux_test_vint(val: u64) {
let mut v = [14u8; 10];
@@ -208,12 +212,10 @@ mod tests {
fn aux_test_serialize_vint_u32(val: u32) {
let mut buffer = [0u8; 10];
let mut buffer2 = [0u8; 10];
let mut buffer2 = [0u8; 8];
let len_vint = VInt(val as u64).serialize_into(&mut buffer);
let (vint, len) = serialize_vint_u32(val);
assert_eq!(len, len_vint, "len wrong for val {}", val);
LittleEndian::write_u64(&mut buffer2, vint);
assert_eq!(&buffer[..len], &buffer2[..len], "array wrong for {}", val);
let res2 = serialize_vint_u32(val, &mut buffer2);
assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
}
#[test]

View File

@@ -112,8 +112,10 @@ impl SegmentReader {
return None;
}
let term_ords_reader = self.fast_fields().u64s(field)?;
let termdict_source = self.termdict_composite.open_read(field)?;
let termdict = TermDictionary::from_source(&termdict_source);
let termdict = self.termdict_composite
.open_read(field)
.map(|source| TermDictionary::from_source(&source))
.unwrap_or_else(TermDictionary::empty);
let facet_reader = FacetReader::new(term_ords_reader, termdict);
Some(facet_reader)
}
@@ -126,8 +128,8 @@ impl SegmentReader {
/// They are simply stored as a fast field, serialized in
/// the `.fieldnorm` file of the segment.
pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader {
if let Some(fieldnorm_source) = self.fieldnorm_readers.get_field(field) {
fieldnorm_source
if let Some(fieldnorm_reader) = self.fieldnorm_readers.get_field(field) {
fieldnorm_reader
} else {
let field_name = self.schema.get_field_name(field);
let err_msg = format!(
@@ -179,7 +181,7 @@ impl SegmentReader {
Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
let fieldnorm_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorm_readers = FieldNormReaders::new(fieldnorm_data)?;
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
let delete_bitset_opt = if segment.meta().has_deletes() {
let delete_data = segment.open_read(SegmentComponent::DELETE)?;

View File

@@ -183,23 +183,23 @@ impl BinarySerializable for VersionedFooter {
reader.read_exact(&mut buf[..])?;
let mut cursor = &buf[..];
let version = u32::deserialize(&mut cursor)?;
if version == 1 {
let crc32 = u32::deserialize(&mut cursor)?;
let store_compression = String::deserialize(&mut cursor)?;
Ok(VersionedFooter::V1 {
crc32,
store_compression,
})
} else if version == 2 {
let crc32 = u32::deserialize(&mut cursor)?;
let store_compression = String::deserialize(&mut cursor)?;
Ok(VersionedFooter::V2 {
crc32,
store_compression,
})
} else {
Ok(VersionedFooter::UnknownVersion)
if version != 1 && version != 2 {
return Ok(VersionedFooter::UnknownVersion);
}
let crc32 = u32::deserialize(&mut cursor)?;
let store_compression = String::deserialize(&mut cursor)?;
Ok(if version == 1 {
VersionedFooter::V1 {
crc32,
store_compression,
}
} else {
assert_eq!(version, 2);
VersionedFooter::V2 {
crc32,
store_compression,
}
})
}
}

View File

@@ -5,7 +5,7 @@ use std::sync::RwLock;
use std::sync::Weak;
/// Type alias for callbacks registered when watching files of a `Directory`.
pub type WatchCallback = Box<dyn Fn() -> () + Sync + Send>;
pub type WatchCallback = Box<dyn Fn() + Sync + Send>;
/// Helper struct to implement the watch method in `Directory` implementations.
///
@@ -32,7 +32,7 @@ impl WatchHandle {
}
impl WatchCallbackList {
/// Suscribes a new callback and returns a handle that controls the lifetime of the callback.
/// Subscribes a new callback and returns a handle that controls the lifetime of the callback.
pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
let watch_callback_arc = Arc::new(watch_callback);
let watch_callback_weak = Arc::downgrade(&watch_callback_arc);
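With the simplified alias, any `Sync + Send` closure can be registered. A sketch, assuming a `watch_callback_list: WatchCallbackList` value already exists:

```rust
// Sketch only: keep the handle alive for as long as the callback should fire.
let callback: WatchCallback = Box::new(|| println!("meta file changed"));
let _handle: WatchHandle = watch_callback_list.subscribe(callback);
```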

View File

@@ -9,6 +9,8 @@ use std::io::Write;
/// Write a delete `BitSet`
///
/// where `delete_bitset` is the set of deleted `DocId`.
/// Warning: this function does not call terminate. The caller is in charge of
/// closing the writer properly.
pub fn write_delete_bitset(
delete_bitset: &BitSet,
max_doc: u32,
@@ -42,6 +44,24 @@ pub struct DeleteBitSet {
}
impl DeleteBitSet {
#[cfg(test)]
pub(crate) fn for_test(docs: &[DocId], max_doc: u32) -> DeleteBitSet {
use crate::directory::{Directory, RAMDirectory, TerminatingWrite};
use std::path::Path;
assert!(docs.iter().all(|&doc| doc < max_doc));
let mut bitset = BitSet::with_max_value(max_doc);
for &doc in docs {
bitset.insert(doc);
}
let mut directory = RAMDirectory::create();
let path = Path::new("dummydeletebitset");
let mut wrt = directory.open_write(path).unwrap();
write_delete_bitset(&bitset, max_doc, &mut wrt).unwrap();
wrt.terminate().unwrap();
let source = directory.open_read(path).unwrap();
Self::open(source)
}
/// Opens a delete bitset given its data source.
pub fn open(data: ReadOnlySource) -> DeleteBitSet {
let num_deleted: usize = data
@@ -83,42 +103,35 @@ impl HasLen for DeleteBitSet {
#[cfg(test)]
mod tests {
use super::*;
use crate::directory::*;
use std::path::PathBuf;
use super::DeleteBitSet;
use crate::common::HasLen;
fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {
let test_path = PathBuf::from("test");
let mut directory = RAMDirectory::create();
{
let mut writer = directory.open_write(&*test_path).unwrap();
write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
writer.terminate().unwrap();
#[test]
fn test_delete_bitset_empty() {
let delete_bitset = DeleteBitSet::for_test(&[], 10);
for doc in 0..10 {
assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
}
let source = directory.open_read(&test_path).unwrap();
let delete_bitset = DeleteBitSet::open(source);
for doc in 0..max_doc {
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
}
assert_eq!(delete_bitset.len(), bitset.len());
assert_eq!(delete_bitset.len(), 0);
}
#[test]
fn test_delete_bitset() {
{
let mut bitset = BitSet::with_max_value(10);
bitset.insert(1);
bitset.insert(9);
test_delete_bitset_helper(&bitset, 10);
}
{
let mut bitset = BitSet::with_max_value(8);
bitset.insert(1);
bitset.insert(2);
bitset.insert(3);
bitset.insert(5);
bitset.insert(7);
test_delete_bitset_helper(&bitset, 8);
let delete_bitset = DeleteBitSet::for_test(&[1, 9], 10);
assert!(delete_bitset.is_alive(0));
assert!(delete_bitset.is_deleted(1));
assert!(delete_bitset.is_alive(2));
assert!(delete_bitset.is_alive(3));
assert!(delete_bitset.is_alive(4));
assert!(delete_bitset.is_alive(5));
assert!(delete_bitset.is_alive(6));
assert!(delete_bitset.is_alive(7));
assert!(delete_bitset.is_alive(8));
assert!(delete_bitset.is_deleted(9));
for doc in 0..10 {
assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
}
assert_eq!(delete_bitset.len(), 2);
}
}

View File

@@ -73,7 +73,52 @@ impl FacetReader {
}
/// Return the list of facet ordinals associated to a document.
pub fn facet_ords(&mut self, doc: DocId, output: &mut Vec<u64>) {
pub fn facet_ords(&self, doc: DocId, output: &mut Vec<u64>) {
self.term_ords.get_vals(doc, output);
}
}
#[cfg(test)]
mod tests {
use crate::{Document, schema::{Facet, SchemaBuilder}};
use crate::Index;
#[test]
fn test_facet_not_populated_for_all_docs() -> crate::Result<()> {
let mut schema_builder = SchemaBuilder::default();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
index_writer.add_document(doc!(facet_field=>Facet::from_text("/a/b")));
index_writer.add_document(Document::default());
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let facet_reader = searcher.segment_reader(0u32).facet_reader(facet_field).unwrap();
let mut facet_ords = Vec::new();
facet_reader.facet_ords(0u32, &mut facet_ords);
assert_eq!(&facet_ords, &[2u64]);
facet_reader.facet_ords(1u32, &mut facet_ords);
assert!(facet_ords.is_empty());
Ok(())
}
#[test]
fn test_facet_not_populated_for_any_docs() -> crate::Result<()> {
let mut schema_builder = SchemaBuilder::default();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
index_writer.add_document(Document::default());
index_writer.add_document(Document::default());
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let facet_reader = searcher.segment_reader(0u32).facet_reader(facet_field).unwrap();
let mut facet_ords = Vec::new();
facet_reader.facet_ords(0u32, &mut facet_ords);
assert!(facet_ords.is_empty());
facet_reader.facet_ords(1u32, &mut facet_ords);
assert!(facet_ords.is_empty());
Ok(())
}
}

View File

@@ -126,6 +126,7 @@ impl FastFieldsWriter {
for field_writer in &self.single_value_writers {
field_writer.serialize(serializer)?;
}
for field_writer in &self.multi_values_writers {
let field = field_writer.field();
field_writer.serialize(serializer, mapping.get(&field))?;

View File

@@ -19,7 +19,7 @@ pub struct FieldNormReaders {
impl FieldNormReaders {
/// Creates a field norm reader.
pub fn new(source: ReadOnlySource) -> crate::Result<FieldNormReaders> {
pub fn open(source: ReadOnlySource) -> crate::Result<FieldNormReaders> {
let data = CompositeFile::open(&source)?;
Ok(FieldNormReaders {
data: Arc::new(data),
@@ -103,11 +103,9 @@ impl FieldNormReader {
pub fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
fieldnorm_to_id(fieldnorm)
}
}
#[cfg(test)]
impl From<&[u32]> for FieldNormReader {
fn from(field_norms: &[u32]) -> FieldNormReader {
#[cfg(test)]
pub fn for_test(field_norms: &[u32]) -> FieldNormReader {
let field_norms_id = field_norms
.iter()
.cloned()
@@ -119,3 +117,20 @@ impl From<&[u32]> for FieldNormReader {
}
}
}
#[cfg(test)]
mod tests {
use crate::fieldnorm::FieldNormReader;
#[test]
fn test_from_fieldnorms_array() {
let fieldnorms = &[1, 2, 3, 4, 1_000_000];
let fieldnorm_reader = FieldNormReader::for_test(fieldnorms);
assert_eq!(fieldnorm_reader.num_docs(), 5);
assert_eq!(fieldnorm_reader.fieldnorm(0), 1);
assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
assert_eq!(fieldnorm_reader.fieldnorm(2), 3);
assert_eq!(fieldnorm_reader.fieldnorm(3), 4);
assert_eq!(fieldnorm_reader.fieldnorm(4), 983_064);
}
}

View File

@@ -536,6 +536,7 @@ impl IndexWriter {
/// when no documents are remaining.
///
/// Returns the former segment_ready channel.
#[allow(unused_must_use)]
fn recreate_document_channel(&mut self) -> OperationReceiver {
let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
@@ -575,7 +576,7 @@ impl IndexWriter {
//
// This will drop the document queue, and the thread
// should terminate.
mem::replace(self, new_index_writer);
*self = new_index_writer;
// Drains the document receiver pipeline :
// Workers don't need to index the pending documents.

View File

@@ -12,17 +12,18 @@ use crate::fieldnorm::FieldNormsSerializer;
use crate::fieldnorm::FieldNormsWriter;
use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::indexer::SegmentSerializer;
use crate::postings::InvertedIndexSerializer;
use crate::postings::Postings;
use crate::postings::{InvertedIndexSerializer, SegmentPostings};
use crate::schema::Cardinality;
use crate::schema::FieldType;
use crate::schema::{Field, Schema};
use crate::store::StoreWriter;
use crate::termdict::TermMerger;
use crate::termdict::TermOrdinal;
use crate::{DocId, SegmentComponent};
use crate::{DocId, InvertedIndexReader, SegmentComponent};
use std::cmp;
use std::collections::HashMap;
use std::sync::Arc;
fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 {
let mut total_tokens = 0u64;
@@ -497,7 +498,7 @@ impl IndexMerger {
) -> crate::Result<Option<TermOrdinalMapping>> {
let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
let mut delta_computer = DeltaComputer::new();
let field_readers = self
let field_readers: Vec<Arc<InvertedIndexReader>> = self
.readers
.iter()
.map(|reader| reader.inverted_index(indexed_field))
@@ -563,47 +564,45 @@ impl IndexMerger {
indexed. Have you modified the schema?",
);
let mut segment_postings_containing_the_term: Vec<(usize, SegmentPostings)> = vec![];
while merged_terms.advance() {
segment_postings_containing_the_term.clear();
let term_bytes: &[u8] = merged_terms.key();
let mut total_doc_freq = 0;
// Let's compute the list of non-empty posting lists
let segment_postings: Vec<_> = merged_terms
.current_kvs()
.iter()
.flat_map(|heap_item| {
let segment_ord = heap_item.segment_ord;
let term_info = heap_item.streamer.value();
let segment_reader = &self.readers[heap_item.segment_ord];
let inverted_index = segment_reader.inverted_index(indexed_field);
let mut segment_postings = inverted_index
.read_postings_from_terminfo(term_info, segment_postings_option);
let mut doc = segment_postings.doc();
while doc != TERMINATED {
if !segment_reader.is_deleted(doc) {
return Some((segment_ord, segment_postings));
}
doc = segment_postings.advance();
}
None
})
.collect();
for heap_item in merged_terms.current_kvs() {
let segment_ord = heap_item.segment_ord;
let term_info = heap_item.streamer.value();
let segment_reader = &self.readers[heap_item.segment_ord];
let inverted_index: &InvertedIndexReader = &*field_readers[segment_ord];
let segment_postings =
inverted_index.read_postings_from_terminfo(term_info, segment_postings_option);
let delete_bitset_opt = segment_reader.delete_bitset();
let doc_freq = if let Some(delete_bitset) = delete_bitset_opt {
segment_postings.doc_freq_given_deletes(delete_bitset)
} else {
segment_postings.doc_freq()
};
if doc_freq > 0u32 {
total_doc_freq += doc_freq;
segment_postings_containing_the_term.push((segment_ord, segment_postings));
}
}
// At this point, `segment_postings` contains the posting list
// of all of the segments containing the given term.
// of all of the segments containing the given term (and that are non-empty)
//
// These segments are non-empty and advance has already been called.
if segment_postings.is_empty() {
if total_doc_freq == 0u32 {
// All docs that used to contain the term have been deleted. The `term` will be
// entirely removed.
continue;
}
// If not, the `term` will be entirely removed.
// We know that there is at least one document containing
// the term, so we add it.
let term_doc_freq = segment_postings
.iter()
.map(|(_, segment_posting)| segment_posting.doc_freq())
.sum();
let to_term_ord = field_serializer.new_term(term_bytes, term_doc_freq)?;
let to_term_ord = field_serializer.new_term(term_bytes, total_doc_freq)?;
if let Some(ref mut term_ord_mapping) = term_ord_mapping_opt {
for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
@@ -613,7 +612,9 @@ impl IndexMerger {
// We can now serialize this postings, by pushing each document to the
// postings serializer.
for (segment_ord, mut segment_postings) in segment_postings {
for (segment_ord, mut segment_postings) in
segment_postings_containing_the_term.drain(..)
{
let old_to_new_doc_id = &merged_doc_id_map[segment_ord];
let mut doc = segment_postings.doc();
@@ -686,7 +687,7 @@ impl SerializableSegment for IndexMerger {
let fieldnorm_data = serializer
.segment()
.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorm_readers = FieldNormReaders::new(fieldnorm_data)?;
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
let term_ord_mappings =
self.write_postings(serializer.get_postings_serializer(), fieldnorm_readers)?;
self.write_fast_fields(serializer.get_fast_field_serializer(), term_ord_mappings)?;
@@ -1558,7 +1559,7 @@ mod tests {
let searcher = reader.searcher();
let mut term_scorer = term_query
.specialized_weight(&searcher, true)
.specialized_scorer(searcher.segment_reader(0u32), 1.0f32)?;
.specialized_scorer(searcher.segment_reader(0u32), 1.0)?;
assert_eq!(term_scorer.doc(), 0);
assert_nearly_equals!(term_scorer.block_max_score(), 0.0079681855);
assert_nearly_equals!(term_scorer.score(), 0.0079681855);
@@ -1573,7 +1574,7 @@ mod tests {
for segment_reader in searcher.segment_readers() {
let mut term_scorer = term_query
.specialized_weight(&searcher, true)
.specialized_scorer(segment_reader, 1.0f32)?;
.specialized_scorer(segment_reader, 1.0)?;
// the difference compared to before is intrinsic to the bm25 formula. no worries there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);
@@ -1597,7 +1598,7 @@ mod tests {
let segment_reader = searcher.segment_reader(0u32);
let mut term_scorer = term_query
.specialized_weight(&searcher, true)
.specialized_scorer(segment_reader, 1.0f32)?;
.specialized_scorer(segment_reader, 1.0)?;
// the difference compared to before is intrinsic to the bm25 formula. no worries there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);

View File

@@ -130,15 +130,14 @@ fn merge(
// An IndexMerger is like a "view" of our merged segments.
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
let merged_segment_id = merged_segment.id();
// ... we just serialize this index merger in our new segment to merge the two segments.
let segment_serializer = SegmentSerializer::for_segment(merged_segment)?;
let segment_serializer = SegmentSerializer::for_segment(merged_segment.clone())?;
let num_docs = merger.write(segment_serializer)?;
let segment_meta = index.new_segment_meta(merged_segment_id, num_docs);
let merged_segment_id = merged_segment.id();
let segment_meta = index.new_segment_meta(merged_segment_id, num_docs);
Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
}

View File

@@ -151,7 +151,7 @@ impl SegmentWriter {
if let Some(unordered_term_id) = unordered_term_id_opt {
self.fast_field_writers
.get_multivalue_writer(field)
.expect("multified writer for facet missing")
.expect("writer for facet missing")
.add_val(unordered_term_id);
}
}
@@ -287,7 +287,7 @@ fn write(
let fieldnorm_data = serializer
.segment()
.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorm_readers = FieldNormReaders::new(fieldnorm_data)?;
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
let term_ord_map =
multifield_postings.serialize(serializer.get_postings_serializer(), fieldnorm_readers)?;
fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;

View File

@@ -245,11 +245,18 @@ pub type DocId = u32;
/// with opstamp `n+1`.
pub type Opstamp = u64;
/// A f32 that represents the relevance of the document to the query
/// A Score that represents the relevance of the document to the query
///
/// This is modelled internally as a `f32`. The
/// larger the number, the more relevant the document
/// to the search
/// This is modelled internally as a `f64`, because tantivy was compiled with the `scoref64`
/// feature. The larger the number, the more relevant the document is to the search query.
#[cfg(feature = "scoref64")]
pub type Score = f64;
/// A Score that represents the relevance of the document to the query
///
/// This is modelled internally as a `f32`. The larger the number, the more relevant
/// the document is to the search query.
#[cfg(not(feature = "scoref64"))]
pub type Score = f32;
/// A `SegmentLocalId` identifies a segment.
@@ -282,7 +289,6 @@ pub struct DocAddress(pub SegmentLocalId, pub DocId);
#[cfg(test)]
mod tests {
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
use crate::core::SegmentReader;
use crate::docset::{DocSet, TERMINATED};
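The practical consequence of the alias split is that downstream code spelling out `f32` in scorer or collector signatures stops compiling when tantivy is built with `--features scoref64`. Writing against the alias keeps both configurations working; a sketch:

```rust
// Sketch: use the alias rather than a concrete float type, so this compiles
// both with and without the `scoref64` feature.
use tantivy::Score;

fn boost_score(score: Score) -> Score {
    score * 1.5
}
```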

View File

@@ -78,6 +78,7 @@ impl Positions {
}
}
#[derive(Clone)]
pub struct PositionReader {
skip_read: OwnedRead,
position_read: OwnedRead,

View File

@@ -9,9 +9,9 @@ use crate::query::BM25Weight;
use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TERMINATED};
fn max_f32<I: Iterator<Item = f32>>(mut it: I) -> Option<f32> {
fn max_score<I: Iterator<Item = Score>>(mut it: I) -> Option<Score> {
if let Some(first) = it.next() {
Some(it.fold(first, f32::max))
Some(it.fold(first, Score::max))
} else {
None
}
@@ -24,6 +24,7 @@ fn max_f32<I: Iterator<Item = f32>>(mut it: I) -> Option<f32> {
///
/// While it is useful for some very specific high-performance
/// use cases, you should prefer using `SegmentPostings` for most usage.
#[derive(Clone)]
pub struct BlockSegmentPostings {
pub(crate) doc_decoder: BlockDecoder,
loaded_offset: usize,
@@ -58,10 +59,14 @@ fn decode_vint_block(
doc_offset: DocId,
num_vint_docs: usize,
) {
doc_decoder.fill(TERMINATED);
let num_consumed_bytes = doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs);
let num_consumed_bytes =
doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs, TERMINATED);
if let Some(freq_decoder) = freq_decoder_opt {
freq_decoder.uncompress_vint_unsorted(&data[num_consumed_bytes..], num_vint_docs);
freq_decoder.uncompress_vint_unsorted(
&data[num_consumed_bytes..],
num_vint_docs,
TERMINATED,
);
}
}
@@ -124,27 +129,33 @@ impl BlockSegmentPostings {
fieldnorm_reader: &FieldNormReader,
bm25_weight: &BM25Weight,
) -> Score {
let (block_max_score_cache, skip_reader, doc_decoder, freq_decoder) = (
&mut self.block_max_score_cache,
&self.skip_reader,
&self.doc_decoder,
&self.freq_decoder,
);
*block_max_score_cache.get_or_insert_with(|| {
skip_reader
.block_max_score(bm25_weight)
.or_else(|| {
let docs = doc_decoder.output_array();
let freqs = freq_decoder.output_array();
max_f32(docs.iter().cloned().zip(freqs.iter().cloned()).map(
|(doc, term_freq)| {
let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
bm25_weight.score(fieldnorm_id, term_freq)
},
))
})
.unwrap_or(0f32)
})
if let Some(score) = self.block_max_score_cache {
return score;
}
if let Some(skip_reader_max_score) = self.skip_reader.block_max_score(bm25_weight) {
// if we are on a full block, the skip reader should have the block max information
// for us
self.block_max_score_cache = Some(skip_reader_max_score);
return skip_reader_max_score;
}
// this is the last block of the segment posting list.
// If it is actually loaded, we can compute block max manually.
if self.block_is_loaded() {
let docs = self.doc_decoder.output_array().iter().cloned();
let freqs = self.freq_decoder.output_array().iter().cloned();
let bm25_scores = docs.zip(freqs).map(|(doc, term_freq)| {
let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
bm25_weight.score(fieldnorm_id, term_freq)
});
let block_max_score = max_score(bm25_scores).unwrap_or(0.0);
self.block_max_score_cache = Some(block_max_score);
return block_max_score;
}
// We do not have access to any good block max value. We return bm25_weight.max_score()
// as it is a valid upper bound.
//
// We do not cache it, however, so that it gets recomputed once a block is loaded.
bm25_weight.max_score()
}
pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
@@ -164,6 +175,7 @@ impl BlockSegmentPostings {
pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: ReadOnlySource) {
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
self.data = ReadOnlySource::new(postings_data);
self.block_max_score_cache = None;
self.loaded_offset = std::usize::MAX;
if let Some(skip_data) = skip_data_opt {
self.skip_reader.reset(skip_data, doc_freq);
@@ -242,7 +254,7 @@ impl BlockSegmentPostings {
/// If all docs are smaller than target, the block loaded may be empty,
/// or be the last, possibly incomplete, VInt block.
pub fn seek(&mut self, target_doc: DocId) {
self.skip_reader.seek(target_doc);
self.shallow_seek(target_doc);
self.load_block();
}
@@ -257,7 +269,9 @@ impl BlockSegmentPostings {
/// If all docs are smaller than target, the block loaded may be empty,
/// or be the last, possibly incomplete, VInt block.
pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
self.skip_reader.seek(target_doc);
if self.skip_reader.seek(target_doc) {
self.block_max_score_cache = None;
}
}
pub(crate) fn block_is_loaded(&self) -> bool {
@@ -265,7 +279,6 @@ impl BlockSegmentPostings {
}
pub(crate) fn load_block(&mut self) {
self.block_max_score_cache = None;
let offset = self.skip_reader.byte_offset();
if self.loaded_offset == offset {
return;
@@ -318,6 +331,7 @@ impl BlockSegmentPostings {
/// Returns false iff there were no remaining blocks.
pub fn advance(&mut self) {
self.skip_reader.advance();
self.block_max_score_cache = None;
self.load_block();
}
@@ -460,21 +474,19 @@ mod tests {
}
#[test]
fn test_block_segment_postings_seek2() {
fn test_block_segment_postings_seek() {
let mut docs = vec![0];
for i in 0..1300 {
docs.push((i * i / 100) + i);
}
let mut block_postings = build_block_postings(&docs[..]);
for i in vec![0, 424, 10000] {
block_postings.shallow_seek(i);
block_postings.load_block();
block_postings.seek(i);
let docs = block_postings.docs();
assert!(docs[0] <= i);
assert!(docs.last().cloned().unwrap_or(0u32) >= i);
}
block_postings.shallow_seek(100_000);
block_postings.load_block();
block_postings.seek(100_000);
assert_eq!(block_postings.doc(COMPRESSION_BLOCK_SIZE - 1), TERMINATED);
}

View File

@@ -1,5 +1,4 @@
use crate::common::FixedSize;
use crate::docset::TERMINATED;
use bitpacking::{BitPacker, BitPacker4x};
pub const COMPRESSION_BLOCK_SIZE: usize = BitPacker4x::BLOCK_LEN;
@@ -53,8 +52,10 @@ impl BlockEncoder {
/// We ensure that the OutputBuffer is align on 128 bits
/// in order to run SSE2 linear search on it.
#[repr(align(128))]
#[derive(Clone)]
pub(crate) struct AlignedBuffer(pub [u32; COMPRESSION_BLOCK_SIZE]);
#[derive(Clone)]
pub struct BlockDecoder {
bitpacker: BitPacker4x,
output: AlignedBuffer,
@@ -107,10 +108,6 @@ impl BlockDecoder {
pub fn output(&self, idx: usize) -> u32 {
self.output.0[idx]
}
pub fn fill(&mut self, val: u32) {
self.output.0.iter_mut().for_each(|el| *el = val);
}
}
pub trait VIntEncoder {
@@ -147,11 +144,14 @@ pub trait VIntDecoder {
/// For instance, if delta encoded are `1, 3, 9`, and the
/// `offset` is 5, then the output will be:
/// `5 + 1 = 6, 6 + 3= 9, 9 + 9 = 18`
///
/// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
fn uncompress_vint_sorted(
&mut self,
compressed_data: &[u8],
offset: u32,
num_els: usize,
padding: u32,
) -> usize;
/// Uncompress an array of `u32s`, compressed using variable
@@ -159,7 +159,14 @@ pub trait VIntDecoder {
///
/// The method takes a number of int to decompress, and returns
/// the amount of bytes that were read to decompress them.
fn uncompress_vint_unsorted(&mut self, compressed_data: &[u8], num_els: usize) -> usize;
///
/// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
fn uncompress_vint_unsorted(
&mut self,
compressed_data: &[u8],
num_els: usize,
padding: u32,
) -> usize;
}
impl VIntEncoder for BlockEncoder {
@@ -178,13 +185,21 @@ impl VIntDecoder for BlockDecoder {
compressed_data: &[u8],
offset: u32,
num_els: usize,
padding: u32,
) -> usize {
self.output_len = num_els;
self.output.0.iter_mut().for_each(|el| *el = padding);
vint::uncompress_sorted(compressed_data, &mut self.output.0[..num_els], offset)
}
fn uncompress_vint_unsorted(&mut self, compressed_data: &[u8], num_els: usize) -> usize {
fn uncompress_vint_unsorted(
&mut self,
compressed_data: &[u8],
num_els: usize,
padding: u32,
) -> usize {
self.output_len = num_els;
self.output.0.iter_mut().for_each(|el| *el = padding);
vint::uncompress_unsorted(compressed_data, &mut self.output.0[..num_els])
}
}
@@ -193,6 +208,7 @@ impl VIntDecoder for BlockDecoder {
pub mod tests {
use super::*;
use crate::TERMINATED;
#[test]
fn test_encode_sorted_block() {
@@ -271,18 +287,20 @@ pub mod tests {
}
#[test]
fn test_encode_vint() {
{
let expected_length = 154;
let mut encoder = BlockEncoder::new();
let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
for offset in &[0u32, 1u32, 2u32] {
let encoded_data = encoder.compress_vint_sorted(&input, *offset);
assert!(encoded_data.len() <= expected_length);
let mut decoder = BlockDecoder::default();
let consumed_num_bytes =
decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len());
assert_eq!(consumed_num_bytes, encoded_data.len());
assert_eq!(input, decoder.output_array());
const PADDING_VALUE: u32 = 234_234_345u32;
let expected_length = 154;
let mut encoder = BlockEncoder::new();
let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
for offset in &[0u32, 1u32, 2u32] {
let encoded_data = encoder.compress_vint_sorted(&input, *offset);
assert!(encoded_data.len() <= expected_length);
let mut decoder = BlockDecoder::default();
let consumed_num_bytes =
decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len(), PADDING_VALUE);
assert_eq!(consumed_num_bytes, encoded_data.len());
assert_eq!(input, decoder.output_array());
for i in input.len()..COMPRESSION_BLOCK_SIZE {
assert_eq!(decoder.output(i), PADDING_VALUE);
}
}
}

View File

@@ -65,47 +65,42 @@ pub mod tests {
use std::iter;
#[test]
pub fn test_position_write() {
pub fn test_position_write() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut segment = index.new_segment();
let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap();
{
let mut field_serializer = posting_serializer
.new_field(text_field, 120 * 4, None)
.unwrap();
field_serializer.new_term("abc".as_bytes(), 12u32).unwrap();
for doc_id in 0u32..120u32 {
let delta_positions = vec![1, 2, 3, 2];
field_serializer
.write_doc(doc_id, 4, &delta_positions)
.unwrap();
}
field_serializer.close_term().unwrap();
let mut posting_serializer = InvertedIndexSerializer::open(&mut segment)?;
let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4, None)?;
field_serializer.new_term("abc".as_bytes(), 12u32)?;
for doc_id in 0u32..120u32 {
let delta_positions = vec![1, 2, 3, 2];
field_serializer.write_doc(doc_id, 4, &delta_positions)?;
}
posting_serializer.close().unwrap();
let read = segment.open_read(SegmentComponent::POSITIONS).unwrap();
field_serializer.close_term()?;
posting_serializer.close()?;
let read = segment.open_read(SegmentComponent::POSITIONS)?;
assert!(read.len() <= 140);
Ok(())
}
#[test]
pub fn test_skip_positions() {
pub fn test_skip_positions() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 30_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 30_000_000)?;
index_writer.add_document(doc!(title => r#"abc abc abc"#));
index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
for _ in 0..1_000 {
index_writer.add_document(doc!(title => r#"abc abc abc"#));
}
index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
index_writer.commit().unwrap();
index_writer.commit()?;
let searcher = index.reader().unwrap().searcher();
let searcher = index.reader()?.searcher();
let inverted_index = searcher.segment_reader(0u32).inverted_index(title);
let term = Term::from_field_text(title, "abc");
let mut positions = Vec::new();
@@ -160,6 +155,7 @@ pub mod tests {
postings.positions(&mut positions);
assert_eq!(&[0, 5], &positions[..]);
}
Ok(())
}
#[test]

View File

@@ -75,7 +75,9 @@ pub(crate) trait Recorder: Copy + 'static {
serializer: &mut FieldSerializer<'_>,
heap: &MemoryArena,
) -> io::Result<()>;
/// Returns the number of document containg this term.
/// Returns the number of document containing this term.
///
/// Returns `None` if not available.
fn term_doc_freq(&self) -> Option<u32>;
}

View File

@@ -10,10 +10,10 @@ use crate::postings::BlockSearcher;
use crate::postings::Postings;
use crate::schema::IndexRecordOption;
use crate::DocId;
use crate::{DocId, TERMINATED};
use crate::directory::ReadOnlySource;
use crate::fieldnorm::FieldNormReader;
use crate::fastfield::DeleteBitSet;
use crate::postings::BlockSegmentPostings;
/// `SegmentPostings` represents the inverted list or postings associated to
@@ -21,6 +21,7 @@ use crate::postings::BlockSegmentPostings;
///
/// As we iterate through the `SegmentPostings`, the frequencies are optionally decoded.
/// Positions on the other hand, are optionally entirely decoded upfront.
#[derive(Clone)]
pub struct SegmentPostings {
pub(crate) block_cursor: BlockSegmentPostings,
cur: usize,
@@ -39,6 +40,25 @@ impl SegmentPostings {
}
}
/// Compute the number of non-deleted documents.
///
/// This method will clone and scan through the posting lists
/// (a rather expensive operation).
pub fn doc_freq_given_deletes(&self, delete_bitset: &DeleteBitSet) -> u32 {
let mut docset = self.clone();
let mut doc_freq = 0;
loop {
let doc = docset.doc();
if doc == TERMINATED {
return doc_freq;
}
if delete_bitset.is_alive(doc) {
doc_freq += 1u32;
}
docset.advance();
}
}
/// Returns the overall number of documents in the block postings.
/// It does not take into account whether documents are deleted or not.
pub fn doc_freq(&self) -> u32 {
@@ -56,7 +76,8 @@ impl SegmentPostings {
pub fn create_from_docs(docs: &[u32]) -> SegmentPostings {
let mut buffer = Vec::new();
{
let mut postings_serializer = PostingsSerializer::new(&mut buffer, false, false, None);
let mut postings_serializer =
PostingsSerializer::new(&mut buffer, 0.0, false, false, None);
postings_serializer.new_term(docs.len() as u32);
for &doc in docs {
postings_serializer.write_doc(doc, 1u32);
@@ -75,26 +96,48 @@ impl SegmentPostings {
}
/// Helper functions to create `SegmentPostings` for tests.
#[cfg(test)]
pub fn create_from_docs_and_tfs(
doc_and_tfs: &[(u32, u32)],
fieldnorm_reader: Option<FieldNormReader>,
) -> crate::Result<SegmentPostings> {
let mut buffer = Vec::new();
let mut postings_serializer =
PostingsSerializer::new(&mut buffer, true, false, fieldnorm_reader);
fieldnorms: Option<&[u32]>,
) -> SegmentPostings {
use crate::fieldnorm::FieldNormReader;
use crate::Score;
let mut buffer: Vec<u8> = Vec::new();
let fieldnorm_reader = fieldnorms.map(FieldNormReader::for_test);
let average_field_norm = fieldnorms
.map(|fieldnorms| {
if fieldnorms.len() == 0 {
return 0.0;
}
let total_num_tokens: u64 = fieldnorms
.iter()
.map(|&fieldnorm| fieldnorm as u64)
.sum::<u64>();
total_num_tokens as Score / fieldnorms.len() as f32
})
.unwrap_or(0.0);
let mut postings_serializer = PostingsSerializer::new(
&mut buffer,
average_field_norm,
true,
false,
fieldnorm_reader,
);
postings_serializer.new_term(doc_and_tfs.len() as u32);
for &(doc, tf) in doc_and_tfs {
postings_serializer.write_doc(doc, tf);
}
postings_serializer
.close_term(doc_and_tfs.len() as u32)?;
.close_term(doc_and_tfs.len() as u32)
.unwrap();
let block_segment_postings = BlockSegmentPostings::from_data(
doc_and_tfs.len() as u32,
ReadOnlySource::from(buffer),
IndexRecordOption::WithFreqs,
IndexRecordOption::WithFreqs,
);
Ok(SegmentPostings::from_block_postings(block_segment_postings, None))
SegmentPostings::from_block_postings(block_segment_postings, None)
}
/// Reads a Segment postings from an &[u8]
@@ -121,7 +164,7 @@ impl DocSet for SegmentPostings {
// next needs to be called a first time to point to the correct element.
#[inline]
fn advance(&mut self) -> DocId {
assert!(self.block_cursor.block_is_loaded());
debug_assert!(self.block_cursor.block_is_loaded());
if self.cur == COMPRESSION_BLOCK_SIZE - 1 {
self.cur = 0;
self.block_cursor.advance();
@@ -228,6 +271,7 @@ mod tests {
use crate::common::HasLen;
use crate::docset::{DocSet, TERMINATED};
use crate::fastfield::DeleteBitSet;
use crate::postings::postings::Postings;
#[test]
@@ -250,4 +294,14 @@ mod tests {
let postings = SegmentPostings::empty();
assert_eq!(postings.term_freq(), 1);
}
#[test]
fn test_doc_freq() {
let docs = SegmentPostings::create_from_docs(&[0, 2, 10]);
assert_eq!(docs.doc_freq(), 3);
let delete_bitset = DeleteBitSet::for_test(&[2], 12);
assert_eq!(docs.doc_freq_given_deletes(&delete_bitset), 2);
let all_deleted = DeleteBitSet::for_test(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 12);
assert_eq!(docs.doc_freq_given_deletes(&all_deleted), 0);
}
}

View File

@@ -11,7 +11,7 @@ use crate::query::BM25Weight;
use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::DocId;
use crate::{DocId, Score};
use std::cmp::Ordering;
use std::io::{self, Write};
@@ -97,12 +97,12 @@ impl InvertedIndexSerializer {
let field_entry: &FieldEntry = self.schema.get_field_entry(field);
let term_dictionary_write = self.terms_write.for_field(field);
let postings_write = self.postings_write.for_field(field);
total_num_tokens.serialize(postings_write)?;
let positions_write = self.positions_write.for_field(field);
let positionsidx_write = self.positionsidx_write.for_field(field);
let field_type: FieldType = (*field_entry.field_type()).clone();
FieldSerializer::create(
&field_type,
total_num_tokens,
term_dictionary_write,
postings_write,
positions_write,
@@ -135,12 +135,14 @@ pub struct FieldSerializer<'a> {
impl<'a> FieldSerializer<'a> {
fn create(
field_type: &FieldType,
total_num_tokens: u64,
term_dictionary_write: &'a mut CountingWriter<WritePtr>,
postings_write: &'a mut CountingWriter<WritePtr>,
positions_write: &'a mut CountingWriter<WritePtr>,
positionsidx_write: &'a mut CountingWriter<WritePtr>,
fieldnorm_reader: Option<FieldNormReader>,
) -> io::Result<FieldSerializer<'a>> {
total_num_tokens.serialize(postings_write)?;
let (term_freq_enabled, position_enabled): (bool, bool) = match field_type {
FieldType::Str(ref text_options) => {
if let Some(text_indexing_options) = text_options.get_indexing_options() {
@@ -153,8 +155,13 @@ impl<'a> FieldSerializer<'a> {
_ => (false, false),
};
let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
let average_fieldnorm = fieldnorm_reader
.as_ref()
.map(|ff_reader| (total_num_tokens as Score / ff_reader.num_docs() as Score))
.unwrap_or(0.0);
let postings_serializer = PostingsSerializer::new(
postings_write,
average_fieldnorm,
term_freq_enabled,
position_enabled,
fieldnorm_reader,
@@ -197,6 +204,7 @@ impl<'a> FieldSerializer<'a> {
!self.term_open,
"Called new_term, while the previous term was not closed."
);
self.term_open = true;
self.postings_serializer.clear();
self.current_term_info = self.current_term_info();
@@ -322,29 +330,18 @@ pub struct PostingsSerializer<W: Write> {
bm25_weight: Option<BM25Weight>,
num_docs: u32, // Number of docs in the segment
avg_fieldnorm: f32, // Average number of term in the field for that segment.
avg_fieldnorm: Score, // Average number of term in the field for that segment.
// this value is used to compute the block wand information.
}
fn get_avg_fieldnorm(fieldnorm_reader: &FieldNormReader) -> f32 {
let num_docs = fieldnorm_reader.num_docs();
let sum_fieldnorm: f32 = (0u32..num_docs)
.map(|doc| fieldnorm_reader.fieldnorm(doc) as f32)
.sum();
sum_fieldnorm / (num_docs as f32)
}
impl<W: Write> PostingsSerializer<W> {
pub fn new(
write: W,
avg_fieldnorm: Score,
termfreq_enabled: bool,
termfreq_sum_enabled: bool,
fieldnorm_reader: Option<FieldNormReader>,
) -> PostingsSerializer<W> {
let avg_fieldnorm: f32 = fieldnorm_reader
.as_ref()
.map(get_avg_fieldnorm)
.unwrap_or(0f32);
let num_docs = fieldnorm_reader
.as_ref()
.map(|fieldnorm_reader| fieldnorm_reader.num_docs())
@@ -403,16 +400,14 @@ impl<W: Write> PostingsSerializer<W> {
let sum_freq = self.block.term_freqs().iter().cloned().sum();
self.skip_write.write_total_term_freq(sum_freq);
}
let mut blockwand_params_opt = None;
let mut blockwand_params = (0u8, 0u32);
if let Some(bm25_weight) = self.bm25_weight.as_ref() {
if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
let docs = self.block.doc_ids();
let term_freqs = self.block.term_freqs();
blockwand_params_opt = docs
.iter()
.cloned()
.map(|doc| fieldnorm_reader.fieldnorm_id(doc))
.zip(term_freqs.iter().cloned())
let docs = self.block.doc_ids().iter().cloned();
let term_freqs = self.block.term_freqs().iter().cloned();
let fieldnorms = docs.map(|doc| fieldnorm_reader.fieldnorm_id(doc));
blockwand_params = fieldnorms
.zip(term_freqs)
.max_by(
|(left_fieldnorm_id, left_term_freq),
(right_fieldnorm_id, right_term_freq)| {
@@ -424,10 +419,11 @@ impl<W: Write> PostingsSerializer<W> {
.partial_cmp(&right_score)
.unwrap_or(Ordering::Equal)
},
);
)
.unwrap();
}
}
let (fieldnorm_id, term_freq) = blockwand_params_opt.unwrap_or((0u8, 0u32));
let (fieldnorm_id, term_freq) = blockwand_params;
self.skip_write.write_blockwand_max(fieldnorm_id, term_freq);
}
self.block.clear();

View File

@@ -1,4 +1,4 @@
use crate::common::{BinarySerializable, VInt};
use crate::common::{read_u32_vint_no_advance, serialize_vint_u32, BinarySerializable, VInt};
use crate::directory::ReadOnlySource;
use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
use crate::query::BM25Weight;
@@ -43,7 +43,9 @@ impl SkipSerializer {
pub fn write_blockwand_max(&mut self, fieldnorm_id: u8, term_freq: u32) {
self.buffer.push(fieldnorm_id);
VInt(term_freq as u64).serialize_into_vec(&mut self.buffer);
let mut buf = [0u8; 8];
let bytes = serialize_vint_u32(term_freq, &mut buf);
self.buffer.extend_from_slice(bytes);
}
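To make the layout concrete, a small sketch of what this entry looks like on the wire, assuming the vint helpers behave as shown in this diff (1 to 5 bytes per u32):
// Entry layout: [fieldnorm_id: u8][term_freq: vint-encoded u32].
let mut buffer: Vec<u8> = Vec::new();
buffer.push(13u8); // fieldnorm_id
let mut scratch = [0u8; 8];
buffer.extend_from_slice(serialize_vint_u32(300u32, &mut scratch));
// Reading it back without advancing a reader:
let (term_freq, _num_bytes) = read_u32_vint_no_advance(&buffer[1..]);
assert_eq!(term_freq, 300u32);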
pub fn data(&self) -> &[u8] {
@@ -56,6 +58,7 @@ impl SkipSerializer {
}
}
#[derive(Clone)]
pub(crate) struct SkipReader {
last_doc_in_block: DocId,
pub(crate) last_doc_in_previous_block: DocId,
@@ -69,7 +72,7 @@ pub(crate) struct SkipReader {
position_offset: u64,
}
#[derive(Clone, Copy, Debug)]
#[derive(Clone, Eq, PartialEq, Copy, Debug)]
pub(crate) enum BlockInfo {
BitPacked {
doc_num_bits: u8,
@@ -128,6 +131,10 @@ impl SkipReader {
}
}
// Returns the block max score for this block if available.
//
// The block max score is available for all full bitpacked blocks,
// but not available for the last VInt-encoded incomplete block.
pub fn block_max_score(&self, bm25_weight: &BM25Weight) -> Option<Score> {
match self.block_info {
BlockInfo::BitPacked {
@@ -169,9 +176,9 @@ impl SkipReader {
IndexRecordOption::WithFreqs => {
let tf_num_bits = self.owned_read.get(1);
let block_wand_fieldnorm_id = self.owned_read.get(2);
self.owned_read.advance(3);
let block_wand_term_freq =
VInt::deserialize_u64(&mut self.owned_read).unwrap() as u32;
let data = &self.owned_read.as_ref()[3..];
let (block_wand_term_freq, num_bytes) = read_u32_vint_no_advance(data);
self.owned_read.advance(3 + num_bytes);
self.block_info = BlockInfo::BitPacked {
doc_num_bits,
tf_num_bits,
@@ -207,9 +214,15 @@ impl SkipReader {
///
/// If the target is larger than all documents, the skip_reader
/// then advances to the last VInt block.
pub fn seek(&mut self, target: DocId) {
while self.last_doc_in_block() < target {
pub fn seek(&mut self, target: DocId) -> bool {
if self.last_doc_in_block() >= target {
return false;
}
loop {
self.advance();
if self.last_doc_in_block() >= target {
return true;
}
}
}
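Since `seek` now reports whether it actually moved, a caller can avoid reloading a block needlessly; a hypothetical caller pattern (identifiers illustrative):
// Only decompress a fresh block when the skip reader advanced past the
// block that is currently loaded.
if skip_reader.seek(target_doc) {
    block_segment_postings.load_block();
}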
@@ -225,7 +238,7 @@ impl SkipReader {
self.byte_offset += compressed_block_size(doc_num_bits + tf_num_bits);
self.position_offset += tf_sum as u64;
}
BlockInfo::VInt { num_docs} => {
BlockInfo::VInt { num_docs } => {
debug_assert_eq!(num_docs, self.remaining_docs);
self.remaining_docs = 0;
self.byte_offset = std::usize::MAX;
@@ -236,7 +249,9 @@ impl SkipReader {
self.read_block_info();
} else {
self.last_doc_in_block = TERMINATED;
self.block_info = BlockInfo::VInt { num_docs: self.remaining_docs };
self.block_info = BlockInfo::VInt {
num_docs: self.remaining_docs,
};
}
}
}
@@ -269,7 +284,7 @@ mod tests {
IndexRecordOption::WithFreqs,
);
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert!(matches!(
assert_eq!(
skip_reader.block_info,
BlockInfo::BitPacked {
doc_num_bits: 2u8,
@@ -278,10 +293,10 @@ mod tests {
block_wand_fieldnorm_id: 13,
block_wand_term_freq: 3
}
));
);
skip_reader.advance();
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
assert!(matches!(
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 5u8,
@@ -290,13 +305,13 @@ mod tests {
block_wand_fieldnorm_id: 8,
block_wand_term_freq: 2
}
));
);
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 });
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
}
#[test]
@@ -314,7 +329,7 @@ mod tests {
IndexRecordOption::Basic,
);
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert!(matches!(
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 2u8,
@@ -323,10 +338,10 @@ mod tests {
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
}
));
);
skip_reader.advance();
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
assert!(matches!(
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 5u8,
@@ -335,13 +350,13 @@ mod tests {
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
}
));
);
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 });
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
}
#[test]
@@ -358,7 +373,7 @@ mod tests {
IndexRecordOption::Basic,
);
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert!(matches!(
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 2u8,
@@ -367,8 +382,8 @@ mod tests {
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
}
));
);
skip_reader.advance();
assert!(matches!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }));
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
}
}

View File

@@ -9,7 +9,7 @@ use crate::Score;
/// Query that matches all of the documents.
///
/// All of the document get the score 1f32.
/// All of the documents get the score 1.0.
#[derive(Clone, Debug)]
pub struct AllQuery;
@@ -23,7 +23,7 @@ impl Query for AllQuery {
pub struct AllWeight;
impl Weight for AllWeight {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let all_scorer = AllScorer {
doc: 0u32,
max_doc: reader.max_doc(),
@@ -35,7 +35,7 @@ impl Weight for AllWeight {
if doc >= reader.max_doc() {
return Err(does_not_match(doc));
}
Ok(Explanation::new("AllQuery", 1f32))
Ok(Explanation::new("AllQuery", 1.0))
}
}
@@ -66,7 +66,7 @@ impl DocSet for AllScorer {
impl Scorer for AllScorer {
fn score(&mut self) -> Score {
1f32
1.0
}
}
@@ -100,7 +100,7 @@ mod tests {
let weight = AllQuery.weight(&searcher, false).unwrap();
{
let reader = searcher.segment_reader(0);
let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
let mut scorer = weight.scorer(reader, 1.0).unwrap();
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.advance(), 1u32);
assert_eq!(scorer.doc(), 1u32);
@@ -108,7 +108,7 @@ mod tests {
}
{
let reader = searcher.segment_reader(1);
let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
let mut scorer = weight.scorer(reader, 1.0).unwrap();
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.advance(), TERMINATED);
}
@@ -122,14 +122,14 @@ mod tests {
let weight = AllQuery.weight(&searcher, false).unwrap();
let reader = searcher.segment_reader(0);
{
let mut scorer = weight.scorer(reader, 2.0f32).unwrap();
let mut scorer = weight.scorer(reader, 2.0).unwrap();
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 2.0f32);
assert_eq!(scorer.score(), 2.0);
}
{
let mut scorer = weight.scorer(reader, 1.5f32).unwrap();
let mut scorer = weight.scorer(reader, 1.5).unwrap();
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.5f32);
assert_eq!(scorer.score(), 1.5);
}
}
}

View File

@@ -5,9 +5,9 @@ use crate::query::{BitSetDocSet, Explanation};
use crate::query::{Scorer, Weight};
use crate::schema::{Field, IndexRecordOption};
use crate::termdict::{TermDictionary, TermStreamer};
use crate::DocId;
use crate::Result;
use crate::TantivyError;
use crate::{DocId, Score};
use std::sync::Arc;
use tantivy_fst::Automaton;
@@ -40,7 +40,7 @@ impl<A> Weight for AutomatonWeight<A>
where
A: Automaton + Send + Sync + 'static,
{
fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);
let inverted_index = reader.inverted_index(self.field);
@@ -67,9 +67,9 @@ where
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0f32)?;
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) == doc {
Ok(Explanation::new("AutomatonScorer", 1.0f32))
Ok(Explanation::new("AutomatonScorer", 1.0))
} else {
Err(TantivyError::InvalidArgument(
"Document does not exist".to_string(),
@@ -144,13 +144,13 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let mut scorer = automaton_weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.scorer(searcher.segment_reader(0u32), 1.0)
.unwrap();
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.0f32);
assert_eq!(scorer.score(), 1.0);
assert_eq!(scorer.advance(), 2u32);
assert_eq!(scorer.doc(), 2u32);
assert_eq!(scorer.score(), 1.0f32);
assert_eq!(scorer.score(), 1.0);
assert_eq!(scorer.advance(), TERMINATED);
}
@@ -162,9 +162,9 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let mut scorer = automaton_weight
.scorer(searcher.segment_reader(0u32), 1.32f32)
.scorer(searcher.segment_reader(0u32), 1.32)
.unwrap();
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.32f32);
assert_eq!(scorer.score(), 1.32);
}
}

View File

@@ -61,21 +61,23 @@ impl DocSet for BitSetDocSet {
}
fn seek(&mut self, target: DocId) -> DocId {
if target >= self.docs.max_value() {
self.doc = TERMINATED;
return TERMINATED;
}
let target_bucket = target / 64u32;
// Mask for all of the bits greater than or equal
// to our target document.
if target_bucket > self.cursor_bucket {
self.go_to_bucket(target_bucket);
let greater_filter: TinySet = TinySet::range_greater_or_equal(target);
self.cursor_tinybitset = self.cursor_tinybitset.intersect(greater_filter);
self.advance();
self.advance()
} else {
let mut doc = self.doc();
while doc < target {
doc = self.advance();
}
doc
}
let mut doc = self.doc();
while doc < target {
doc = self.advance();
}
doc
}
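A minimal illustration of the contract this fix restores, assuming `BitSet::insert` to populate the set (values illustrative):
let mut bitset = BitSet::with_max_value(100);
for &doc in &[3u32, 9u32, 42u32] {
    bitset.insert(doc);
}
let mut docset = BitSetDocSet::from(bitset);
assert_eq!(docset.seek(9), 9);           // exact hit
assert_eq!(docset.seek(10), 42);         // first doc at or after the target
assert_eq!(docset.seek(43), TERMINATED); // nothing left at or after 43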
/// Returns the current document
@@ -114,6 +116,13 @@ mod tests {
assert_eq!(empty.advance(), TERMINATED)
}
#[test]
fn test_seek_terminated() {
let bitset = BitSet::with_max_value(1000);
let mut empty = BitSetDocSet::from(bitset);
assert_eq!(empty.seek(TERMINATED), TERMINATED)
}
fn test_go_through_sequential(docs: &[DocId]) {
let mut docset = create_docbitset(docs, 1_000u32);
for &doc in docs {

View File

@@ -6,21 +6,21 @@ use crate::Term;
use serde::Deserialize;
use serde::Serialize;
const K1: f32 = 1.2;
const B: f32 = 0.75;
const K1: Score = 1.2;
const B: Score = 0.75;
fn idf(doc_freq: u64, doc_count: u64) -> f32 {
fn idf(doc_freq: u64, doc_count: u64) -> Score {
assert!(doc_count >= doc_freq, "{} >= {}", doc_count, doc_freq);
let x = ((doc_count - doc_freq) as f32 + 0.5) / (doc_freq as f32 + 0.5);
(1f32 + x).ln()
let x = ((doc_count - doc_freq) as Score + 0.5) / (doc_freq as Score + 0.5);
(1.0 + x).ln()
}
fn cached_tf_component(fieldnorm: u32, average_fieldnorm: f32) -> f32 {
K1 * (1f32 - B + B * fieldnorm as f32 / average_fieldnorm)
fn cached_tf_component(fieldnorm: u32, average_fieldnorm: Score) -> Score {
K1 * (1.0 - B + B * fieldnorm as Score / average_fieldnorm)
}
fn compute_tf_cache(average_fieldnorm: f32) -> [f32; 256] {
let mut cache = [0f32; 256];
fn compute_tf_cache(average_fieldnorm: Score) -> [Score; 256] {
let mut cache: [Score; 256] = [0.0; 256];
for (fieldnorm_id, cache_mut) in cache.iter_mut().enumerate() {
let fieldnorm = FieldNormReader::id_to_fieldnorm(fieldnorm_id as u8);
*cache_mut = cached_tf_component(fieldnorm, average_fieldnorm);
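To make the cached term concrete, a worked example with hypothetical values (dl = 10, avgdl = 8.0):
// norm = K1 * (1 - B + B * dl / avgdl) = 1.2 * (0.25 + 0.9375) = 1.425
let norm: f32 = 1.2 * (1.0 - 0.75 + 0.75 * 10.0 / 8.0);
// tf factor for a term frequency of 2: freq / (freq + norm)
let tf_factor = 2.0f32 / (2.0f32 + norm);
assert!((tf_factor - 0.5839).abs() < 1e-3);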
@@ -30,19 +30,20 @@ fn compute_tf_cache(average_fieldnorm: f32) -> [f32; 256] {
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct BM25Params {
pub idf: f32,
pub avg_fieldnorm: f32,
pub idf: Score,
pub avg_fieldnorm: Score,
}
#[derive(Clone)]
pub struct BM25Weight {
idf_explain: Explanation,
weight: f32,
cache: [f32; 256],
average_fieldnorm: f32,
weight: Score,
cache: [Score; 256],
average_fieldnorm: Score,
}
impl BM25Weight {
pub fn boost_by(&self, boost: f32) -> BM25Weight {
pub fn boost_by(&self, boost: Score) -> BM25Weight {
BM25Weight {
idf_explain: self.idf_explain.clone(),
weight: self.weight * boost,
@@ -69,7 +70,7 @@ impl BM25Weight {
total_num_tokens += inverted_index.total_num_tokens();
total_num_docs += u64::from(segment_reader.max_doc());
}
let average_fieldnorm = total_num_tokens as f32 / total_num_docs as f32;
let average_fieldnorm = total_num_tokens as Score / total_num_docs as Score;
if terms.len() == 1 {
let term_doc_freq = searcher.doc_freq(&terms[0]);
@@ -81,26 +82,30 @@ impl BM25Weight {
let term_doc_freq = searcher.doc_freq(term);
idf(term_doc_freq, total_num_docs)
})
.sum::<f32>();
.sum::<Score>();
let idf_explain = Explanation::new("idf", idf);
BM25Weight::new(idf_explain, average_fieldnorm)
}
}
pub fn for_one_term(term_doc_freq: u64, total_num_docs: u64, avg_fieldnorm: f32) -> BM25Weight {
pub fn for_one_term(
term_doc_freq: u64,
total_num_docs: u64,
avg_fieldnorm: Score,
) -> BM25Weight {
let idf = idf(term_doc_freq, total_num_docs);
let mut idf_explain =
Explanation::new("idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))", idf);
idf_explain.add_const(
"n, number of docs containing this term",
term_doc_freq as f32,
term_doc_freq as Score,
);
idf_explain.add_const("N, total number of docs", total_num_docs as f32);
idf_explain.add_const("N, total number of docs", total_num_docs as Score);
BM25Weight::new(idf_explain, avg_fieldnorm)
}
fn new(idf_explain: Explanation, average_fieldnorm: f32) -> BM25Weight {
let weight = idf_explain.value() * (1f32 + K1);
fn new(idf_explain: Explanation, average_fieldnorm: Score) -> BM25Weight {
let weight = idf_explain.value() * (1.0 + K1);
BM25Weight {
idf_explain,
weight,
@@ -119,8 +124,8 @@ impl BM25Weight {
}
#[inline(always)]
pub(crate) fn tf_factor(&self, fieldnorm_id: u8, term_freq: u32) -> f32 {
let term_freq = term_freq as f32;
pub(crate) fn tf_factor(&self, fieldnorm_id: u8, term_freq: u32) -> Score {
let term_freq = term_freq as Score;
let norm = self.cache[fieldnorm_id as usize];
term_freq / (term_freq + norm)
}
@@ -131,7 +136,7 @@ impl BM25Weight {
let score = self.score(fieldnorm_id, term_freq);
let norm = self.cache[fieldnorm_id as usize];
let term_freq = term_freq as f32;
let term_freq = term_freq as Score;
let right_factor = term_freq / (term_freq + norm);
let mut tf_explanation = Explanation::new(
@@ -144,12 +149,12 @@ impl BM25Weight {
tf_explanation.add_const("b, length normalization parameter", B);
tf_explanation.add_const(
"dl, length of field",
FieldNormReader::id_to_fieldnorm(fieldnorm_id) as f32,
FieldNormReader::id_to_fieldnorm(fieldnorm_id) as Score,
);
tf_explanation.add_const("avgdl, average length of field", self.average_fieldnorm);
let mut explanation = Explanation::new("TermQuery, product of...", score);
explanation.add_detail(Explanation::new("(K1+1)", K1 + 1f32));
explanation.add_detail(Explanation::new("(K1+1)", K1 + 1.0));
explanation.add_detail(self.idf_explain.clone());
explanation.add_detail(tf_explanation);
explanation
@@ -160,10 +165,11 @@ impl BM25Weight {
mod tests {
use super::idf;
use crate::assert_nearly_equals;
use crate::{assert_nearly_equals, Score};
#[test]
fn test_idf() {
assert_nearly_equals!(idf(1, 2), std::f32::consts::LN_2);
let score: Score = 2.0;
assert_nearly_equals!(idf(1, 2), score.ln());
}
}

View File

@@ -1,8 +1,21 @@
use crate::query::term_query::TermScorer;
use crate::query::Scorer;
use crate::{DocId, DocSet, Score, TERMINATED};
use std::ops::DerefMut;
use std::ops::Deref;
use std::ops::DerefMut;
fn is_sorted<I: Iterator<Item = DocId>>(mut it: I) -> bool {
if let Some(first) = it.next() {
let mut prev = first;
for doc in it {
if doc < prev {
return false;
}
prev = doc;
}
}
true
}
/// Takes term_scorers sorted by their current doc() and a threshold, and
/// returns `(before_pivot_len, pivot_len, pivot_doc)` defined as follows:
@@ -13,8 +26,11 @@ use std::ops::Deref;
/// We always have `before_pivot_len` < `pivot_len`.
///
/// None is returned if we establish that no document can exceed the threshold.
fn find_pivot_doc(term_scorers: &[TermScorerWithMaxScore], threshold: f32) -> Option<(usize, usize, DocId)> {
let mut max_score = 0.0f32;
fn find_pivot_doc(
term_scorers: &[TermScorerWithMaxScore],
threshold: Score,
) -> Option<(usize, usize, DocId)> {
let mut max_score = 0.0;
let mut before_pivot_len = 0;
let mut pivot_doc = TERMINATED;
while before_pivot_len < term_scorers.len() {
@@ -32,7 +48,8 @@ fn find_pivot_doc(term_scorers: &[TermScorerWithMaxScore], threshold: f32) -> Op
// Right now i is an ordinal, we want a len.
let mut pivot_len = before_pivot_len + 1;
// Some other term_scorer may be positioned on the same document.
pivot_len += term_scorers[pivot_len..].iter()
pivot_len += term_scorers[pivot_len..]
.iter()
.take_while(|term_scorer| term_scorer.doc() == pivot_doc)
.count();
Some((before_pivot_len, pivot_len, pivot_doc))
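A worked illustration of the pivot search above, with hypothetical scorers and threshold:
// threshold = 3.0; scorers sorted by doc():
//   A: doc = 2, max_score = 1.5   -> cumulative max = 1.5, still <= threshold
//   B: doc = 5, max_score = 2.0   -> cumulative max = 3.5 >  threshold: pivot found
//   C: doc = 5, max_score = 0.5   -> positioned on the pivot, absorbed into pivot_len
// find_pivot_doc returns Some((before_pivot_len, pivot_len, pivot_doc)) = Some((1, 3, 5)).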
@@ -40,16 +57,13 @@ fn find_pivot_doc(term_scorers: &[TermScorerWithMaxScore], threshold: f32) -> Op
struct TermScorerWithMaxScore<'a> {
scorer: &'a mut TermScorer,
max_score: f32,
max_score: Score,
}
impl<'a> From<&'a mut TermScorer> for TermScorerWithMaxScore<'a> {
fn from(scorer: &'a mut TermScorer) -> Self {
let max_score = scorer.max_score();
TermScorerWithMaxScore {
scorer,
max_score
}
TermScorerWithMaxScore { scorer, max_score }
}
}
@@ -61,7 +75,6 @@ impl<'a> Deref for TermScorerWithMaxScore<'a> {
}
}
impl<'a> DerefMut for TermScorerWithMaxScore<'a> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.scorer
@@ -69,7 +82,10 @@ impl<'a> DerefMut for TermScorerWithMaxScore<'a> {
}
// Before and after calling this method, scorers need to be sorted by their `.doc()`.
fn block_max_was_too_low_advance_one_scorer(scorers: &mut Vec<TermScorerWithMaxScore>, pivot_len: usize) {
fn block_max_was_too_low_advance_one_scorer(
scorers: &mut Vec<TermScorerWithMaxScore>,
pivot_len: usize,
) {
let mut scorer_to_seek = pivot_len - 1;
let mut doc_to_seek_after = scorers[scorer_to_seek].doc();
for scorer_ord in (0..pivot_len - 1).rev() {
@@ -88,7 +104,6 @@ fn block_max_was_too_low_advance_one_scorer(scorers: &mut Vec<TermScorerWithMaxS
restore_ordering(scorers, scorer_to_seek);
}
// Given a list of term_scorers and an `ord`, and assuming that `term_scorers` is sorted
// except for `term_scorers[ord]`, which may have advanced past its rank,
// bubble up `term_scorers[ord]` in order to restore the ordering.
@@ -100,15 +115,19 @@ fn restore_ordering(term_scorers: &mut Vec<TermScorerWithMaxScore>, ord: usize)
}
term_scorers.swap(i, i - 1);
}
debug_assert!(is_sorted(term_scorers.iter().map(|scorer| scorer.doc())));
}
// Attempts to advance all term_scorers in `term_scorers[0..before_pivot_len]` to the pivot.
// If this works, return true.
// If this fails (i.e. one of the term_scorers does not contain `pivot_doc` and its seek goes
// past the pivot), reorder the term_scorers to ensure the list is still sorted and return `false`.
// If a term_scorer reaches TERMINATED in the process, remove it and return `false`.
fn align_scorers(term_scorers: &mut Vec<TermScorerWithMaxScore>, pivot_doc: DocId, before_pivot_len: usize) -> bool {
fn align_scorers(
term_scorers: &mut Vec<TermScorerWithMaxScore>,
pivot_doc: DocId,
before_pivot_len: usize,
) -> bool {
debug_assert_ne!(pivot_doc, TERMINATED);
for i in (0..before_pivot_len).rev() {
let new_doc = term_scorers[i].seek(pivot_doc);
@@ -126,16 +145,20 @@ fn align_scorers(term_scorers: &mut Vec<TermScorerWithMaxScore>, pivot_doc: DocI
return false;
}
}
return true;
true
}
// Assumes term_scorers[..pivot_len] are positioned on the same doc (pivot_doc).
// Advances term_scorers[..pivot_len], removes the scorers that reached TERMINATED,
// and restores the ordering of term_scorers.
fn advance_all_scorers_on_pivot(term_scorers: &mut Vec<TermScorerWithMaxScore>, pivot_len: usize) {
for term_scorer in &mut term_scorers[..pivot_len] {
term_scorer.advance();
}
// TODO use drain_filter when available.
let mut i = 0;
for _ in 0..pivot_len {
if term_scorers[i].advance() == TERMINATED {
while i != term_scorers.len() {
if term_scorers[i].doc() == TERMINATED {
term_scorers.swap_remove(i);
} else {
i += 1;
@@ -146,17 +169,25 @@ fn advance_all_scorers_on_pivot(term_scorers: &mut Vec<TermScorerWithMaxScore>,
pub fn block_wand(
mut scorers: Vec<TermScorer>,
mut threshold: f32,
mut threshold: Score,
callback: &mut dyn FnMut(u32, Score) -> Score,
) {
let mut scorers: Vec<TermScorerWithMaxScore> = scorers.iter_mut().map(TermScorerWithMaxScore::from).collect();
let mut scorers: Vec<TermScorerWithMaxScore> = scorers
.iter_mut()
.map(TermScorerWithMaxScore::from)
.collect();
scorers.sort_by_key(|scorer| scorer.doc());
loop {
// At this point we need to ensure that the scorers are sorted!
if let Some((before_pivot_len, pivot_len, pivot_doc)) = find_pivot_doc(&scorers[..], threshold) {
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
if let Some((before_pivot_len, pivot_len, pivot_doc)) =
find_pivot_doc(&scorers[..], threshold)
{
debug_assert_ne!(pivot_doc, TERMINATED);
debug_assert!(before_pivot_len < pivot_len);
let block_max_score_upperbound: Score = scorers[..pivot_len].iter_mut()
let block_max_score_upperbound: Score = scorers[..pivot_len]
.iter_mut()
.map(|scorer| {
scorer.shallow_seek(pivot_doc);
scorer.block_max_score()
@@ -168,7 +199,7 @@ pub fn block_wand(
//
// `block_segment_postings.load_block()` needs to be called separately.
if block_max_score_upperbound <= threshold {
// Block max condition was not reached.
// Block max condition was not reached
// We could get away by simply advancing the scorers to DocId + 1 but it would
// be inefficient. The optimization requires proper explanation and was
// isolated in a different function.
@@ -198,9 +229,204 @@ pub fn block_wand(
}
// let's advance all of the scorers that are currently positioned on the pivot.
advance_all_scorers_on_pivot(&mut scorers, pivot_len);
} else {
return;
}
}
}
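A hedged sketch of the callback contract, assuming `term_scorers: Vec<TermScorer>` and a limit `k` are in scope: the callback receives every candidate whose score exceeds the current threshold and must return the updated threshold, here the k-th best score seen so far.
let mut best: Vec<Score> = Vec::new();
block_wand(term_scorers, Score::MIN, &mut |_doc, score| {
    best.push(score);
    // Keep the k highest scores, best first.
    best.sort_by(|a, b| b.partial_cmp(a).unwrap_or(std::cmp::Ordering::Equal));
    best.truncate(k);
    // Once k candidates were collected, the k-th best becomes the new threshold.
    if best.len() == k {
        best[k - 1]
    } else {
        Score::MIN
    }
});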
#[cfg(test)]
mod tests {
use crate::query::score_combiner::SumCombiner;
use crate::query::term_query::TermScorer;
use crate::query::Union;
use crate::query::{BM25Weight, Scorer};
use crate::{DocId, DocSet, Score, TERMINATED};
use proptest::prelude::*;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::iter;
struct Float(Score);
impl Eq for Float {}
impl PartialEq for Float {
fn eq(&self, other: &Self) -> bool {
self.cmp(&other) == Ordering::Equal
}
}
impl PartialOrd for Float {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Float {
fn cmp(&self, other: &Self) -> Ordering {
other.0.partial_cmp(&self.0).unwrap_or(Ordering::Equal)
}
}
fn nearly_equals(left: Score, right: Score) -> bool {
(left - right).abs() < 0.000001 * (left + right).abs()
}
fn compute_checkpoints_for_each_pruning(
term_scorers: Vec<TermScorer>,
n: usize,
) -> Vec<(DocId, Score)> {
let mut heap: BinaryHeap<Float> = BinaryHeap::with_capacity(n);
let mut checkpoints: Vec<(DocId, Score)> = Vec::new();
let mut limit: Score = 0.0;
super::block_wand(term_scorers, Score::MIN, &mut |doc, score| {
heap.push(Float(score));
if heap.len() > n {
heap.pop().unwrap();
}
if heap.len() == n {
limit = heap.peek().unwrap().0;
}
if !nearly_equals(score, limit) {
checkpoints.push((doc, score));
}
return limit;
});
checkpoints
}
fn compute_checkpoints_manual(term_scorers: Vec<TermScorer>, n: usize) -> Vec<(DocId, Score)> {
let mut heap: BinaryHeap<Float> = BinaryHeap::with_capacity(n);
let mut checkpoints: Vec<(DocId, Score)> = Vec::new();
let mut scorer: Union<TermScorer, SumCombiner> = Union::from(term_scorers);
let mut limit = Score::MIN;
loop {
if scorer.doc() == TERMINATED {
break;
}
let doc = scorer.doc();
let score = scorer.score();
if score > limit {
heap.push(Float(score));
if heap.len() > n {
heap.pop().unwrap();
}
if heap.len() == n {
limit = heap.peek().unwrap().0;
}
if !nearly_equals(score, limit) {
checkpoints.push((doc, score));
}
}
scorer.advance();
}
checkpoints
}
const MAX_TERM_FREQ: u32 = 100u32;
fn posting_list(max_doc: u32) -> BoxedStrategy<Vec<(DocId, u32)>> {
(1..max_doc + 1)
.prop_flat_map(move |doc_freq| {
(
proptest::bits::bitset::sampled(doc_freq as usize, 0..max_doc as usize),
proptest::collection::vec(1u32..MAX_TERM_FREQ, doc_freq as usize),
)
})
.prop_map(|(docset, term_freqs)| {
docset
.iter()
.map(|doc| doc as u32)
.zip(term_freqs.iter().cloned())
.collect::<Vec<_>>()
})
.boxed()
}
fn gen_term_scorers(num_scorers: usize) -> BoxedStrategy<(Vec<Vec<(DocId, u32)>>, Vec<u32>)> {
(1u32..100u32)
.prop_flat_map(move |max_doc: u32| {
(
proptest::collection::vec(posting_list(max_doc), num_scorers),
proptest::collection::vec(2u32..10u32 * MAX_TERM_FREQ, max_doc as usize),
)
})
.boxed()
}
fn test_block_wand_aux(posting_lists: &[Vec<(DocId, u32)>], fieldnorms: &[u32]) {
// We virtually repeat all docs 64 times in order to emulate blocks of 2 documents
// and surface bugs more easily.
const REPEAT: usize = 64;
let fieldnorms_expanded = fieldnorms
.iter()
.cloned()
.flat_map(|fieldnorm| iter::repeat(fieldnorm).take(REPEAT))
.collect::<Vec<u32>>();
let postings_lists_expanded: Vec<Vec<(DocId, u32)>> = posting_lists
.iter()
.map(|posting_list| {
posting_list
.into_iter()
.cloned()
.flat_map(|(doc, term_freq)| {
(0 as u32..REPEAT as u32).map(move |offset| {
(
doc * (REPEAT as u32) + offset,
if offset == 0 { term_freq } else { 1 },
)
})
})
.collect::<Vec<(DocId, u32)>>()
})
.collect::<Vec<_>>();
let total_fieldnorms: u64 = fieldnorms_expanded
.iter()
.cloned()
.map(|fieldnorm| fieldnorm as u64)
.sum();
let average_fieldnorm = (total_fieldnorms as Score) / (fieldnorms_expanded.len() as Score);
let max_doc = fieldnorms_expanded.len();
let term_scorers: Vec<TermScorer> = postings_lists_expanded
.iter()
.map(|postings| {
let bm25_weight = BM25Weight::for_one_term(
postings.len() as u64,
max_doc as u64,
average_fieldnorm,
);
TermScorer::create_for_test(postings, &fieldnorms_expanded[..], bm25_weight)
})
.collect();
for top_k in 1..4 {
let checkpoints_for_each_pruning =
compute_checkpoints_for_each_pruning(term_scorers.clone(), top_k);
let checkpoints_manual = compute_checkpoints_manual(term_scorers.clone(), top_k);
assert_eq!(checkpoints_for_each_pruning.len(), checkpoints_manual.len());
for (&(left_doc, left_score), &(right_doc, right_score)) in checkpoints_for_each_pruning
.iter()
.zip(checkpoints_manual.iter())
{
assert_eq!(left_doc, right_doc);
assert!(nearly_equals(left_score, right_score));
}
}
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(500))]
#[test]
fn test_block_wand_two_term_scorers((posting_lists, fieldnorms) in gen_term_scorers(2)) {
test_block_wand_aux(&posting_lists[..], &fieldnorms[..]);
}
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(500))]
#[test]
fn test_block_wand_three_term_scorers((posting_lists, fieldnorms) in gen_term_scorers(3)) {
test_block_wand_aux(&posting_lists[..], &fieldnorms[..]);
}
}
}

View File

@@ -78,7 +78,7 @@ impl BooleanWeight {
fn per_occur_scorers(
&self,
reader: &SegmentReader,
boost: f32,
boost: Score,
) -> crate::Result<HashMap<Occur, Vec<Box<dyn Scorer>>>> {
let mut per_occur_scorers: HashMap<Occur, Vec<Box<dyn Scorer>>> = HashMap::new();
for &(ref occur, ref subweight) in &self.weights {
@@ -94,7 +94,7 @@ impl BooleanWeight {
fn complex_scorer<TScoreCombiner: ScoreCombiner>(
&self,
reader: &SegmentReader,
boost: f32,
boost: Score,
) -> crate::Result<SpecializedScorer> {
let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;
@@ -147,7 +147,7 @@ impl BooleanWeight {
}
impl Weight for BooleanWeight {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
if self.weights.is_empty() {
Ok(Box::new(EmptyScorer))
} else if self.weights.len() == 1 {
@@ -169,12 +169,12 @@ impl Weight for BooleanWeight {
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0f32)?;
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
if !self.scoring_enabled {
return Ok(Explanation::new("BooleanQuery with no scoring", 1f32));
return Ok(Explanation::new("BooleanQuery with no scoring", 1.0));
}
let mut explanation = Explanation::new("BooleanClause. Sum of ...", scorer.score());
@@ -193,7 +193,7 @@ impl Weight for BooleanWeight {
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let mut union_scorer =
@@ -219,11 +219,11 @@ impl Weight for BooleanWeight {
/// important optimization (e.g. BlockWAND for union).
fn for_each_pruning(
&self,
threshold: f32,
threshold: Score,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
super::block_wand(term_scorers, threshold, callback);

View File

@@ -61,9 +61,7 @@ mod tests {
let query = query_parser.parse_query("+a").unwrap();
let searcher = index.reader().unwrap().searcher();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
assert!(scorer.is::<TermScorer>());
}
@@ -75,17 +73,13 @@ mod tests {
{
let query = query_parser.parse_query("+a +b +c").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
assert!(scorer.is::<Intersection<TermScorer>>());
}
{
let query = query_parser.parse_query("+a +(b c)").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
assert!(scorer.is::<Intersection<Box<dyn Scorer>>>());
}
}
@@ -98,9 +92,7 @@ mod tests {
{
let query = query_parser.parse_query("+a b").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
assert!(scorer.is::<RequiredOptionalScorer<
Box<dyn Scorer>,
Box<dyn Scorer>,
@@ -110,9 +102,7 @@ mod tests {
{
let query = query_parser.parse_query("+a b").unwrap();
let weight = query.weight(&searcher, false).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
assert!(scorer.is::<TermScorer>());
}
}
@@ -255,17 +245,17 @@ mod tests {
let boolean_weight = boolean_query.weight(&searcher, true).unwrap();
{
let mut boolean_scorer = boolean_weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.scorer(searcher.segment_reader(0u32), 1.0)
.unwrap();
assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals!(boolean_scorer.score(), 0.84163445f32);
assert_nearly_equals!(boolean_scorer.score(), 0.84163445);
}
{
let mut boolean_scorer = boolean_weight
.scorer(searcher.segment_reader(0u32), 2.0f32)
.scorer(searcher.segment_reader(0u32), 2.0)
.unwrap();
assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals!(boolean_scorer.score(), 1.6832689f32);
assert_nearly_equals!(boolean_scorer.score(), 1.6832689);
}
}
@@ -295,170 +285,9 @@ mod tests {
(Occur::Must, make_term_query("a")),
(Occur::Must, make_term_query("b")),
]);
assert_eq!(score_docs(&boolean_query), vec![0.977973, 0.84699446]);
let scores = score_docs(&boolean_query);
assert_nearly_equals!(scores[0], 0.977973);
assert_nearly_equals!(scores[1], 0.84699446);
}
}
// motivated by #554
#[test]
fn test_bm25_several_fields() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT);
let text = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(
// tf = 1 0
title => "Законы притяжения Оксана Кулакова",
// tf = 1 0
text => "Законы притяжения Оксана Кулакова] \n\nТема: Сексуальное искусство, Женственность\nТип товара: Запись вебинара (аудио)\nПродолжительность: 1,5 часа\n\nСсылка на вебинар:\n ",
));
index_writer.add_document(doc!(
// tf = 1 0
title => "Любимые русские пироги (Оксана Путан)",
// tf = 2 0
text => "http://i95.fastpic.ru/big/2017/0628/9a/615b9c8504d94a3893d7f496ac53539a.jpg \n\nОт издателя\nОксана Путан профессиональный повар, автор кулинарных книг и известный кулинарный блогер. Ее рецепты отличаются практичностью, доступностью и пользуются огромной популярностью в русскоязычном интернете. Это третья книга автора о самом вкусном и ароматном настоящих русских пирогах и выпечке!\nДаже новички на кухне легко готовят по ее рецептам. Оксана описывает процесс приготовления настолько подробно и понятно, что вам остается только наслаждаться готовкой и не тратить время на лишние усилия. Готовьте легко и просто!\n\nhttps://www.ozon.ru/context/detail/id/139872462/"
));
index_writer.add_document(doc!(
// tf = 1 1
title => "PDF Мастер Класс \"Морячок\" (Оксана Лифенко)",
// tf = 0 0
text => "https://i.ibb.co/pzvHrDN/I3d U T6 Gg TM.jpg\nhttps://i.ibb.co/NFrb6v6/N0ls Z9nwjb U.jpg\nВ описание входит штаны, кофта, берет, матросский воротник. Описание продается в формате PDF, состоит из 12 страниц формата А4 и может быть напечатано на любом принтере.\nОписание предназначено для кукол BJD RealPuki от FairyLand, но может подойти и другим подобным куклам. Также вы можете вязать этот наряд из обычной пряжи, и он подойдет для куколок побольше.\nhttps://vk.com/market 95724412?w=product 95724412_2212"
));
for _ in 0..1_000 {
index_writer.add_document(doc!(
title => "a b d e f g",
text => "maitre corbeau sur un arbre perche tenait dans son bec un fromage Maitre rnard par lodeur alleche lui tint a peu pres ce langage."
));
}
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![title, text]);
let query = query_parser.parse_query("Оксана Лифенко").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let mut scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
scorer.advance();
let explanation = query.explain(&searcher, DocAddress(0u32, 0u32)).unwrap();
assert_eq!(
explanation.to_pretty_json(),
r#"{
"value": 12.997711,
"description": "BooleanClause. Sum of ...",
"details": [
{
"value": 12.997711,
"description": "BooleanClause. Sum of ...",
"details": [
{
"value": 6.551476,
"description": "TermQuery, product of...",
"details": [
{
"value": 2.2,
"description": "(K1+1)"
},
{
"value": 5.658984,
"description": "idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))",
"details": [
{
"value": 3.0,
"description": "n, number of docs containing this term"
},
{
"value": 1003.0,
"description": "N, total number of docs"
}
]
},
{
"value": 0.5262329,
"description": "freq / (freq + k1 * (1 - b + b * dl / avgdl))",
"details": [
{
"value": 1.0,
"description": "freq, occurrences of term within document"
},
{
"value": 1.2,
"description": "k1, term saturation parameter"
},
{
"value": 0.75,
"description": "b, length normalization parameter"
},
{
"value": 4.0,
"description": "dl, length of field"
},
{
"value": 5.997009,
"description": "avgdl, average length of field"
}
]
}
]
},
{
"value": 6.446235,
"description": "TermQuery, product of...",
"details": [
{
"value": 2.2,
"description": "(K1+1)"
},
{
"value": 5.9954567,
"description": "idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))",
"details": [
{
"value": 2.0,
"description": "n, number of docs containing this term"
},
{
"value": 1003.0,
"description": "N, total number of docs"
}
]
},
{
"value": 0.4887212,
"description": "freq / (freq + k1 * (1 - b + b * dl / avgdl))",
"details": [
{
"value": 1.0,
"description": "freq, occurrences of term within document"
},
{
"value": 1.2,
"description": "k1, term saturation parameter"
},
{
"value": 0.75,
"description": "b, length normalization parameter"
},
{
"value": 20.0,
"description": "dl, length of field"
},
{
"value": 24.123629,
"description": "avgdl, average length of field"
}
]
}
]
}
]
}
]
}"#
);
}
}

View File

@@ -1,7 +1,7 @@
use crate::fastfield::DeleteBitSet;
use crate::query::explanation::does_not_match;
use crate::query::{Explanation, Query, Scorer, Weight};
use crate::{DocId, DocSet, Searcher, SegmentReader, Term};
use crate::{DocId, DocSet, Score, Searcher, SegmentReader, Term};
use std::collections::BTreeSet;
use std::fmt;
@@ -12,12 +12,12 @@ use std::fmt;
/// factor.
pub struct BoostQuery {
query: Box<dyn Query>,
boost: f32,
boost: Score,
}
impl BoostQuery {
/// Builds a boost query.
pub fn new(query: Box<dyn Query>, boost: f32) -> BoostQuery {
pub fn new(query: Box<dyn Query>, boost: Score) -> BoostQuery {
BoostQuery { query, boost }
}
}
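A short usage sketch (the field handle and term are illustrative):
// Matches the same documents as the inner query, with scores doubled.
let inner_query = TermQuery::new(
    Term::from_field_text(title, "diary"),
    IndexRecordOption::Basic,
);
let boosted_query = BoostQuery::new(Box::new(inner_query), 2.0);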
@@ -55,22 +55,22 @@ impl Query for BoostQuery {
pub(crate) struct BoostWeight {
weight: Box<dyn Weight>,
boost: f32,
boost: Score,
}
impl BoostWeight {
pub fn new(weight: Box<dyn Weight>, boost: f32) -> Self {
pub fn new(weight: Box<dyn Weight>, boost: Score) -> Self {
BoostWeight { weight, boost }
}
}
impl Weight for BoostWeight {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
self.weight.scorer(reader, boost * self.boost)
}
fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0f32)?;
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
@@ -88,11 +88,11 @@ impl Weight for BoostWeight {
pub(crate) struct BoostScorer<S: Scorer> {
underlying: S,
boost: f32,
boost: Score,
}
impl<S: Scorer> BoostScorer<S> {
pub fn new(underlying: S, boost: f32) -> BoostScorer<S> {
pub fn new(underlying: S, boost: Score) -> BoostScorer<S> {
BoostScorer { underlying, boost }
}
}
@@ -128,7 +128,7 @@ impl<S: Scorer> DocSet for BoostScorer<S> {
}
impl<S: Scorer> Scorer for BoostScorer<S> {
fn score(&mut self) -> f32 {
fn score(&mut self) -> Score {
self.underlying.score() * self.boost
}
}

View File

@@ -34,7 +34,7 @@ impl Query for EmptyQuery {
/// It is useful for tests and handling edge cases.
pub struct EmptyWeight;
impl Weight for EmptyWeight {
fn scorer(&self, _reader: &SegmentReader, _boost: f32) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, _reader: &SegmentReader, _boost: Score) -> crate::Result<Box<dyn Scorer>> {
Ok(Box::new(EmptyScorer))
}
@@ -64,7 +64,7 @@ impl DocSet for EmptyScorer {
impl Scorer for EmptyScorer {
fn score(&mut self) -> Score {
0f32
0.0
}
}

View File

@@ -1,4 +1,4 @@
use crate::{DocId, TantivyError};
use crate::{DocId, Score, TantivyError};
use serde::Serialize;
use std::fmt;
@@ -13,7 +13,7 @@ pub(crate) fn does_not_match(doc: DocId) -> TantivyError {
/// representation of this tree when debugging a given score.
#[derive(Clone, Serialize)]
pub struct Explanation {
value: f32,
value: Score,
description: String,
#[serde(skip_serializing_if = "Vec::is_empty")]
details: Vec<Explanation>,
@@ -27,7 +27,7 @@ impl fmt::Debug for Explanation {
impl Explanation {
/// Creates a new explanation object.
pub fn new<T: ToString>(description: T, value: f32) -> Explanation {
pub fn new<T: ToString>(description: T, value: Score) -> Explanation {
Explanation {
value,
description: description.to_string(),
@@ -36,7 +36,7 @@ impl Explanation {
}
/// Returns the value associated to the current node.
pub fn value(&self) -> f32 {
pub fn value(&self) -> Score {
self.value
}
@@ -48,7 +48,7 @@ impl Explanation {
}
/// Shortcut for `self.details.push(Explanation::new(name, value));`
pub fn add_const<T: ToString>(&mut self, name: T, value: f32) {
pub fn add_const<T: ToString>(&mut self, name: T, value: Score) {
self.details.push(Explanation::new(name, value));
}

View File

@@ -199,7 +199,7 @@ mod test {
.unwrap();
assert_eq!(top_docs.len(), 1, "Expected only 1 document");
let (score, _) = top_docs[0];
assert_nearly_equals!(1f32, score);
assert_nearly_equals!(1.0, score);
}
// fails because non-prefix Levenshtein distance is more than 1 (add 'a' and 'n')
@@ -223,7 +223,7 @@ mod test {
.unwrap();
assert_eq!(top_docs.len(), 1, "Expected only 1 document");
let (score, _) = top_docs[0];
assert_nearly_equals!(1f32, score);
assert_nearly_equals!(1.0, score);
}
}
}

View File

@@ -79,7 +79,7 @@ pub mod tests {
.collect();
let phrase_query = PhraseQuery::new(terms);
let phrase_weight = phrase_query.phrase_weight(&searcher, false)?;
let mut phrase_scorer = phrase_weight.scorer(searcher.segment_reader(0), 1.0f32)?;
let mut phrase_scorer = phrase_weight.scorer(searcher.segment_reader(0), 1.0)?;
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.advance(), TERMINATED);
Ok(())

View File

@@ -3,7 +3,7 @@ use crate::fieldnorm::FieldNormReader;
use crate::postings::Postings;
use crate::query::bm25::BM25Weight;
use crate::query::{Intersection, Scorer};
use crate::DocId;
use crate::{DocId, Score};
use std::cmp::Ordering;
struct PostingsWithOffset<TPostings> {
@@ -257,7 +257,7 @@ impl<TPostings: Postings> DocSet for PhraseScorer<TPostings> {
}
impl<TPostings: Postings> Scorer for PhraseScorer<TPostings> {
fn score(&mut self) -> f32 {
fn score(&mut self) -> Score {
let doc = self.doc();
let fieldnorm_id = self.fieldnorm_reader.fieldnorm_id(doc);
self.similarity_weight

View File

@@ -9,8 +9,8 @@ use crate::query::Weight;
use crate::query::{EmptyScorer, Explanation};
use crate::schema::IndexRecordOption;
use crate::schema::Term;
use crate::Result;
use crate::{DocId, DocSet};
use crate::{Result, Score};
pub struct PhraseWeight {
phrase_terms: Vec<(usize, Term)>,
@@ -40,7 +40,7 @@ impl PhraseWeight {
fn phrase_scorer(
&self,
reader: &SegmentReader,
boost: f32,
boost: Score,
) -> Result<Option<PhraseScorer<SegmentPostings>>> {
let similarity_weight = self.similarity_weight.boost_by(boost);
let fieldnorm_reader = self.fieldnorm_reader(reader);
@@ -85,7 +85,7 @@ impl PhraseWeight {
}
impl Weight for PhraseWeight {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(Box::new(scorer))
} else {
@@ -94,7 +94,7 @@ impl Weight for PhraseWeight {
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let scorer_opt = self.phrase_scorer(reader, 1.0f32)?;
let scorer_opt = self.phrase_scorer(reader, 1.0)?;
if scorer_opt.is_none() {
return Err(does_not_match(doc));
}
@@ -130,7 +130,7 @@ mod tests {
]);
let phrase_weight = phrase_query.phrase_weight(&searcher, true).unwrap();
let mut phrase_scorer = phrase_weight
.phrase_scorer(searcher.segment_reader(0u32), 1.0f32)
.phrase_scorer(searcher.segment_reader(0u32), 1.0)
.unwrap()
.unwrap();
assert_eq!(phrase_scorer.doc(), 1);

View File

@@ -40,7 +40,7 @@ use std::fmt;
///
/// When implementing a new type of `Query`, it is normal to implement a
/// dedicated `Query`, `Weight` and `Scorer`.
pub trait Query: QueryClone + downcast_rs::Downcast + fmt::Debug {
pub trait Query: QueryClone + Send + Sync + downcast_rs::Downcast + fmt::Debug {
/// Create the weight associated to a query.
///
/// If scoring is not required, setting `scoring_enabled` to `false`

View File

@@ -2,6 +2,7 @@ use crate::query::Occur;
use crate::schema::Field;
use crate::schema::Term;
use crate::schema::Type;
use crate::Score;
use std::fmt;
use std::ops::Bound;
@@ -21,12 +22,12 @@ pub enum LogicalLiteral {
pub enum LogicalAST {
Clause(Vec<(Occur, LogicalAST)>),
Leaf(Box<LogicalLiteral>),
Boost(Box<LogicalAST>, f32),
Boost(Box<LogicalAST>, Score),
}
impl LogicalAST {
pub fn boost(self, boost: f32) -> LogicalAST {
if (boost - 1.0f32).abs() < std::f32::EPSILON {
pub fn boost(self, boost: Score) -> LogicalAST {
if (boost - 1.0).abs() < Score::EPSILON {
self
} else {
LogicalAST::Boost(Box::new(self), boost)

View File

@@ -12,6 +12,7 @@ use crate::schema::{Facet, IndexRecordOption};
use crate::schema::{Field, Schema};
use crate::schema::{FieldType, Term};
use crate::tokenizer::TokenizerManager;
use crate::Score;
use std::borrow::Cow;
use std::collections::HashMap;
use std::num::{ParseFloatError, ParseIntError};
@@ -172,7 +173,7 @@ pub struct QueryParser {
default_fields: Vec<Field>,
conjunction_by_default: bool,
tokenizer_manager: TokenizerManager,
boost: HashMap<Field, f32>,
boost: HashMap<Field, Score>,
}
fn all_negative(ast: &LogicalAST) -> bool {
@@ -228,7 +229,7 @@ impl QueryParser {
/// If the query defines a query boost through the query language (e.g: `country:France^3.0`),
/// the two boosts (the one defined in the query, and the one defined in the `QueryParser`)
/// are multiplied together.
pub fn set_field_boost(&mut self, field: Field, boost: f32) {
pub fn set_field_boost(&mut self, field: Field, boost: Score) {
self.boost.insert(field, boost);
}
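A usage sketch (field handles illustrative); the parser-level boost multiplies with any `^` boost written in the query itself:
let mut query_parser = QueryParser::for_index(&index, vec![title, body]);
query_parser.set_field_boost(title, 2.0);
// A match on `title` now contributes with boost 2.0; `title:hello^3` would contribute with 6.0.
let query = query_parser.parse_query("hello").unwrap();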
@@ -440,14 +441,14 @@ impl QueryParser {
}
UserInputAST::Boost(ast, boost) => {
let ast = self.compute_logical_ast_with_occur(*ast)?;
Ok(ast.boost(boost))
Ok(ast.boost(boost as Score))
}
UserInputAST::Leaf(leaf) => self.compute_logical_ast_from_leaf(*leaf),
}
}
fn field_boost(&self, field: Field) -> f32 {
self.boost.get(&field).cloned().unwrap_or(1.0f32)
fn field_boost(&self, field: Field) -> Score {
self.boost.get(&field).cloned().unwrap_or(1.0)
}
fn compute_logical_ast_from_leaf(
@@ -658,7 +659,7 @@ mod test {
let mut query_parser = make_query_parser();
let schema = make_schema();
let text_field = schema.get_field("text").unwrap();
query_parser.set_field_boost(text_field, 2.0f32);
query_parser.set_field_boost(text_field, 2.0);
let query = query_parser.parse_query("text:hello").unwrap();
assert_eq!(
format!("{:?}", query),
@@ -671,7 +672,7 @@ mod test {
let mut query_parser = make_query_parser();
let schema = make_schema();
let title_field = schema.get_field("title").unwrap();
query_parser.set_field_boost(title_field, 2.0f32);
query_parser.set_field_boost(title_field, 2.0);
let query = query_parser.parse_query("title:[A TO B]").unwrap();
assert_eq!(
format!("{:?}", query),
@@ -684,7 +685,7 @@ mod test {
let mut query_parser = make_query_parser();
let schema = make_schema();
let text_field = schema.get_field("text").unwrap();
query_parser.set_field_boost(text_field, 2.0f32);
query_parser.set_field_boost(text_field, 2.0);
let query = query_parser.parse_query("text:hello^2").unwrap();
assert_eq!(
format!("{:?}", query),

View File

@@ -9,8 +9,8 @@ use crate::query::{Query, Scorer, Weight};
use crate::schema::Type;
use crate::schema::{Field, IndexRecordOption, Term};
use crate::termdict::{TermDictionary, TermStreamer};
use crate::DocId;
use crate::Result;
use crate::{DocId, Score};
use std::collections::Bound;
use std::ops::Range;
@@ -289,7 +289,7 @@ impl RangeWeight {
}
impl Weight for RangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);
@@ -316,11 +316,11 @@ impl Weight for RangeWeight {
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0f32)?;
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
Ok(Explanation::new("RangeQuery", 1.0f32))
Ok(Explanation::new("RangeQuery", 1.0))
}
}
@@ -328,8 +328,9 @@ impl Weight for RangeWeight {
mod tests {
use super::RangeQuery;
use crate::collector::Count;
use crate::schema::{Document, Field, Schema, INDEXED};
use crate::collector::{Count, TopDocs};
use crate::query::QueryParser;
use crate::schema::{Document, Field, Schema, INDEXED, TEXT};
use crate::Index;
use std::collections::Bound;
@@ -476,4 +477,28 @@ mod tests {
91
);
}
#[test]
fn test_bug_reproduce_range_query() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("title", TEXT);
schema_builder.add_i64_field("year", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
let title = schema.get_field("title").unwrap();
let year = schema.get_field("year").unwrap();
index_writer.add_document(doc!(
title => "hemoglobin blood",
year => 1990 as i64
));
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![title]);
let query = query_parser.parse_query("hemoglobin AND year:[1970 TO 1990]")?;
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
assert_eq!(top_docs.len(), 1);
Ok(())
}
}

View File

@@ -129,7 +129,7 @@ mod test {
.unwrap();
assert_eq!(scored_docs.len(), 1, "Expected only 1 document");
let (score, _) = scored_docs[0];
assert_nearly_equals!(1f32, score);
assert_nearly_equals!(1.0, score);
}
let top_docs = searcher
.search(&query_matching_zero, &TopDocs::with_limit(2))

View File

@@ -112,47 +112,47 @@ mod tests {
fn test_reqopt_scorer() {
let mut reqoptscorer: RequiredOptionalScorer<_, _, SumCombiner> =
RequiredOptionalScorer::new(
ConstScorer::new(VecDocSet::from(vec![1, 3, 7, 8, 9, 10, 13, 15]), 1.0f32),
ConstScorer::new(VecDocSet::from(vec![1, 2, 7, 11, 12, 15]), 1.0f32),
ConstScorer::new(VecDocSet::from(vec![1, 3, 7, 8, 9, 10, 13, 15]), 1.0),
ConstScorer::new(VecDocSet::from(vec![1, 2, 7, 11, 12, 15]), 1.0),
);
{
assert_eq!(reqoptscorer.doc(), 1);
assert_eq!(reqoptscorer.score(), 2f32);
assert_eq!(reqoptscorer.score(), 2.0);
}
{
assert_eq!(reqoptscorer.advance(), 3);
assert_eq!(reqoptscorer.doc(), 3);
assert_eq!(reqoptscorer.score(), 1f32);
assert_eq!(reqoptscorer.score(), 1.0);
}
{
assert_eq!(reqoptscorer.advance(), 7);
assert_eq!(reqoptscorer.doc(), 7);
assert_eq!(reqoptscorer.score(), 2f32);
assert_eq!(reqoptscorer.score(), 2.0);
}
{
assert_eq!(reqoptscorer.advance(), 8);
assert_eq!(reqoptscorer.doc(), 8);
assert_eq!(reqoptscorer.score(), 1f32);
assert_eq!(reqoptscorer.score(), 1.0);
}
{
assert_eq!(reqoptscorer.advance(), 9);
assert_eq!(reqoptscorer.doc(), 9);
assert_eq!(reqoptscorer.score(), 1f32);
assert_eq!(reqoptscorer.score(), 1.0);
}
{
assert_eq!(reqoptscorer.advance(), 10);
assert_eq!(reqoptscorer.doc(), 10);
assert_eq!(reqoptscorer.score(), 1f32);
assert_eq!(reqoptscorer.score(), 1.0);
}
{
assert_eq!(reqoptscorer.advance(), 13);
assert_eq!(reqoptscorer.doc(), 13);
assert_eq!(reqoptscorer.score(), 1f32);
assert_eq!(reqoptscorer.score(), 1.0);
}
{
assert_eq!(reqoptscorer.advance(), 15);
assert_eq!(reqoptscorer.doc(), 15);
assert_eq!(reqoptscorer.score(), 2f32);
assert_eq!(reqoptscorer.score(), 2.0);
}
assert_eq!(reqoptscorer.advance(), TERMINATED);
}

View File

@@ -31,7 +31,7 @@ impl ScoreCombiner for DoNothingCombiner {
fn clear(&mut self) {}
fn score(&self) -> Score {
1f32
1.0
}
}
@@ -47,7 +47,7 @@ impl ScoreCombiner for SumCombiner {
}
fn clear(&mut self) {
self.score = 0f32;
self.score = 0.0;
}
fn score(&self) -> Score {
@@ -70,7 +70,7 @@ impl ScoreCombiner for SumWithCoordsCombiner {
}
fn clear(&mut self) {
self.score = 0f32;
self.score = 0.0;
self.num_fields = 0;
}

View File

@@ -35,14 +35,14 @@ pub struct ConstScorer<TDocSet: DocSet> {
impl<TDocSet: DocSet> ConstScorer<TDocSet> {
/// Creates a new `ConstScorer`.
pub fn new(docset: TDocSet, score: f32) -> ConstScorer<TDocSet> {
pub fn new(docset: TDocSet, score: Score) -> ConstScorer<TDocSet> {
ConstScorer { docset, score }
}
}
impl<TDocSet: DocSet> From<TDocSet> for ConstScorer<TDocSet> {
fn from(docset: TDocSet) -> Self {
ConstScorer::new(docset, 1.0f32)
ConstScorer::new(docset, 1.0)
}
}
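`Score` is tantivy's type alias for `f32`, which is why the `f32` suffixes can simply be dropped across this changeset. Usage is unchanged; a short sketch reusing the `VecDocSet` helper seen in the tests above:

    // A ConstScorer obtained through `From` gives every document a score of 1.0.
    let mut scorer = ConstScorer::from(VecDocSet::from(vec![0, 2, 5]));
    assert_eq!(scorer.doc(), 0);
    assert_eq!(scorer.score(), 1.0);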

View File

@@ -15,7 +15,7 @@ mod tests {
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::query::{Query, QueryParser, Scorer, TermQuery};
use crate::schema::{Field, IndexRecordOption, Schema, STRING, TEXT};
use crate::{Term, Index, TERMINATED};
use crate::{Index, Term, TERMINATED};
#[test]
pub fn test_term_query_no_freq() {
@@ -37,9 +37,9 @@ mod tests {
);
let term_weight = term_query.weight(&searcher, true).unwrap();
let segment_reader = searcher.segment_reader(0);
let mut term_scorer = term_weight.scorer(segment_reader, 1.0f32).unwrap();
let mut term_scorer = term_weight.scorer(segment_reader, 1.0).unwrap();
assert_eq!(term_scorer.doc(), 0);
assert_eq!(term_scorer.score(), 0.28768212);
assert_nearly_equals!(term_scorer.score(), 0.28768212);
}
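The `boost` argument threaded through `scorer()` is a plain multiplier on the returned scores, so the same weight scored with a boost of 2.0 should yield exactly double the figure asserted above. A sketch under that assumption:

    // Sketch: boost scales the BM25 score (0.28768212 asserted above).
    let mut boosted_scorer = term_weight.scorer(segment_reader, 2.0).unwrap();
    assert_nearly_equals!(boosted_scorer.score(), 2.0 * 0.28768212);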
#[test]
@@ -64,7 +64,7 @@ mod tests {
);
let term_weight = term_query.weight(&searcher, true)?;
let segment_reader = searcher.segment_reader(0);
let mut term_scorer = term_weight.scorer(segment_reader, 1.0f32)?;
let mut term_scorer = term_weight.scorer(segment_reader, 1.0)?;
for i in 0u32..COMPRESSION_BLOCK_SIZE as u32 {
assert_eq!(term_scorer.doc(), i);
if i == COMPRESSION_BLOCK_SIZE as u32 - 1u32 {
@@ -161,7 +161,7 @@ mod tests {
let term_query = TermQuery::new(term_a, IndexRecordOption::Basic);
let searcher = index.reader()?.searcher();
let term_weight = term_query.weight(&searcher, false)?;
let mut term_scorer = term_weight.scorer(searcher.segment_reader(0u32), 1.0f32)?;
let mut term_scorer = term_weight.scorer(searcher.segment_reader(0u32), 1.0)?;
assert_eq!(term_scorer.doc(), 0u32);
term_scorer.seek(1u32);
assert_eq!(term_scorer.doc(), 1u32);

View File

@@ -8,6 +8,7 @@ use crate::postings::SegmentPostings;
use crate::postings::{FreqReadingOption, Postings};
use crate::query::bm25::BM25Weight;
#[derive(Clone)]
pub struct TermScorer {
postings: SegmentPostings,
fieldnorm_reader: FieldNormReader,
@@ -28,34 +29,28 @@ impl TermScorer {
}
pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
self.postings.block_cursor.shallow_seek(target_doc)
self.postings.block_cursor.shallow_seek(target_doc);
}
#[cfg(test)]
pub fn create_for_test(
doc_and_tfs: &[(DocId, u32)],
fieldnorm_vals: &[u32],
fieldnorms: &[u32],
similarity_weight: BM25Weight,
) -> crate::Result<TermScorer> {
) -> TermScorer {
assert!(!doc_and_tfs.is_empty());
assert!(doc_and_tfs.len() <= fieldnorm_vals.len());
let doc_freq = doc_and_tfs.len();
let max_doc = doc_and_tfs.last().unwrap().0 + 1;
let mut fieldnorms: Vec<u32> = std::iter::repeat(1).take(max_doc as usize).collect();
for i in 0..doc_freq {
let doc = doc_and_tfs[i].0;
let fieldnorm = fieldnorm_vals[i];
fieldnorms[doc as usize] = fieldnorm;
}
let fieldnorm_reader = FieldNormReader::from(&fieldnorms[..]);
assert!(
doc_and_tfs
.iter()
.map(|(doc, _tf)| *doc)
.max()
.unwrap_or(0u32)
< fieldnorms.len() as u32
);
let segment_postings =
SegmentPostings::create_from_docs_and_tfs(doc_and_tfs, Some(fieldnorm_reader.clone()))?;
Ok(TermScorer::new(segment_postings, fieldnorm_reader, similarity_weight))
SegmentPostings::create_from_docs_and_tfs(doc_and_tfs, Some(fieldnorms));
let fieldnorm_reader = FieldNormReader::for_test(fieldnorms);
TermScorer::new(segment_postings, fieldnorm_reader, similarity_weight)
}
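Note the changed contract of `create_for_test`: `fieldnorms` is now indexed directly by `DocId` (one entry per document of the segment) instead of running parallel to `doc_and_tfs`, and the new assertion guards exactly that. This is why the call site below switches from `&[10, 12, 100]` to `&[0, 0, 10, 12, 0, 0, 0, 100]` for docs 2, 3 and 7.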
/// See `FreqReadingOption`.
@@ -86,10 +81,6 @@ impl TermScorer {
self.postings.term_freq()
}
pub fn doc_freq(&self) -> usize {
self.postings.doc_freq() as usize
}
pub fn fieldnorm_id(&self) -> u8 {
self.fieldnorm_reader.fieldnorm_id(self.doc())
}
@@ -100,7 +91,7 @@ impl TermScorer {
self.similarity_weight.explain(fieldnorm_id, term_freq)
}
pub fn max_score(&self) -> f32 {
pub fn max_score(&self) -> Score {
self.similarity_weight.max_score()
}
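`max_score()` is the global upper bound for this term, while `block_max_score()` bounds only the current block; Block-WAND compares those bounds against the collector's current threshold to skip whole blocks without decoding them. A rough sketch of the idea (illustration only; `scorer`, `threshold` and `callback` are stand-ins, and `last_doc_in_block()` is assumed to expose the skip-list information):

    // Not the actual implementation: just the shape of the pruning loop.
    let mut doc = scorer.doc();
    while doc != TERMINATED {
        if scorer.block_max_score() > threshold {
            // This block may hold a competitive doc: score it for real.
            threshold = threshold.max(callback(doc, scorer.score()));
            doc = scorer.advance();
        } else {
            // Even the block's best score cannot compete: jump past it.
            doc = scorer.seek(scorer.last_doc_in_block() + 1);
        }
    }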
@@ -137,48 +128,53 @@ impl Scorer for TermScorer {
#[cfg(test)]
mod tests {
use crate::assert_nearly_equals;
use crate::merge_policy::NoMergePolicy;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::query::term_query::TermScorer;
use crate::query::{BM25Weight, Scorer};
use crate::query::{BM25Weight, Scorer, TermQuery};
use crate::schema::{IndexRecordOption, Schema, TEXT};
use crate::Score;
use crate::{assert_nearly_equals, Index, Searcher, SegmentId, Term};
use crate::{DocId, DocSet, TERMINATED};
use futures::executor::block_on;
use proptest::prelude::*;
#[test]
fn test_term_scorer_max_score() -> crate::Result<()> {
let bm25_weight = BM25Weight::for_one_term(3, 6, 10f32);
let mut term_scorer =
TermScorer::create_for_test(&[(2, 3), (3, 12), (7, 8)], &[10, 12, 100], bm25_weight)?;
let bm25_weight = BM25Weight::for_one_term(3, 6, 10.0);
let mut term_scorer = TermScorer::create_for_test(
&[(2, 3), (3, 12), (7, 8)],
&[0, 0, 10, 12, 0, 0, 0, 100],
bm25_weight,
);
let max_scorer = term_scorer.max_score();
assert_eq!(max_scorer, 1.3990127f32);
crate::assert_nearly_equals!(max_scorer, 1.3990127);
assert_eq!(term_scorer.doc(), 2);
assert_eq!(term_scorer.term_freq(), 3);
assert_nearly_equals!(term_scorer.block_max_score(), 1.3676447f32);
assert_nearly_equals!(term_scorer.score(), 1.0892314f32);
assert_nearly_equals!(term_scorer.block_max_score(), 1.3676447);
assert_nearly_equals!(term_scorer.score(), 1.0892314);
assert_eq!(term_scorer.advance(), 3);
assert_eq!(term_scorer.doc(), 3);
assert_eq!(term_scorer.term_freq(), 12);
assert_nearly_equals!(term_scorer.score(), 1.3676447f32);
assert_nearly_equals!(term_scorer.score(), 1.3676447);
assert_eq!(term_scorer.advance(), 7);
assert_eq!(term_scorer.doc(), 7);
assert_eq!(term_scorer.term_freq(), 8);
assert_nearly_equals!(term_scorer.score(), 0.72015285f32);
assert_nearly_equals!(term_scorer.score(), 0.72015285);
assert_eq!(term_scorer.advance(), TERMINATED);
Ok(())
}
#[test]
fn test_term_scorer_shallow_advance() -> crate::Result<()> {
let bm25_weight = BM25Weight::for_one_term(300, 1024, 10f32);
let bm25_weight = BM25Weight::for_one_term(300, 1024, 10.0);
let mut doc_and_tfs = vec![];
for i in 0u32..300u32 {
let doc = i * 10;
doc_and_tfs.push((doc, 1u32 + doc % 3u32));
}
let fieldnorms: Vec<u32> = std::iter::repeat(10u32).take(1024).collect();
let mut term_scorer =
TermScorer::create_for_test(&doc_and_tfs, &fieldnorms, bm25_weight)?;
let fieldnorms: Vec<u32> = std::iter::repeat(10u32).take(3_000).collect();
let mut term_scorer = TermScorer::create_for_test(&doc_and_tfs, &fieldnorms, bm25_weight);
assert_eq!(term_scorer.doc(), 0u32);
term_scorer.shallow_seek(1289);
assert_eq!(term_scorer.doc(), 0u32);
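`shallow_seek` only advances the skip-list cursor so that block-level metadata such as `block_max_score` can be inspected cheaply; the document cursor does not move until a real `seek()` or `advance()` decodes the target block, which is why `doc()` still returns 0 right after `shallow_seek(1289)`.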
@@ -205,7 +201,7 @@ mod tests {
let average_fieldnorm = fieldnorms
.iter()
.cloned()
.sum::<u32>() as f32 / term_doc_freq as f32;
.sum::<u32>() as Score / term_doc_freq as Score;
// The average fieldnorm is computed over the entire index,
// not just over the docs present in the posting list.
// We multiply by 1.1 to get a realistic value.
@@ -214,12 +210,12 @@ mod tests {
average_fieldnorm);
let mut term_scorer =
TermScorer::create_for_test(&doc_tfs[..], &fieldnorms[..], bm25_weight).unwrap();
TermScorer::create_for_test(&doc_tfs[..], &fieldnorms[..], bm25_weight);
let docs: Vec<DocId> = (0..term_doc_freq).map(|doc| doc as DocId).collect();
for block in docs.chunks(COMPRESSION_BLOCK_SIZE) {
let block_max_score = term_scorer.block_max_score();
let mut block_max_score_computed = 0.0f32;
let block_max_score: Score = term_scorer.block_max_score();
let mut block_max_score_computed: Score = 0.0;
for &doc in block {
assert_eq!(term_scorer.doc(), doc);
block_max_score_computed = block_max_score_computed.max(term_scorer.score());
@@ -229,4 +225,113 @@ mod tests {
}
}
}
#[test]
fn test_block_wand() {
let mut doc_tfs: Vec<(u32, u32)> = vec![];
for doc in 0u32..128u32 {
doc_tfs.push((doc, 1u32));
}
for doc in 128u32..256u32 {
doc_tfs.push((doc, if doc == 200 { 2u32 } else { 1u32 }));
}
doc_tfs.push((256, 1u32));
doc_tfs.push((257, 3u32));
doc_tfs.push((258, 1u32));
let fieldnorms: Vec<u32> = std::iter::repeat(20u32).take(300).collect();
let bm25_weight = BM25Weight::for_one_term(10, 129, 20.0);
let mut docs = TermScorer::create_for_test(&doc_tfs[..], &fieldnorms[..], bm25_weight);
assert_nearly_equals!(docs.block_max_score(), 2.5161593);
docs.shallow_seek(135);
assert_nearly_equals!(docs.block_max_score(), 3.4597192);
docs.shallow_seek(256);
// The block is not loaded yet, so only a coarse upper bound is available.
assert_nearly_equals!(docs.block_max_score(), 5.2971773);
assert_eq!(256, docs.seek(256));
assert_nearly_equals!(docs.block_max_score(), 3.9539647);
}
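The three bounds line up with the 128-doc block layout: the first block (docs 0-127, all tf = 1) yields 2.5161593, the second (docs 128-255) is lifted to 3.4597192 by the tf = 2 posting at doc 200, and the final partial block (docs 256-258) to 3.9539647 by the tf = 3 posting at doc 257. Before that last block is decoded, `block_max_score` can only return a coarser upper bound (5.2971773); it tightens once `seek(256)` actually loads the block.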
fn test_block_wand_aux(term_query: &TermQuery, searcher: &Searcher) -> crate::Result<()> {
let term_weight = term_query.specialized_weight(&searcher, true);
for reader in searcher.segment_readers() {
let mut block_max_scores = vec![];
let mut block_max_scores_b = vec![];
let mut docs = vec![];
{
let mut term_scorer = term_weight.specialized_scorer(reader, 1.0)?;
while term_scorer.doc() != TERMINATED {
let mut score = term_scorer.score();
docs.push(term_scorer.doc());
for _ in 0..128 {
score = score.max(term_scorer.score());
if term_scorer.advance() == TERMINATED {
break;
}
}
block_max_scores.push(score);
}
}
{
let mut term_scorer = term_weight.specialized_scorer(reader, 1.0)?;
for d in docs {
term_scorer.shallow_seek(d);
block_max_scores_b.push(term_scorer.block_max_score());
}
}
for (l, r) in block_max_scores
.iter()
.cloned()
.zip(block_max_scores_b.iter().cloned())
{
assert_nearly_equals!(l, r);
}
}
Ok(())
}
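This helper cross-checks the two code paths: the first pass computes, for each sampled document (one per run of 128 postings), the true maximum score over that run; the second pass asks for the same information via `shallow_seek` plus `block_max_score` without decoding the blocks, and the two must agree.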
#[ignore]
#[test]
fn test_block_wand_long_test() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut writer = index.writer_with_num_threads(3, 30_000_000)?;
use rand::Rng;
let mut rng = rand::thread_rng();
writer.set_merge_policy(Box::new(NoMergePolicy));
for _ in 0..3_000 {
let term_freq = rng.gen_range(1, 10000);
let words: Vec<&str> = std::iter::repeat("bbbb").take(term_freq).collect();
let text = words.join(" ");
writer.add_document(doc!(text_field=>text));
}
writer.commit()?;
let term_query = TermQuery::new(
Term::from_field_text(text_field, &"bbbb"),
IndexRecordOption::WithFreqs,
);
let segment_ids: Vec<SegmentId>;
let reader = index.reader()?;
{
let searcher = reader.searcher();
segment_ids = searcher
.segment_readers()
.iter()
.map(|segment| segment.segment_id())
.collect();
test_block_wand_aux(&term_query, &searcher)?;
}
{
let _ = block_on(writer.merge(&segment_ids[..]));
}
{
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
test_block_wand_aux(&term_query, &searcher)?;
}
Ok(())
}
}

View File

@@ -19,13 +19,13 @@ pub struct TermWeight {
}
impl Weight for TermWeight {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> Result<Box<dyn Scorer>> {
let term_scorer = self.specialized_scorer(reader, boost)?;
Ok(Box::new(term_scorer))
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.specialized_scorer(reader, 1.0f32)?;
let mut scorer = self.specialized_scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
@@ -34,7 +34,7 @@ impl Weight for TermWeight {
fn count(&self, reader: &SegmentReader) -> Result<u32> {
if let Some(delete_bitset) = reader.delete_bitset() {
Ok(self.scorer(reader, 1.0f32)?.count(delete_bitset))
Ok(self.scorer(reader, 1.0)?.count(delete_bitset))
} else {
let field = self.term.field();
Ok(reader
@@ -52,7 +52,7 @@ impl Weight for TermWeight {
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let mut scorer = self.specialized_scorer(reader, 1.0f32)?;
let mut scorer = self.specialized_scorer(reader, 1.0)?;
for_each_scorer(&mut scorer, callback);
Ok(())
}
@@ -69,11 +69,11 @@ impl Weight for TermWeight {
/// important optimization (e.g. BlockWAND for union).
fn for_each_pruning(
&self,
threshold: f32,
threshold: Score,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0f32)?;
let mut scorer = self.scorer(reader, 1.0)?;
for_each_pruning_scorer(&mut scorer, threshold, callback);
Ok(())
}
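The value returned by the pruning callback becomes the new threshold, which is what lets scorers skip non-competitive blocks. A minimal sketch of a caller, a hypothetical top-1 collector over a single segment (illustration only):

    // Hypothetical top-1 collection via for_each_pruning.
    let mut best: (DocId, Score) = (0, Score::MIN);
    term_weight.for_each_pruning(Score::MIN, reader, &mut |doc, score| {
        if score > best.1 {
            best = (doc, score);
        }
        best.1 // the returned value is the new pruning threshold
    })?;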
@@ -92,7 +92,11 @@ impl TermWeight {
}
}
pub fn specialized_scorer(&self, reader: &SegmentReader, boost: f32) -> Result<TermScorer> {
pub(crate) fn specialized_scorer(
&self,
reader: &SegmentReader,
boost: Score,
) -> Result<TermScorer> {
let field = self.term.field();
let inverted_index = reader.inverted_index(field);
let fieldnorm_reader = reader.get_fieldnorms_reader(field);

View File

@@ -55,7 +55,7 @@ where
cursor: HORIZON_NUM_TINYBITSETS,
offset: 0,
doc: 0,
score: 0f32,
score: 0.0,
};
if union.refill() {
union.advance();
@@ -274,7 +274,7 @@ mod tests {
vals.iter()
.cloned()
.map(VecDocSet::from)
.map(|docset| ConstScorer::new(docset, 1.0f32))
.map(|docset| ConstScorer::new(docset, 1.0))
.collect::<Vec<ConstScorer<VecDocSet>>>(),
)
};
@@ -321,7 +321,7 @@ mod tests {
.iter()
.map(|docs| docs.clone())
.map(VecDocSet::from)
.map(|docset| ConstScorer::new(docset, 1.0f32))
.map(|docset| ConstScorer::new(docset, 1.0))
.collect::<Vec<_>>(),
));
res

View File

@@ -28,7 +28,7 @@ pub(crate) fn for_each_scorer<TScorer: Scorer + ?Sized>(
/// important optimization (e.g. BlockWAND for union).
pub(crate) fn for_each_pruning_scorer<TScorer: Scorer + ?Sized>(
scorer: &mut TScorer,
mut threshold: f32,
mut threshold: Score,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) {
let mut doc = scorer.doc();
@@ -51,14 +51,14 @@ pub trait Weight: Send + Sync + 'static {
/// `boost` is a multiplier to apply to the score.
///
/// See [`Query`](./trait.Query.html).
fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>>;
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>>;
/// Returns an `Explanation` for the given document.
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation>;
/// Returns the number of documents within the given `SegmentReader`.
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
let mut scorer = self.scorer(reader, 1.0f32)?;
let mut scorer = self.scorer(reader, 1.0)?;
if let Some(delete_bitset) = reader.delete_bitset() {
Ok(scorer.count(delete_bitset))
} else {
@@ -73,7 +73,7 @@ pub trait Weight: Send + Sync + 'static {
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0f32)?;
let mut scorer = self.scorer(reader, 1.0)?;
for_each_scorer(scorer.as_mut(), callback);
Ok(())
}
@@ -90,11 +90,11 @@ pub trait Weight: Send + Sync + 'static {
/// important optimization (e.g. BlockWAND for union).
fn for_each_pruning(
&self,
threshold: f32,
threshold: Score,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0f32)?;
let mut scorer = self.scorer(reader, 1.0)?;
for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
Ok(())
}

View File

@@ -2,8 +2,8 @@ use crate::query::Query;
use crate::schema::Field;
use crate::schema::Value;
use crate::tokenizer::{TextAnalyzer, Token};
use crate::Document;
use crate::Searcher;
use crate::{Document, Score};
use htmlescape::encode_minimal;
use std::cmp::Ordering;
use std::collections::BTreeMap;
@@ -30,7 +30,7 @@ impl HighlightSection {
#[derive(Debug)]
pub struct FragmentCandidate {
score: f32,
score: Score,
start_offset: usize,
stop_offset: usize,
num_chars: usize,
@@ -58,7 +58,7 @@ impl FragmentCandidate {
/// Adds the token to the fragment.
/// If the token matches one of the terms, the fragment's score
/// and highlighted fields are updated.
fn try_add_token(&mut self, token: &Token, terms: &BTreeMap<String, f32>) {
fn try_add_token(&mut self, token: &Token, terms: &BTreeMap<String, Score>) {
self.stop_offset = token.offset_to;
if let Some(&score) = terms.get(&token.text.to_lowercase()) {
@@ -142,7 +142,7 @@ impl Snippet {
fn search_fragments<'a>(
tokenizer: &TextAnalyzer,
text: &'a str,
terms: &BTreeMap<String, f32>,
terms: &BTreeMap<String, Score>,
max_num_chars: usize,
) -> Vec<FragmentCandidate> {
let mut token_stream = tokenizer.token_stream(text);
@@ -248,7 +248,7 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
/// # }
/// ```
pub struct SnippetGenerator {
terms_text: BTreeMap<String, f32>,
terms_text: BTreeMap<String, Score>,
tokenizer: TextAnalyzer,
field: Field,
max_num_chars: usize,
@@ -263,12 +263,12 @@ impl SnippetGenerator {
) -> crate::Result<SnippetGenerator> {
let mut terms = BTreeSet::new();
query.query_terms(&mut terms);
let terms_text: BTreeMap<String, f32> = terms
let terms_text: BTreeMap<String, Score> = terms
.into_iter()
.filter(|term| term.field() == field)
.flat_map(|term| {
let doc_freq = searcher.doc_freq(&term);
let score = 1f32 / (1f32 + doc_freq as f32);
let score = 1.0 / (1.0 + doc_freq as Score);
if doc_freq > 0 {
Some((term.text().to_string(), score))
} else {
@@ -291,7 +291,7 @@ impl SnippetGenerator {
}
#[cfg(test)]
pub fn terms_text(&self) -> &BTreeMap<String, f32> {
pub fn terms_text(&self) -> &BTreeMap<String, Score> {
&self.terms_text
}
@@ -373,8 +373,8 @@ Survey in 2016, 2017, and 2018."#;
fn test_snippet_scored_fragment() {
{
let terms = btreemap! {
String::from("rust") =>1.0f32,
String::from("language") => 0.9f32
String::from("rust") =>1.0,
String::from("language") => 0.9
};
let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 20);
{
@@ -387,8 +387,8 @@ Survey in 2016, 2017, and 2018."#;
}
{
let terms = btreemap! {
String::from("rust") =>0.9f32,
String::from("language") => 1.0f32
String::from("rust") =>0.9,
String::from("language") => 1.0
};
let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 20);
//assert_eq!(fragments.len(), 7);
@@ -525,7 +525,7 @@ Survey in 2016, 2017, and 2018."#;
let snippet_generator =
SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
assert_eq!(
&btreemap!("a".to_string() => 0.25f32),
&btreemap!("a".to_string() => 0.25),
snippet_generator.terms_text()
);
}
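The asserted weights follow from the `1.0 / (1.0 + doc_freq)` formula above: the 0.25 for "a" corresponds to a doc_freq of 3, and the 0.5 asserted for "b" in the neighboring tests to a doc_freq of 1.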
@@ -534,7 +534,7 @@ Survey in 2016, 2017, and 2018."#;
let snippet_generator =
SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
assert_eq!(
&btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5),
&btreemap!("a".to_string() => 0.25, "b".to_string() => 0.5),
snippet_generator.terms_text()
);
}
@@ -543,7 +543,7 @@ Survey in 2016, 2017, and 2018."#;
let snippet_generator =
SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
assert_eq!(
&btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5),
&btreemap!("a".to_string() => 0.25, "b".to_string() => 0.5),
snippet_generator.terms_text()
);
}