mirror of https://github.com/quickwit-oss/tantivy.git (synced 2025-12-28 21:12:54 +00:00)

Compare commits: 2 commits (22a702c17f, 14f0c6d01a)
@@ -4,8 +4,6 @@ Tantivy 0.12.0
 - Added backward iteration for `TermDictionary` stream. (@halvorboe)
 - Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
 - Added a configurable maximum number of docs (10M by default) for a segment to be considered for merge (@hntd187, landed by @halvorboe #713)
 - Important Bugfix #777, causing tantivy to retain memory mapping. (diagnosed by @poljar)
-- Added support for field boosting. (#547, @fulmicoton)
-
 
 ## How to update?
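Taken together, the hunks below revert tantivy's field-boosting feature (#547): `Weight::scorer` loses its `boost: f32` parameter, and the `BoostQuery`/`BoostScorer` wrappers disappear. As orientation, here is a minimal, dependency-free sketch (hypothetical stand-in types, not tantivy's real ones) of the score multiplication that the removed `BoostScorer` performed:

```rust
// Stand-in trait and types to illustrate the reverted mechanism: a boost
// wrapper multiplies whatever score the underlying scorer reports.
trait Scorer {
    fn score(&mut self) -> f32;
}

struct ConstScorer(f32);
impl Scorer for ConstScorer {
    fn score(&mut self) -> f32 {
        self.0
    }
}

struct BoostScorer<S: Scorer> {
    underlying: S,
    boost: f32,
}
impl<S: Scorer> Scorer for BoostScorer<S> {
    fn score(&mut self) -> f32 {
        self.underlying.score() * self.boost
    }
}

fn main() {
    let mut boosted = BoostScorer { underlying: ConstScorer(1.0), boost: 2.0 };
    assert_eq!(boosted.score(), 2.0);
}
```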
@@ -21,7 +21,7 @@ regex ={version = "1.3.0", default-features = false, features = ["std"]}
 tantivy-fst = "0.2.1"
 memmap = {version = "0.7", optional=true}
 lz4 = {version="1.20", optional=true}
-snap = "1"
+snap = {version="0.2"}
 atomicwrites = {version="0.2.2", optional=true}
 tempfile = "3.0"
 log = "0.4"
@@ -40,7 +40,7 @@ owning_ref = "0.4"
 stable_deref_trait = "1.0.0"
 rust-stemmers = "1.2"
 downcast-rs = { version="1.0" }
-tantivy-query-grammar = { version="0.12", path="./query-grammar" }
+tantivy-query-grammar = { version="0.11", path="./query-grammar" }
 bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
 census = "0.4"
 fnv = "1.0.6"
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-query-grammar"
-version = "0.12.0"
+version = "0.11.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -13,4 +13,4 @@ keywords = ["search", "information", "retrieval"]
 edition = "2018"
 
 [dependencies]
-combine = "4"
+combine = ">=3.6.0,<4.0.0"
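The `combine` downgrade above is why so many hunks below touch trait bounds: combine 4 calls a stream's element type `Token`, while combine 3 calls it `Item`. A minimal sketch of the combine 3.x style the code returns to, modeled on the `field` parser in the next hunk (assumes a `combine = ">=3.6.0,<4.0.0"` dependency in Cargo.toml):

```rust
#[macro_use]
extern crate combine;
use combine::char::letter;
use combine::{many, satisfy, Parser, Stream};

parser! {
    // A field name: a letter followed by alphanumerics or underscores.
    fn field[I]()(I) -> String
    where [I: Stream<Item = char>] {
        (
            letter(),
            many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
        )
            .map(|(first, rest): (char, String)| format!("{}{}", first, rest))
    }
}

fn main() {
    let (name, rest) = field().parse("abc_1:").unwrap();
    assert_eq!(name, "abc_1");
    assert_eq!(rest, ":"); // the unconsumed remainder of the input
}
```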
@@ -1,15 +1,13 @@
 use super::user_input_ast::*;
 use crate::Occur;
+use combine::char::*;
 use combine::error::StreamError;
-use combine::parser::char::{char, digit, letter, space, spaces, string};
 use combine::stream::StreamErrorFor;
-use combine::{
-    attempt, choice, eof, many, many1, one_of, optional, parser, satisfy, skip_many1, value, Stream,
-};
+use combine::*;
 
 parser! {
     fn field[I]()(I) -> String
-    where [I: Stream<Token = char>] {
+    where [I: Stream<Item = char>] {
         (
             letter(),
             many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
@@ -19,10 +17,10 @@ parser! {
 
 parser! {
     fn word[I]()(I) -> String
-    where [I: Stream<Token = char>] {
+    where [I: Stream<Item = char>] {
         (
-            satisfy(|c: char| !c.is_whitespace() && !['-', '^', '`', ':', '{', '}', '"', '[', ']', '(',')'].contains(&c) ),
-            many(satisfy(|c: char| !c.is_whitespace() && ![':', '^', '{', '}', '"', '[', ']', '(',')'].contains(&c)))
+            satisfy(|c: char| !c.is_whitespace() && !['-', '`', ':', '{', '}', '"', '[', ']', '(',')'].contains(&c) ),
+            many(satisfy(|c: char| !c.is_whitespace() && ![':', '{', '}', '"', '[', ']', '(',')'].contains(&c)))
         )
             .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
             .and_then(|s: String|
@@ -37,7 +35,7 @@ parser! {
 
 parser! {
     fn literal[I]()(I) -> UserInputLeaf
-    where [I: Stream<Token = char>]
+    where [I: Stream<Item = char>]
     {
         let term_val = || {
             let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"'));
@@ -62,10 +60,10 @@ parser! {
 
 parser! {
     fn negative_number[I]()(I) -> String
-    where [I: Stream<Token = char>]
+    where [I: Stream<Item = char>]
     {
-        (char('-'), many1(digit()),
-         optional((char('.'), many1(digit()))))
+        (char('-'), many1(satisfy(char::is_numeric)),
+         optional((char('.'), many1(satisfy(char::is_numeric)))))
             .map(|(s1, s2, s3): (char, String, Option<(char, String)>)| {
                 if let Some(('.', s3)) = s3 {
                     format!("{}{}.{}", s1, s2, s3)
@@ -78,7 +76,7 @@ parser! {
 
 parser! {
     fn spaces1[I]()(I) -> ()
-    where [I: Stream<Token = char>] {
+    where [I: Stream<Item = char>] {
        skip_many1(space())
     }
 }
@@ -89,7 +87,7 @@ parser! {
     /// [5 TO 10], {5 TO 10}, [* TO 10], [10 TO *], {10 TO *], >5, <=10
     /// [a TO *], [a TO c], [abc TO bcd}
     fn range[I]()(I) -> UserInputLeaf
-    where [I: Stream<Token = char>] {
+    where [I: Stream<Item = char>] {
         let range_term_val = || {
             word().or(negative_number()).or(char('*').with(value("*".to_string())))
         };
@@ -159,7 +157,7 @@ fn must(expr: UserInputAST) -> UserInputAST {
 
 parser! {
     fn leaf[I]()(I) -> UserInputAST
-    where [I: Stream<Token = char>] {
+    where [I: Stream<Item = char>] {
         char('-').with(leaf()).map(negate)
             .or(char('+').with(leaf()).map(must))
             .or(char('(').with(ast()).skip(char(')')))
@@ -170,48 +168,6 @@ parser! {
     }
 }
 
-parser! {
-    fn positive_float_number[I]()(I) -> f32
-    where [I: Stream<Token = char>] {
-        (
-            many1(digit()),
-            optional(
-                (char('.'), many1(digit()))
-            )
-        )
-            .map(|(int_part, decimal_part_opt): (String, Option<(char, String)>)| {
-                let mut float_str = int_part;
-                if let Some((chr, decimal_str)) = decimal_part_opt {
-                    float_str.push(chr);
-                    float_str.push_str(&decimal_str);
-                }
-                float_str.parse::<f32>().unwrap()
-            })
-    }
-}
-
-parser! {
-    fn boost[I]()(I) -> f32
-    where [I: Stream<Token = char>] {
-        (char('^'), positive_float_number())
-            .map(|(_, boost)| boost)
-    }
-}
-
-parser! {
-    fn boosted_leaf[I]()(I) -> UserInputAST
-    where [I: Stream<Token = char>] {
-        (leaf(), optional(boost()))
-            .map(|(leaf, boost_opt)|
-                match boost_opt {
-                    Some(boost) if (boost - 1.0).abs() > std::f32::EPSILON =>
-                        UserInputAST::Boost(Box::new(leaf), boost),
-                    _ => leaf
-                }
-            )
-    }
-}
-
 #[derive(Clone, Copy)]
 enum BinaryOperand {
     Or,
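For reference, a dependency-free sketch of the grammar the removed `positive_float_number` parser accepted (one or more digits, optionally followed by a dot and one or more digits); the assertions mirror the unit test deleted further below:

```rust
// Hand-rolled equivalent of the deleted combine parser, for illustration
// only. Returns the parsed value plus the unconsumed remainder, or None.
fn parse_positive_float(input: &str) -> Option<(f32, &str)> {
    let int_len = input.chars().take_while(|c| c.is_ascii_digit()).count();
    if int_len == 0 {
        return None; // must start with a digit, so ".3332" fails
    }
    let mut end = int_len;
    let rest = &input[int_len..];
    if let Some(frac) = rest.strip_prefix('.') {
        let frac_len = frac.chars().take_while(|c| c.is_ascii_digit()).count();
        if frac_len == 0 {
            return None; // "1." fails: a dot must be followed by digits
        }
        end += 1 + frac_len;
    }
    input[..end].parse::<f32>().ok().map(|v| (v, &input[end..]))
}

fn main() {
    assert_eq!(parse_positive_float("1.0"), Some((1.0, "")));
    assert_eq!(parse_positive_float("0.234234 aaa"), Some((0.234234, " aaa")));
    assert_eq!(parse_positive_float(".3332"), None);
    assert_eq!(parse_positive_float("1."), None);
}
```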
@@ -220,7 +176,7 @@ enum BinaryOperand {
 
 parser! {
     fn binary_operand[I]()(I) -> BinaryOperand
-    where [I: Stream<Token = char>]
+    where [I: Stream<Item = char>]
     {
         string("AND").with(value(BinaryOperand::And))
             .or(string("OR").with(value(BinaryOperand::Or)))
@@ -254,12 +210,12 @@ fn aggregate_binary_expressions(
 
 parser! {
     pub fn ast[I]()(I) -> UserInputAST
-    where [I: Stream<Token = char>]
+    where [I: Stream<Item = char>]
     {
-        let operand_leaf = (binary_operand().skip(spaces()), boosted_leaf().skip(spaces()));
-        let boolean_expr = (boosted_leaf().skip(spaces().silent()), many1(operand_leaf)).map(
+        let operand_leaf = (binary_operand().skip(spaces()), leaf().skip(spaces()));
+        let boolean_expr = (leaf().skip(spaces().silent()), many1(operand_leaf)).map(
             |(left, right)| aggregate_binary_expressions(left,right));
-        let whitespace_separated_leaves = many1(boosted_leaf().skip(spaces().silent()))
+        let whitespace_separated_leaves = many1(leaf().skip(spaces().silent()))
             .map(|subqueries: Vec<UserInputAST>|
                 if subqueries.len() == 1 {
                     subqueries.into_iter().next().unwrap()
@@ -273,7 +229,7 @@ parser! {
 
 parser! {
     pub fn parse_to_ast[I]()(I) -> UserInputAST
-    where [I: Stream<Token = char>]
+    where [I: Stream<Item = char>]
     {
         spaces().with(optional(ast()).skip(eof())).map(|opt_ast| opt_ast.unwrap_or_else(UserInputAST::empty_query))
     }
@@ -283,38 +239,6 @@ parser! {
 mod test {
 
     use super::*;
     use combine::parser::Parser;
 
-    pub fn nearly_equals(a: f32, b: f32) -> bool {
-        (a - b).abs() < 0.0005 * (a + b).abs()
-    }
-
-    fn assert_nearly_equals(expected: f32, val: f32) {
-        assert!(
-            nearly_equals(val, expected),
-            "Got {}, expected {}.",
-            val,
-            expected
-        );
-    }
-
-    #[test]
-    fn test_positive_float_number() {
-        fn valid_parse(float_str: &str, expected_val: f32, expected_remaining: &str) {
-            let (val, remaining) = positive_float_number().parse(float_str).unwrap();
-            assert_eq!(remaining, expected_remaining);
-            assert_nearly_equals(val, expected_val);
-        }
-        fn error_parse(float_str: &str) {
-            assert!(positive_float_number().parse(float_str).is_err());
-        }
-        valid_parse("1.0", 1.0f32, "");
-        valid_parse("1", 1.0f32, "");
-        valid_parse("0.234234 aaa", 0.234234f32, " aaa");
-        error_parse(".3332");
-        error_parse("1.");
-        error_parse("-1.");
-    }
-
     fn test_parse_query_to_ast_helper(query: &str, expected: &str) {
         let query = parse_to_ast().parse(query).unwrap().0;
@@ -348,15 +272,6 @@ mod test {
         test_parse_query_to_ast_helper("NOT a", "-(\"a\")");
     }
 
-    #[test]
-    fn test_boosting() {
-        assert!(parse_to_ast().parse("a^2^3").is_err());
-        assert!(parse_to_ast().parse("a^2^").is_err());
-        test_parse_query_to_ast_helper("a^3", "(\"a\")^3");
-        test_parse_query_to_ast_helper("a^3 b^2", "((\"a\")^3 (\"b\")^2)");
-        test_parse_query_to_ast_helper("a^1", "\"a\"");
-    }
-
     #[test]
     fn test_parse_query_to_ast_binary_op() {
         test_parse_query_to_ast_helper("a AND b", "(+(\"a\") +(\"b\"))");
@@ -88,7 +88,6 @@ pub enum UserInputAST {
     Clause(Vec<UserInputAST>),
     Unary(Occur, Box<UserInputAST>),
     Leaf(Box<UserInputLeaf>),
-    Boost(Box<UserInputAST>, f32),
 }
 
 impl UserInputAST {
@@ -155,7 +154,6 @@ impl fmt::Debug for UserInputAST {
                 write!(formatter, "{}({:?})", occur, subquery)
             }
             UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
-            UserInputAST::Boost(ref leaf, boost) => write!(formatter, "({:?})^{}", leaf, boost),
         }
     }
 }
@@ -84,8 +84,7 @@ impl CustomScorer<u64> for ScorerByField {
             .u64(self.field)
             .ok_or_else(|| {
                 crate::TantivyError::SchemaError(format!(
-                    "Field requested ({:?}) is not a i64/u64 fast field.",
-                    self.field
+                    "Field requested is not a i64/u64 fast field."
                 ))
             })?;
         Ok(ScorerByFastFieldReader { ff_reader })
@@ -615,10 +614,7 @@ mod tests {
         let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
         let err = top_collector.for_segment(0, segment);
         if let Err(crate::TantivyError::SchemaError(msg)) = err {
-            assert_eq!(
-                msg,
-                "Field requested (Field(1)) is not a i64/u64 fast field."
-            );
+            assert_eq!(msg, "Field requested is not a i64/u64 fast field.");
         } else {
             assert!(false);
         }
@@ -23,7 +23,7 @@ fn collect_segment<C: Collector>(
     segment_ord: u32,
     segment_reader: &SegmentReader,
 ) -> crate::Result<C::Fruit> {
-    let mut scorer = weight.scorer(segment_reader, 1.0f32)?;
+    let mut scorer = weight.scorer(segment_reader)?;
     let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?;
     if let Some(delete_bitset) = segment_reader.delete_bitset() {
         scorer.for_each(&mut |doc, score| {
@@ -142,7 +142,7 @@ impl MmapCache {
 }
 
 struct WatcherWrapper {
-    _watcher: Mutex<notify::RecommendedWatcher>,
+    _watcher: Mutex<notify::PollWatcher>,
     watcher_router: Arc<WatchCallbackList>,
 }
 
@@ -150,7 +150,7 @@ impl WatcherWrapper {
     pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
         let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
         // We need to initialize the
-        let watcher = notify::raw_watcher(tx)
+        let watcher = notify::poll::PollWatcher::with_delay_ms(tx, 1)
             .and_then(|mut watcher| {
                 watcher.watch(path, RecursiveMode::Recursive)?;
                 Ok(watcher)
@@ -1,7 +1,6 @@
 use crate::core::Searcher;
 use crate::core::SegmentReader;
 use crate::docset::DocSet;
-use crate::query::boost_query::BoostScorer;
 use crate::query::explanation::does_not_match;
 use crate::query::{Explanation, Query, Scorer, Weight};
 use crate::DocId;
@@ -23,13 +22,12 @@ impl Query for AllQuery {
 pub struct AllWeight;
 
 impl Weight for AllWeight {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
-        let all_scorer = AllScorer {
+    fn scorer(&self, reader: &SegmentReader) -> crate::Result<Box<dyn Scorer>> {
+        Ok(Box::new(AllScorer {
             state: State::NotStarted,
             doc: 0u32,
             max_doc: reader.max_doc(),
-        };
-        Ok(Box::new(BoostScorer::new(all_scorer, boost)))
+        }))
     }
 
     fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
@@ -92,12 +90,14 @@ impl Scorer for AllScorer {
 
 #[cfg(test)]
 mod tests {
 
     use super::AllQuery;
+    use crate::query::Query;
     use crate::schema::{Schema, TEXT};
     use crate::Index;
 
-    fn create_test_index() -> Index {
+    #[test]
+    fn test_all_query() {
         let mut schema_builder = Schema::builder();
         let field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
@@ -108,18 +108,13 @@ mod tests {
         index_writer.commit().unwrap();
         index_writer.add_document(doc!(field=>"ccc"));
         index_writer.commit().unwrap();
-        index
-    }
-
-    #[test]
-    fn test_all_query() {
-        let index = create_test_index();
         let reader = index.reader().unwrap();
         reader.reload().unwrap();
         let searcher = reader.searcher();
         let weight = AllQuery.weight(&searcher, false).unwrap();
         {
             let reader = searcher.segment_reader(0);
-            let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
+            let mut scorer = weight.scorer(reader).unwrap();
             assert!(scorer.advance());
             assert_eq!(scorer.doc(), 0u32);
             assert!(scorer.advance());
@@ -128,31 +123,10 @@ mod tests {
         }
         {
             let reader = searcher.segment_reader(1);
-            let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
+            let mut scorer = weight.scorer(reader).unwrap();
             assert!(scorer.advance());
             assert_eq!(scorer.doc(), 0u32);
             assert!(!scorer.advance());
         }
     }
-
-    #[test]
-    fn test_all_query_with_boost() {
-        let index = create_test_index();
-        let reader = index.reader().unwrap();
-        let searcher = reader.searcher();
-        let weight = AllQuery.weight(&searcher, false).unwrap();
-        let reader = searcher.segment_reader(0);
-        {
-            let mut scorer = weight.scorer(reader, 2.0f32).unwrap();
-            assert!(scorer.advance());
-            assert_eq!(scorer.doc(), 0u32);
-            assert_eq!(scorer.score(), 2.0f32);
-        }
-        {
-            let mut scorer = weight.scorer(reader, 1.5f32).unwrap();
-            assert!(scorer.advance());
-            assert_eq!(scorer.doc(), 0u32);
-            assert_eq!(scorer.score(), 1.5f32);
-        }
-    }
 }
@@ -40,7 +40,7 @@ impl<A> Weight for AutomatonWeight<A>
 where
     A: Automaton + Send + Sync + 'static,
 {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
+    fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
         let max_doc = reader.max_doc();
         let mut doc_bitset = BitSet::with_max_value(max_doc);
 
@@ -58,12 +58,11 @@ where
             }
         }
         let doc_bitset = BitSetDocSet::from(doc_bitset);
-        let const_scorer = ConstScorer::new(doc_bitset, boost);
-        Ok(Box::new(const_scorer))
+        Ok(Box::new(ConstScorer::new(doc_bitset)))
     }
 
     fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
+        let mut scorer = self.scorer(reader)?;
         if scorer.skip_next(doc) == SkipResult::Reached {
             Ok(Explanation::new("AutomatonScorer", 1.0f32))
         } else {
@@ -73,95 +72,3 @@ where
         }
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use super::AutomatonWeight;
-    use crate::query::Weight;
-    use crate::schema::{Schema, STRING};
-    use crate::Index;
-    use tantivy_fst::Automaton;
-
-    fn create_index() -> Index {
-        let mut schema = Schema::builder();
-        let title = schema.add_text_field("title", STRING);
-        let index = Index::create_in_ram(schema.build());
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        index_writer.add_document(doc!(title=>"abc"));
-        index_writer.add_document(doc!(title=>"bcd"));
-        index_writer.add_document(doc!(title=>"abcd"));
-        assert!(index_writer.commit().is_ok());
-        index
-    }
-
-    enum State {
-        Start,
-        NotMatching,
-        AfterA,
-    }
-
-    struct PrefixedByA;
-
-    impl Automaton for PrefixedByA {
-        type State = State;
-
-        fn start(&self) -> Self::State {
-            State::Start
-        }
-
-        fn is_match(&self, state: &Self::State) -> bool {
-            match *state {
-                State::AfterA => true,
-                _ => false,
-            }
-        }
-
-        fn accept(&self, state: &Self::State, byte: u8) -> Self::State {
-            match *state {
-                State::Start => {
-                    if byte == b'a' {
-                        State::AfterA
-                    } else {
-                        State::NotMatching
-                    }
-                }
-                State::AfterA => State::AfterA,
-                State::NotMatching => State::NotMatching,
-            }
-        }
-    }
-
-    #[test]
-    fn test_automaton_weight() {
-        let index = create_index();
-        let field = index.schema().get_field("title").unwrap();
-        let automaton_weight = AutomatonWeight::new(field, PrefixedByA);
-        let reader = index.reader().unwrap();
-        let searcher = reader.searcher();
-        let mut scorer = automaton_weight
-            .scorer(searcher.segment_reader(0u32), 1.0f32)
-            .unwrap();
-        assert!(scorer.advance());
-        assert_eq!(scorer.doc(), 0u32);
-        assert_eq!(scorer.score(), 1.0f32);
-        assert!(scorer.advance());
-        assert_eq!(scorer.doc(), 2u32);
-        assert_eq!(scorer.score(), 1.0f32);
-        assert!(!scorer.advance());
-    }
-
-    #[test]
-    fn test_automaton_weight_boost() {
-        let index = create_index();
-        let field = index.schema().get_field("title").unwrap();
-        let automaton_weight = AutomatonWeight::new(field, PrefixedByA);
-        let reader = index.reader().unwrap();
-        let searcher = reader.searcher();
-        let mut scorer = automaton_weight
-            .scorer(searcher.segment_reader(0u32), 1.32f32)
-            .unwrap();
-        assert!(scorer.advance());
-        assert_eq!(scorer.doc(), 0u32);
-        assert_eq!(scorer.score(), 1.32f32);
-    }
-}
@@ -25,6 +25,7 @@ fn compute_tf_cache(average_fieldnorm: f32) -> [f32; 256] {
     cache
 }
 
+#[derive(Clone)]
 pub struct BM25Weight {
     idf_explain: Explanation,
     weight: f32,
@@ -33,15 +34,6 @@ pub struct BM25Weight {
 }
 
 impl BM25Weight {
-    pub fn boost_by(&self, boost: f32) -> BM25Weight {
-        BM25Weight {
-            idf_explain: self.idf_explain.clone(),
-            weight: self.weight * boost,
-            cache: self.cache,
-            average_fieldnorm: self.average_fieldnorm,
-        }
-    }
-
     pub fn for_terms(searcher: &Searcher, terms: &[Term]) -> BM25Weight {
         assert!(!terms.is_empty(), "BM25 requires at least one term");
         let field = terms[0].field();
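The removed `boost_by` exploited the fact that a BM25 score is linear in its precomputed `weight`, so a boost can be folded in once when the scorer is built instead of on every document. A simplified, dependency-free sketch (hypothetical stand-in struct, not tantivy's real `BM25Weight`):

```rust
// Folding a boost into the weight is equivalent to multiplying every score.
#[derive(Clone, Copy)]
struct Bm25Weight {
    weight: f32,
}

impl Bm25Weight {
    fn boost_by(self, boost: f32) -> Bm25Weight {
        Bm25Weight { weight: self.weight * boost }
    }

    fn score(self, tf_component: f32) -> f32 {
        self.weight * tf_component
    }
}

fn main() {
    let w = Bm25Weight { weight: 0.5 };
    // Boosting the weight by 2.0 doubles every score it produces.
    assert_eq!(w.boost_by(2.0).score(0.8), 2.0 * w.score(0.8));
}
```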
@@ -55,11 +55,10 @@ impl BooleanWeight {
     fn per_occur_scorers(
         &self,
         reader: &SegmentReader,
-        boost: f32,
     ) -> crate::Result<HashMap<Occur, Vec<Box<dyn Scorer>>>> {
         let mut per_occur_scorers: HashMap<Occur, Vec<Box<dyn Scorer>>> = HashMap::new();
         for &(ref occur, ref subweight) in &self.weights {
-            let sub_scorer: Box<dyn Scorer> = subweight.scorer(reader, boost)?;
+            let sub_scorer: Box<dyn Scorer> = subweight.scorer(reader)?;
             per_occur_scorers
                 .entry(*occur)
                 .or_insert_with(Vec::new)
@@ -71,9 +70,8 @@ impl BooleanWeight {
     fn complex_scorer<TScoreCombiner: ScoreCombiner>(
         &self,
         reader: &SegmentReader,
-        boost: f32,
     ) -> crate::Result<Box<dyn Scorer>> {
-        let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;
+        let mut per_occur_scorers = self.per_occur_scorers(reader)?;
 
         let should_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
             .remove(&Occur::Should)
@@ -114,7 +112,7 @@ impl BooleanWeight {
 }
 
 impl Weight for BooleanWeight {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
+    fn scorer(&self, reader: &SegmentReader) -> crate::Result<Box<dyn Scorer>> {
         if self.weights.is_empty() {
             Ok(Box::new(EmptyScorer))
         } else if self.weights.len() == 1 {
@@ -122,17 +120,17 @@ impl Weight for BooleanWeight {
             if occur == Occur::MustNot {
                 Ok(Box::new(EmptyScorer))
             } else {
-                weight.scorer(reader, boost)
+                weight.scorer(reader)
             }
         } else if self.scoring_enabled {
-            self.complex_scorer::<SumWithCoordsCombiner>(reader, boost)
+            self.complex_scorer::<SumWithCoordsCombiner>(reader)
         } else {
-            self.complex_scorer::<DoNothingCombiner>(reader, boost)
+            self.complex_scorer::<DoNothingCombiner>(reader)
         }
     }
 
     fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
+        let mut scorer = self.scorer(reader)?;
         if scorer.skip_next(doc) != SkipResult::Reached {
             return Err(does_not_match(doc));
         }
@@ -18,7 +18,6 @@ mod tests {
     use crate::query::Scorer;
     use crate::query::TermQuery;
     use crate::schema::*;
-    use crate::tests::assert_nearly_equals;
     use crate::Index;
     use crate::{DocAddress, DocId};
 
@@ -71,9 +70,7 @@ mod tests {
         let query = query_parser.parse_query("+a").unwrap();
         let searcher = index.reader().unwrap().searcher();
         let weight = query.weight(&searcher, true).unwrap();
-        let scorer = weight
-            .scorer(searcher.segment_reader(0u32), 1.0f32)
-            .unwrap();
+        let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
         assert!(scorer.is::<TermScorer>());
     }
 
@@ -85,17 +82,13 @@ mod tests {
         {
             let query = query_parser.parse_query("+a +b +c").unwrap();
             let weight = query.weight(&searcher, true).unwrap();
-            let scorer = weight
-                .scorer(searcher.segment_reader(0u32), 1.0f32)
-                .unwrap();
+            let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
            assert!(scorer.is::<Intersection<TermScorer>>());
        }
        {
            let query = query_parser.parse_query("+a +(b c)").unwrap();
            let weight = query.weight(&searcher, true).unwrap();
-            let scorer = weight
-                .scorer(searcher.segment_reader(0u32), 1.0f32)
-                .unwrap();
+            let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
            assert!(scorer.is::<Intersection<Box<dyn Scorer>>>());
        }
    }
@@ -108,9 +101,7 @@ mod tests {
        {
            let query = query_parser.parse_query("+a b").unwrap();
            let weight = query.weight(&searcher, true).unwrap();
-            let scorer = weight
-                .scorer(searcher.segment_reader(0u32), 1.0f32)
-                .unwrap();
+            let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
            assert!(scorer.is::<RequiredOptionalScorer<
                Box<dyn Scorer>,
                Box<dyn Scorer>,
@@ -120,9 +111,7 @@ mod tests {
        {
            let query = query_parser.parse_query("+a b").unwrap();
            let weight = query.weight(&searcher, false).unwrap();
-            let scorer = weight
-                .scorer(searcher.segment_reader(0u32), 1.0f32)
-                .unwrap();
+            let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
            assert!(scorer.is::<TermScorer>());
        }
    }
@@ -190,50 +179,6 @@ mod tests {
        }
    }
 
-    #[test]
-    pub fn test_boolean_query_with_weight() {
-        let mut schema_builder = Schema::builder();
-        let text_field = schema_builder.add_text_field("text", TEXT);
-        let schema = schema_builder.build();
-        let index = Index::create_in_ram(schema);
-        {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            index_writer.add_document(doc!(text_field => "a b c"));
-            index_writer.add_document(doc!(text_field => "a c"));
-            index_writer.add_document(doc!(text_field => "b c"));
-            assert!(index_writer.commit().is_ok());
-        }
-        let term_a: Box<dyn Query> = Box::new(TermQuery::new(
-            Term::from_field_text(text_field, "a"),
-            IndexRecordOption::WithFreqs,
-        ));
-        let term_b: Box<dyn Query> = Box::new(TermQuery::new(
-            Term::from_field_text(text_field, "b"),
-            IndexRecordOption::WithFreqs,
-        ));
-        let reader = index.reader().unwrap();
-        let searcher = reader.searcher();
-        let boolean_query =
-            BooleanQuery::from(vec![(Occur::Should, term_a), (Occur::Should, term_b)]);
-        let boolean_weight = boolean_query.weight(&searcher, true).unwrap();
-        {
-            let mut boolean_scorer = boolean_weight
-                .scorer(searcher.segment_reader(0u32), 1.0f32)
-                .unwrap();
-            assert!(boolean_scorer.advance());
-            assert_eq!(boolean_scorer.doc(), 0u32);
-            assert_nearly_equals(boolean_scorer.score(), 0.84163445f32);
-        }
-        {
-            let mut boolean_scorer = boolean_weight
-                .scorer(searcher.segment_reader(0u32), 2.0f32)
-                .unwrap();
-            assert!(boolean_scorer.advance());
-            assert_eq!(boolean_scorer.doc(), 0u32);
-            assert_nearly_equals(boolean_scorer.score(), 1.6832689f32);
-        }
-    }
-
    #[test]
    pub fn test_intersection_score() {
        let (index, text_field) = aux_test_helper();
@@ -304,9 +249,7 @@ mod tests {
        let query_parser = QueryParser::for_index(&index, vec![title, text]);
        let query = query_parser.parse_query("Оксана Лифенко").unwrap();
        let weight = query.weight(&searcher, true).unwrap();
-        let mut scorer = weight
-            .scorer(searcher.segment_reader(0u32), 1.0f32)
-            .unwrap();
+        let mut scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
        scorer.advance();
 
        let explanation = query.explain(&searcher, DocAddress(0u32, 0u32)).unwrap();
@@ -1,164 +0,0 @@
-use crate::common::BitSet;
-use crate::fastfield::DeleteBitSet;
-use crate::query::explanation::does_not_match;
-use crate::query::{Explanation, Query, Scorer, Weight};
-use crate::{DocId, DocSet, Searcher, SegmentReader, SkipResult, Term};
-use std::collections::BTreeSet;
-use std::fmt;
-
-/// `BoostQuery` is a wrapper over a query used to boost its score.
-///
-/// The document set matched by the `BoostQuery` is strictly the same as the underlying query.
-/// The score of each document, is the score of the underlying query multiplied by the `boost`
-/// factor.
-pub struct BoostQuery {
-    query: Box<dyn Query>,
-    boost: f32,
-}
-
-impl BoostQuery {
-    /// Builds a boost query.
-    pub fn new(query: Box<dyn Query>, boost: f32) -> BoostQuery {
-        BoostQuery { query, boost }
-    }
-}
-
-impl Clone for BoostQuery {
-    fn clone(&self) -> Self {
-        BoostQuery {
-            query: self.query.box_clone(),
-            boost: self.boost,
-        }
-    }
-}
-
-impl fmt::Debug for BoostQuery {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "Boost(query={:?}, boost={})", self.query, self.boost)
-    }
-}
-
-impl Query for BoostQuery {
-    fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> {
-        let weight_without_boost = self.query.weight(searcher, scoring_enabled)?;
-        let boosted_weight = if scoring_enabled {
-            Box::new(BoostWeight::new(weight_without_boost, self.boost))
-        } else {
-            weight_without_boost
-        };
-        Ok(boosted_weight)
-    }
-
-    fn query_terms(&self, term_set: &mut BTreeSet<Term>) {
-        self.query.query_terms(term_set)
-    }
-}
-
-pub(crate) struct BoostWeight {
-    weight: Box<dyn Weight>,
-    boost: f32,
-}
-
-impl BoostWeight {
-    pub fn new(weight: Box<dyn Weight>, boost: f32) -> Self {
-        BoostWeight { weight, boost }
-    }
-}
-
-impl Weight for BoostWeight {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
-        self.weight.scorer(reader, boost * self.boost)
-    }
-
-    fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
-        if scorer.skip_next(doc) != SkipResult::Reached {
-            return Err(does_not_match(doc));
-        }
-        let mut explanation =
-            Explanation::new(format!("Boost x{} of ...", self.boost), scorer.score());
-        let underlying_explanation = self.weight.explain(reader, doc)?;
-        explanation.add_detail(underlying_explanation);
-        Ok(explanation)
-    }
-
-    fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
-        self.weight.count(reader)
-    }
-}
-
-pub(crate) struct BoostScorer<S: Scorer> {
-    underlying: S,
-    boost: f32,
-}
-
-impl<S: Scorer> BoostScorer<S> {
-    pub fn new(underlying: S, boost: f32) -> BoostScorer<S> {
-        BoostScorer { underlying, boost }
-    }
-}
-
-impl<S: Scorer> DocSet for BoostScorer<S> {
-    fn advance(&mut self) -> bool {
-        self.underlying.advance()
-    }
-
-    fn skip_next(&mut self, target: DocId) -> SkipResult {
-        self.underlying.skip_next(target)
-    }
-
-    fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
-        self.underlying.fill_buffer(buffer)
-    }
-
-    fn doc(&self) -> u32 {
-        self.underlying.doc()
-    }
-
-    fn size_hint(&self) -> u32 {
-        self.underlying.size_hint()
-    }
-
-    fn append_to_bitset(&mut self, bitset: &mut BitSet) {
-        self.underlying.append_to_bitset(bitset)
-    }
-
-    fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
-        self.underlying.count(delete_bitset)
-    }
-
-    fn count_including_deleted(&mut self) -> u32 {
-        self.underlying.count_including_deleted()
-    }
-}
-
-impl<S: Scorer> Scorer for BoostScorer<S> {
-    fn score(&mut self) -> f32 {
-        self.underlying.score() * self.boost
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::BoostQuery;
-    use crate::query::{AllQuery, Query};
-    use crate::schema::Schema;
-    use crate::{DocAddress, Document, Index};
-
-    #[test]
-    fn test_boost_query_explain() {
-        let schema = Schema::builder().build();
-        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        index_writer.add_document(Document::new());
-        assert!(index_writer.commit().is_ok());
-        let reader = index.reader().unwrap();
-        let searcher = reader.searcher();
-        let query = BoostQuery::new(Box::new(AllQuery), 0.2);
-        let explanation = query.explain(&searcher, DocAddress(0, 0u32)).unwrap();
-        assert_eq!(
-            explanation.to_pretty_json(),
-            "{\n \"value\": 0.2,\n \"description\": \"Boost x0.2 of ...\",\n \"details\": [\n {\n \"value\": 1.0,\n \"description\": \"AllQuery\"\n }\n ]\n}"
-        );
-    }
-}
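For context, this is how the deleted `BoostQuery` was used, following its own unit test above; it only compiles against the 0.12-style branch that this compare reverts:

```rust
// Wraps any query; matches the same documents, multiplies each score by 0.2.
use tantivy::query::{AllQuery, BoostQuery};

fn main() {
    let query = BoostQuery::new(Box::new(AllQuery), 0.2);
    // Per the deleted Debug impl, this prints roughly:
    // Boost(query=AllQuery, boost=0.2)
    println!("{:?}", query);
}
```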
@@ -33,7 +33,7 @@ impl Query for EmptyQuery {
 /// It is useful for tests and handling edge cases.
 pub struct EmptyWeight;
 impl Weight for EmptyWeight {
-    fn scorer(&self, _reader: &SegmentReader, _boost: f32) -> crate::Result<Box<dyn Scorer>> {
+    fn scorer(&self, _reader: &SegmentReader) -> crate::Result<Box<dyn Scorer>> {
         Ok(Box::new(EmptyScorer))
     }
 
@@ -7,7 +7,6 @@ mod automaton_weight;
 mod bitset;
 mod bm25;
 mod boolean_query;
-mod boost_query;
 mod empty_query;
 mod exclude;
 mod explanation;
@@ -38,7 +37,6 @@ pub use self::all_query::{AllQuery, AllScorer, AllWeight};
 pub use self::automaton_weight::AutomatonWeight;
 pub use self::bitset::BitSetDocSet;
 pub use self::boolean_query::BooleanQuery;
-pub use self::boost_query::BoostQuery;
 pub use self::empty_query::{EmptyQuery, EmptyScorer, EmptyWeight};
 pub use self::exclude::Exclude;
 pub use self::explanation::Explanation;
@@ -7,7 +7,7 @@ pub use self::phrase_scorer::PhraseScorer;
 pub use self::phrase_weight::PhraseWeight;
 
 #[cfg(test)]
-pub mod tests {
+mod tests {
 
     use super::*;
     use crate::collector::tests::{TEST_COLLECTOR_WITHOUT_SCORE, TEST_COLLECTOR_WITH_SCORE};
@@ -15,10 +15,10 @@ pub mod tests {
     use crate::error::TantivyError;
     use crate::schema::{Schema, Term, TEXT};
     use crate::tests::assert_nearly_equals;
-    use crate::DocAddress;
     use crate::DocId;
+    use crate::{DocAddress, DocSet};
 
-    pub fn create_index(texts: &[&'static str]) -> Index {
+    fn create_index(texts: &[&'static str]) -> Index {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
@@ -102,6 +102,30 @@ pub mod tests {
         assert!(test_query(vec!["g", "a"]).is_empty());
     }
 
+    #[test]
+    pub fn test_phrase_count() {
+        let index = create_index(&["a c", "a a b d a b c", " a b"]);
+        let schema = index.schema();
+        let text_field = schema.get_field("text").unwrap();
+        let searcher = index.reader().unwrap().searcher();
+        let phrase_query = PhraseQuery::new(vec![
+            Term::from_field_text(text_field, "a"),
+            Term::from_field_text(text_field, "b"),
+        ]);
+        let phrase_weight = phrase_query.phrase_weight(&searcher, true).unwrap();
+        let mut phrase_scorer = phrase_weight
+            .phrase_scorer(searcher.segment_reader(0u32))
+            .unwrap()
+            .unwrap();
+        assert!(phrase_scorer.advance());
+        assert_eq!(phrase_scorer.doc(), 1);
+        assert_eq!(phrase_scorer.phrase_count(), 2);
+        assert!(phrase_scorer.advance());
+        assert_eq!(phrase_scorer.doc(), 2);
+        assert_eq!(phrase_scorer.phrase_count(), 1);
+        assert!(!phrase_scorer.advance());
+    }
+
     #[test]
     pub fn test_phrase_query_no_positions() {
         let mut schema_builder = Schema::builder();
@@ -37,12 +37,11 @@ impl PhraseWeight {
         reader.get_fieldnorms_reader(field)
     }
 
-    fn phrase_scorer(
+    pub fn phrase_scorer(
         &self,
         reader: &SegmentReader,
-        boost: f32,
     ) -> Result<Option<PhraseScorer<SegmentPostings>>> {
-        let similarity_weight = self.similarity_weight.boost_by(boost);
+        let similarity_weight = self.similarity_weight.clone();
         let fieldnorm_reader = self.fieldnorm_reader(reader);
         if reader.has_deletes() {
             let mut term_postings_list = Vec::new();
@@ -85,8 +84,8 @@ impl PhraseWeight {
 }
 
 impl Weight for PhraseWeight {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
-        if let Some(scorer) = self.phrase_scorer(reader, boost)? {
+    fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
+        if let Some(scorer) = self.phrase_scorer(reader)? {
             Ok(Box::new(scorer))
         } else {
             Ok(Box::new(EmptyScorer))
@@ -94,7 +93,7 @@ impl Weight for PhraseWeight {
     }
 
     fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
-        let scorer_opt = self.phrase_scorer(reader, 1.0f32)?;
+        let scorer_opt = self.phrase_scorer(reader)?;
         if scorer_opt.is_none() {
             return Err(does_not_match(doc));
         }
@@ -110,34 +109,3 @@ impl Weight for PhraseWeight {
         Ok(explanation)
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use super::super::tests::create_index;
-    use crate::query::PhraseQuery;
-    use crate::{DocSet, Term};
-
-    #[test]
-    pub fn test_phrase_count() {
-        let index = create_index(&["a c", "a a b d a b c", " a b"]);
-        let schema = index.schema();
-        let text_field = schema.get_field("text").unwrap();
-        let searcher = index.reader().unwrap().searcher();
-        let phrase_query = PhraseQuery::new(vec![
-            Term::from_field_text(text_field, "a"),
-            Term::from_field_text(text_field, "b"),
-        ]);
-        let phrase_weight = phrase_query.phrase_weight(&searcher, true).unwrap();
-        let mut phrase_scorer = phrase_weight
-            .phrase_scorer(searcher.segment_reader(0u32), 1.0f32)
-            .unwrap()
-            .unwrap();
-        assert!(phrase_scorer.advance());
-        assert_eq!(phrase_scorer.doc(), 1);
-        assert_eq!(phrase_scorer.phrase_count(), 2);
-        assert!(phrase_scorer.advance());
-        assert_eq!(phrase_scorer.doc(), 2);
-        assert_eq!(phrase_scorer.phrase_count(), 1);
-        assert!(!phrase_scorer.advance());
-    }
-}
@@ -21,17 +21,6 @@ pub enum LogicalLiteral {
 pub enum LogicalAST {
     Clause(Vec<(Occur, LogicalAST)>),
     Leaf(Box<LogicalLiteral>),
-    Boost(Box<LogicalAST>, f32),
 }
 
-impl LogicalAST {
-    pub fn boost(self, boost: f32) -> LogicalAST {
-        if (boost - 1.0f32).abs() < std::f32::EPSILON {
-            self
-        } else {
-            LogicalAST::Boost(Box::new(self), boost)
-        }
-    }
-}
-
 fn occur_letter(occur: Occur) -> &'static str {
@@ -58,7 +47,6 @@ impl fmt::Debug for LogicalAST {
                 }
                 Ok(())
             }
-            LogicalAST::Boost(ref ast, boost) => write!(formatter, "{:?}^{}", ast, boost),
             LogicalAST::Leaf(ref literal) => write!(formatter, "{:?}", literal),
         }
     }
@@ -1,5 +1,6 @@
 use super::logical_ast::*;
 use crate::core::Index;
+use crate::query::AllQuery;
 use crate::query::BooleanQuery;
 use crate::query::EmptyQuery;
 use crate::query::Occur;
@@ -7,13 +8,11 @@ use crate::query::PhraseQuery;
 use crate::query::Query;
 use crate::query::RangeQuery;
 use crate::query::TermQuery;
-use crate::query::{AllQuery, BoostQuery};
 use crate::schema::{Facet, IndexRecordOption};
 use crate::schema::{Field, Schema};
 use crate::schema::{FieldType, Term};
 use crate::tokenizer::TokenizerManager;
 use std::borrow::Cow;
-use std::collections::HashMap;
 use std::num::{ParseFloatError, ParseIntError};
 use std::ops::Bound;
 use std::str::FromStr;
@@ -145,6 +144,7 @@ fn trim_ast(logical_ast: LogicalAST) -> Option<LogicalAST> {
 ///
 /// * must terms: By prepending a term by a `+`, a term can be made required for the search.
 ///
+///
 /// * phrase terms: Quoted terms become phrase searches on fields that have positions indexed.
 /// e.g., `title:"Barack Obama"` will only find documents that have "barack" immediately followed
 /// by "obama".
@@ -158,20 +158,12 @@ fn trim_ast(logical_ast: LogicalAST) -> Option<LogicalAST> {
 ///
 /// * all docs query: A plain `*` will match all documents in the index.
 ///
-/// Parts of the queries can be boosted by appending `^boostfactor`.
-/// For instance, `"SRE"^2.0 OR devops^0.4` will boost documents containing `SRE` instead of
-/// devops. Negative boosts are not allowed.
-///
-/// It is also possible to define a boost for a some specific field, at the query parser level.
-/// (See [`set_boost(...)`](#method.set_field_boost) ). Typically you may want to boost a title
-/// field.
 #[derive(Clone)]
 pub struct QueryParser {
     schema: Schema,
     default_fields: Vec<Field>,
     conjunction_by_default: bool,
     tokenizer_manager: TokenizerManager,
-    boost: HashMap<Field, f32>,
 }
 
 impl QueryParser {
@@ -189,7 +181,6 @@ impl QueryParser {
             default_fields,
             tokenizer_manager,
             conjunction_by_default: false,
-            boost: Default::default(),
         }
     }
 
@@ -210,17 +201,6 @@ impl QueryParser {
         self.conjunction_by_default = true;
     }
 
-    /// Sets a boost for a specific field.
-    ///
-    /// The parse query will automatically boost this field.
-    ///
-    /// If the query defines a query boost through the query language (e.g: `country:France^3.0`),
-    /// the two boosts (the one defined in the query, and the one defined in the `QueryParser`)
-    /// are multiplied together.
-    pub fn set_field_boost(&mut self, field: Field, boost: f32) {
-        self.boost.insert(field, boost);
-    }
-
     /// Parse a query
     ///
     /// Note that `parse_query` returns an error if the input
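The deleted docs and tests describe the 0.12 boosting workflow. A usage sketch assembled from them (again, this targets the branch being reverted; it mirrors the deleted `test_parse_query_with_default_boost_and_custom_boost` further below):

```rust
// A field-level boost from `set_field_boost` and a `^factor` written in the
// query string are multiplied together.
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() {
    let mut schema_builder = Schema::builder();
    let text = schema_builder.add_text_field("text", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let mut query_parser = QueryParser::for_index(&index, vec![text]);
    query_parser.set_field_boost(text, 2.0);
    // "text:hello^2" now carries a total boost of 2.0 (field) * 2.0 (query),
    // represented as two nested Boost wrappers in the Debug output.
    let query = query_parser.parse_query("text:hello^2").unwrap();
    println!("{:?}", query);
}
```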
@@ -427,10 +407,6 @@ impl QueryParser {
                 self.compute_logical_ast_with_occur(*subquery)?;
             Ok((Occur::compose(left_occur, right_occur), logical_sub_queries))
         }
-        UserInputAST::Boost(ast, boost) => {
-            let (occur, ast_without_occur) = self.compute_logical_ast_with_occur(*ast)?;
-            Ok((occur, ast_without_occur.boost(boost)))
-        }
         UserInputAST::Leaf(leaf) => {
             let result_ast = self.compute_logical_ast_from_leaf(*leaf)?;
             Ok((Occur::Should, result_ast))
@@ -438,10 +414,6 @@ impl QueryParser {
         }
     }
 
-    fn field_boost(&self, field: Field) -> f32 {
-        self.boost.get(&field).cloned().unwrap_or(1.0f32)
-    }
-
     fn compute_logical_ast_from_leaf(
         &self,
         leaf: UserInputLeaf,
@@ -467,9 +439,7 @@ impl QueryParser {
         let mut asts: Vec<LogicalAST> = Vec::new();
         for (field, phrase) in term_phrases {
             if let Some(ast) = self.compute_logical_ast_for_leaf(field, &phrase)? {
-                // Apply some field specific boost defined at the query parser level.
-                let boost = self.field_boost(field);
-                asts.push(LogicalAST::Leaf(Box::new(ast)).boost(boost));
+                asts.push(LogicalAST::Leaf(Box::new(ast)));
             }
         }
         let result_ast: LogicalAST = if asts.len() == 1 {
@@ -489,16 +459,14 @@ impl QueryParser {
         let mut clauses = fields
             .iter()
             .map(|&field| {
-                let boost = self.field_boost(field);
                 let field_entry = self.schema.get_field_entry(field);
                 let value_type = field_entry.field_type().value_type();
-                let logical_ast = LogicalAST::Leaf(Box::new(LogicalLiteral::Range {
+                Ok(LogicalAST::Leaf(Box::new(LogicalLiteral::Range {
                     field,
                     value_type,
                     lower: self.resolve_bound(field, &lower)?,
                     upper: self.resolve_bound(field, &upper)?,
-                }));
-                Ok(logical_ast.boost(boost))
+                })))
             })
             .collect::<Result<Vec<_>, QueryParserError>>()?;
         let result_ast = if clauses.len() == 1 {
@@ -551,11 +519,6 @@ fn convert_to_query(logical_ast: LogicalAST) -> Box<dyn Query> {
         Some(LogicalAST::Leaf(trimmed_logical_literal)) => {
             convert_literal_to_query(*trimmed_logical_literal)
         }
-        Some(LogicalAST::Boost(ast, boost)) => {
-            let query = convert_to_query(*ast);
-            let boosted_query = BoostQuery::new(query, boost);
-            Box::new(boosted_query)
-        }
         None => Box::new(EmptyQuery),
     }
 }
@@ -575,7 +538,7 @@ mod test {
     use crate::Index;
     use matches::assert_matches;
 
-    fn make_schema() -> Schema {
+    fn make_query_parser() -> QueryParser {
         let mut schema_builder = Schema::builder();
         let text_field_indexing = TextFieldIndexing::default()
             .set_tokenizer("en_with_stop_words")
@@ -583,8 +546,8 @@ mod test {
         let text_options = TextOptions::default()
             .set_indexing_options(text_field_indexing)
             .set_stored();
-        schema_builder.add_text_field("title", TEXT);
-        schema_builder.add_text_field("text", TEXT);
+        let title = schema_builder.add_text_field("title", TEXT);
+        let text = schema_builder.add_text_field("text", TEXT);
         schema_builder.add_i64_field("signed", INDEXED);
         schema_builder.add_u64_field("unsigned", INDEXED);
         schema_builder.add_text_field("notindexed_text", STORED);
@@ -595,15 +558,8 @@ mod test {
         schema_builder.add_date_field("date", INDEXED);
         schema_builder.add_f64_field("float", INDEXED);
         schema_builder.add_facet_field("facet");
-        schema_builder.build()
-    }
-
-    fn make_query_parser() -> QueryParser {
-        let schema = make_schema();
-        let default_fields: Vec<Field> = vec!["title", "text"]
-            .into_iter()
-            .flat_map(|field_name| schema.get_field(field_name))
-            .collect();
+        let schema = schema_builder.build();
+        let default_fields = vec![title, text];
         let tokenizer_manager = TokenizerManager::default();
         tokenizer_manager.register(
             "en_with_stop_words",
@@ -645,45 +601,6 @@ mod test {
         );
     }
 
-    #[test]
-    pub fn test_parse_query_with_boost() {
-        let mut query_parser = make_query_parser();
-        let schema = make_schema();
-        let text_field = schema.get_field("text").unwrap();
-        query_parser.set_field_boost(text_field, 2.0f32);
-        let query = query_parser.parse_query("text:hello").unwrap();
-        assert_eq!(
-            format!("{:?}", query),
-            "Boost(query=TermQuery(Term(field=1,bytes=[104, 101, 108, 108, 111])), boost=2)"
-        );
-    }
-
-    #[test]
-    pub fn test_parse_query_range_with_boost() {
-        let mut query_parser = make_query_parser();
-        let schema = make_schema();
-        let title_field = schema.get_field("title").unwrap();
-        query_parser.set_field_boost(title_field, 2.0f32);
-        let query = query_parser.parse_query("title:[A TO B]").unwrap();
-        assert_eq!(
-            format!("{:?}", query),
-            "Boost(query=RangeQuery { field: Field(0), value_type: Str, left_bound: Included([97]), right_bound: Included([98]) }, boost=2)"
-        );
-    }
-
-    #[test]
-    pub fn test_parse_query_with_default_boost_and_custom_boost() {
-        let mut query_parser = make_query_parser();
-        let schema = make_schema();
-        let text_field = schema.get_field("text").unwrap();
-        query_parser.set_field_boost(text_field, 2.0f32);
-        let query = query_parser.parse_query("text:hello^2").unwrap();
-        assert_eq!(
-            format!("{:?}", query),
-            "Boost(query=Boost(query=TermQuery(Term(field=1,bytes=[104, 101, 108, 108, 111])), boost=2), boost=2)"
-        );
-    }
-
     #[test]
     pub fn test_parse_nonindexed_field_yields_error() {
         let query_parser = make_query_parser();
@@ -289,7 +289,7 @@ impl RangeWeight {
 }
 
 impl Weight for RangeWeight {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
+    fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
         let max_doc = reader.max_doc();
         let mut doc_bitset = BitSet::with_max_value(max_doc);
 
@@ -307,11 +307,11 @@ impl Weight for RangeWeight {
             }
         }
         let doc_bitset = BitSetDocSet::from(doc_bitset);
-        Ok(Box::new(ConstScorer::new(doc_bitset, boost)))
+        Ok(Box::new(ConstScorer::new(doc_bitset)))
     }
 
     fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
+        let mut scorer = self.scorer(reader)?;
         if scorer.skip_next(doc) != SkipResult::Reached {
             return Err(does_not_match(doc));
         }
@@ -115,8 +115,8 @@ mod tests {
         let req = vec![1, 3, 7];
         let mut reqoptscorer: RequiredOptionalScorer<_, _, SumCombiner> =
             RequiredOptionalScorer::new(
-                ConstScorer::from(VecDocSet::from(req.clone())),
-                ConstScorer::from(VecDocSet::from(vec![])),
+                ConstScorer::new(VecDocSet::from(req.clone())),
+                ConstScorer::new(VecDocSet::from(vec![])),
             );
         let mut docs = vec![];
         while reqoptscorer.advance() {
@@ -129,8 +129,8 @@ mod tests {
     fn test_reqopt_scorer() {
         let mut reqoptscorer: RequiredOptionalScorer<_, _, SumCombiner> =
             RequiredOptionalScorer::new(
-                ConstScorer::new(VecDocSet::from(vec![1, 3, 7, 8, 9, 10, 13, 15]), 1.0f32),
-                ConstScorer::new(VecDocSet::from(vec![1, 2, 7, 11, 12, 15]), 1.0f32),
+                ConstScorer::new(VecDocSet::from(vec![1, 3, 7, 8, 9, 10, 13, 15])),
+                ConstScorer::new(VecDocSet::from(vec![1, 2, 7, 11, 12, 15])),
             );
         {
             assert!(reqoptscorer.advance());
@@ -183,8 +183,8 @@ mod tests {
         test_skip_against_unoptimized(
             || {
                 Box::new(RequiredOptionalScorer::<_, _, DoNothingCombiner>::new(
-                    ConstScorer::from(VecDocSet::from(req_docs.clone())),
-                    ConstScorer::from(VecDocSet::from(opt_docs.clone())),
+                    ConstScorer::new(VecDocSet::from(req_docs.clone())),
+                    ConstScorer::new(VecDocSet::from(opt_docs.clone())),
                 ))
             },
             skip_docs,
@@ -49,14 +49,16 @@ pub struct ConstScorer<TDocSet: DocSet> {
 
 impl<TDocSet: DocSet> ConstScorer<TDocSet> {
     /// Creates a new `ConstScorer`.
-    pub fn new(docset: TDocSet, score: f32) -> ConstScorer<TDocSet> {
-        ConstScorer { docset, score }
+    pub fn new(docset: TDocSet) -> ConstScorer<TDocSet> {
+        ConstScorer {
+            docset,
+            score: 1f32,
+        }
     }
-}
 
-impl<TDocSet: DocSet> From<TDocSet> for ConstScorer<TDocSet> {
-    fn from(docset: TDocSet) -> Self {
-        ConstScorer::new(docset, 1.0f32)
+    /// Sets the constant score to a different value.
+    pub fn set_score(&mut self, score: Score) {
+        self.score = score;
     }
 }
@@ -88,6 +90,6 @@ impl<TDocSet: DocSet> DocSet for ConstScorer<TDocSet> {
 
 impl<TDocSet: DocSet + 'static> Scorer for ConstScorer<TDocSet> {
     fn score(&mut self) -> Score {
-        self.score
+        1f32
     }
 }
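A dependency-free sketch of the reverted `ConstScorer` shape: the score is fixed to 1.0 at construction and mutated through `set_score`, instead of being a constructor argument as on the other side of the hunk. Note a quirk visible in the reverted code above: `Scorer::score` returns a literal `1f32`, so the value stored by `set_score` is not what `score()` reports; the stand-in below reads the field directly.

```rust
// Hypothetical stand-in for the reverted API; `docset` is just a Vec here.
struct ConstScorer<D> {
    docset: D,
    score: f32,
}

impl<D> ConstScorer<D> {
    fn new(docset: D) -> ConstScorer<D> {
        ConstScorer { docset, score: 1f32 }
    }

    fn set_score(&mut self, score: f32) {
        self.score = score;
    }
}

fn main() {
    let mut scorer = ConstScorer::new(vec![1u32, 3, 7]);
    scorer.set_score(0.5);
    assert_eq!(scorer.score, 0.5);
    assert_eq!(scorer.docset.len(), 3);
}
```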
@@ -39,7 +39,7 @@ mod tests {
         );
         let term_weight = term_query.weight(&searcher, true).unwrap();
         let segment_reader = searcher.segment_reader(0);
-        let mut term_scorer = term_weight.scorer(segment_reader, 1.0f32).unwrap();
+        let mut term_scorer = term_weight.scorer(segment_reader).unwrap();
         assert!(term_scorer.advance());
         assert_eq!(term_scorer.doc(), 0);
         assert_eq!(term_scorer.score(), 0.28768212);
@@ -18,13 +18,13 @@ pub struct TermWeight {
 }
 
 impl Weight for TermWeight {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
-        let term_scorer = self.scorer_specialized(reader, boost)?;
+    fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
+        let term_scorer = self.scorer_specialized(reader)?;
         Ok(Box::new(term_scorer))
     }
 
     fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
-        let mut scorer = self.scorer_specialized(reader, 1.0f32)?;
+        let mut scorer = self.scorer_specialized(reader)?;
         if scorer.skip_next(doc) != SkipResult::Reached {
             return Err(does_not_match(doc));
         }
@@ -33,7 +33,7 @@ impl Weight for TermWeight {
 
     fn count(&self, reader: &SegmentReader) -> Result<u32> {
         if let Some(delete_bitset) = reader.delete_bitset() {
-            Ok(self.scorer(reader, 1.0f32)?.count(delete_bitset))
+            Ok(self.scorer(reader)?.count(delete_bitset))
         } else {
             let field = self.term.field();
             Ok(reader
@@ -58,11 +58,11 @@ impl TermWeight {
         }
     }
 
-    fn scorer_specialized(&self, reader: &SegmentReader, boost: f32) -> Result<TermScorer> {
+    fn scorer_specialized(&self, reader: &SegmentReader) -> Result<TermScorer> {
         let field = self.term.field();
         let inverted_index = reader.inverted_index(field);
         let fieldnorm_reader = reader.get_fieldnorms_reader(field);
-        let similarity_weight = self.similarity_weight.boost_by(boost);
+        let similarity_weight = self.similarity_weight.clone();
         let postings_opt: Option<SegmentPostings> =
             inverted_index.read_postings(&self.term, self.index_record_option);
         if let Some(segment_postings) = postings_opt {
@@ -145,6 +145,26 @@ where
         }
     }
 
+    fn count_including_deleted(&mut self) -> u32 {
+        let mut count = self.bitsets[self.cursor..HORIZON_NUM_TINYBITSETS]
+            .iter()
+            .map(|bitset| bitset.len())
+            .sum::<u32>();
+        for bitset in self.bitsets.iter_mut() {
+            bitset.clear();
+        }
+        while self.refill() {
+            count += self.bitsets.iter().map(|bitset| bitset.len()).sum::<u32>();
+            for bitset in self.bitsets.iter_mut() {
+                bitset.clear();
+            }
+        }
+        self.cursor = HORIZON_NUM_TINYBITSETS;
+        count
+    }
+
+    // TODO implement `count` efficiently.
+
     fn skip_next(&mut self, target: DocId) -> SkipResult {
         if !self.advance() {
             return SkipResult::End;
@@ -223,8 +243,6 @@ where
         }
     }
 
-    // TODO implement `count` efficiently.
-
     fn doc(&self) -> DocId {
         self.doc
     }
@@ -232,24 +250,6 @@ where
     fn size_hint(&self) -> u32 {
         0u32
     }
-
-    fn count_including_deleted(&mut self) -> u32 {
-        let mut count = self.bitsets[self.cursor..HORIZON_NUM_TINYBITSETS]
-            .iter()
-            .map(|bitset| bitset.len())
-            .sum::<u32>();
-        for bitset in self.bitsets.iter_mut() {
-            bitset.clear();
-        }
-        while self.refill() {
-            count += self.bitsets.iter().map(|bitset| bitset.len()).sum::<u32>();
-            for bitset in self.bitsets.iter_mut() {
-                bitset.clear();
-            }
-        }
-        self.cursor = HORIZON_NUM_TINYBITSETS;
-        count
-    }
 }
 
 impl<TScorer, TScoreCombiner> Scorer for Union<TScorer, TScoreCombiner>
@@ -290,7 +290,7 @@ mod tests {
             vals.iter()
                 .cloned()
                 .map(VecDocSet::from)
-                .map(|docset| ConstScorer::new(docset, 1.0f32))
+                .map(ConstScorer::new)
                 .collect::<Vec<ConstScorer<VecDocSet>>>(),
         )
     };
@@ -339,7 +339,7 @@ mod tests {
             .iter()
             .map(|docs| docs.clone())
            .map(VecDocSet::from)
-            .map(|docset| ConstScorer::new(docset, 1.0f32))
+            .map(ConstScorer::new)
            .collect::<Vec<_>>(),
        ));
        res
@@ -369,8 +369,8 @@ mod tests {
    #[test]
    fn test_union_skip_corner_case3() {
        let mut docset = Union::<_, DoNothingCombiner>::from(vec![
-            ConstScorer::from(VecDocSet::from(vec![0u32, 5u32])),
-            ConstScorer::from(VecDocSet::from(vec![1u32, 4u32])),
+            ConstScorer::new(VecDocSet::from(vec![0u32, 5u32])),
+            ConstScorer::new(VecDocSet::from(vec![1u32, 4u32])),
        ]);
        assert!(docset.advance());
        assert_eq!(docset.doc(), 0u32);
@@ -1,7 +1,7 @@
 use super::Scorer;
 use crate::core::SegmentReader;
 use crate::query::Explanation;
-use crate::DocId;
+use crate::{DocId, Result};
 
 /// A Weight is the specialization of a Query
 /// for a given set of segments.
@@ -9,18 +9,15 @@ use crate::DocId;
 /// See [`Query`](./trait.Query.html).
 pub trait Weight: Send + Sync + 'static {
     /// Returns the scorer for the given segment.
-    ///
-    /// `boost` is a multiplier to apply to the score.
     ///
     /// See [`Query`](./trait.Query.html).
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>>;
+    fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>>;
 
     /// Returns an `Explanation` for the given document.
-    fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation>;
+    fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation>;
 
     /// Returns the number documents within the given `SegmentReader`.
-    fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
+    fn count(&self, reader: &SegmentReader) -> Result<u32> {
+        let mut scorer = self.scorer(reader)?;
         if let Some(delete_bitset) = reader.delete_bitset() {
             Ok(scorer.count(delete_bitset))
         } else {
@@ -68,9 +68,7 @@ impl<T> Pool<T> {
     /// After publish, all new `Searcher` acquired will be
     /// of the new generation.
     pub fn publish_new_generation(&self, items: Vec<T>) {
-        assert!(!items.is_empty());
         let next_generation = self.next_generation.fetch_add(1, Ordering::SeqCst) + 1;
-        let num_items = items.len();
         for item in items {
             let gen_item = GenerationItem {
                 item,
@@ -79,23 +77,6 @@ impl<T> Pool<T> {
             self.queue.push(gen_item);
         }
         self.advertise_generation(next_generation);
-        // Purge possible previous searchers.
-        //
-        // Assuming at this point no searcher is held more than duration T by the user,
-        // this guarantees that an obsolete searcher will not be uselessly held (and its associated
-        // mmap) for more than duration T.
-        //
-        // Proof: At this point, obsolete searcher that are held by the user will be held for less
-        // than T. When released, they will be dropped as their generation is detected obsolete.
-        //
-        // We still need to ensure that the searcher that are obsolete and in the pool get removed.
-        // The queue currently contains up to 2n searchers, in any random order.
-        //
-        // Half of them are obsoletes. By requesting `(n+1)` fresh searchers, we ensure that all
-        // searcher will be inspected.
-        for _ in 0..=num_items {
-            let _ = self.acquire();
-        }
     }
 
     /// At the exit of this method,
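The removed block is the purge step of the #777 fix: after publishing a new generation of n fresh searchers, requesting n + 1 items guarantees every obsolete searcher in a queue of up to 2n entries gets inspected and dropped, releasing its mmap. A dependency-free sketch of the idea (the real code reuses `acquire()`, which drops stale generations as a side effect):

```rust
use std::collections::VecDeque;

// Pop n + 1 items; keep only those from the current generation.
fn purge_obsolete(queue: &mut VecDeque<(u64, String)>, current_gen: u64, num_items: usize) {
    for _ in 0..=num_items {
        if let Some((gen, item)) = queue.pop_front() {
            if gen == current_gen {
                queue.push_back((gen, item)); // fresh: keep it
            } // stale: dropped here, releasing whatever it holds (e.g. an mmap)
        }
    }
}

fn main() {
    let mut queue: VecDeque<(u64, String)> =
        vec![(1, "old".into()), (2, "new".into()), (1, "old2".into()), (2, "new2".into())]
            .into();
    purge_obsolete(&mut queue, 2, 2);
    assert!(queue.iter().all(|(gen, _)| *gen == 2));
}
```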
@@ -125,7 +125,7 @@ impl Facet {
 
     /// This function is the inverse of Facet::from(&str).
     pub fn to_path_string(&self) -> String {
-        format!("{}", self)
+        format!("{}", self.to_string())
     }
 }
 
@@ -9,7 +9,7 @@ pub const COMPRESSION: &str = "snappy";
 
 pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
     compressed.clear();
-    let mut encoder = snap::write::FrameEncoder::new(compressed);
+    let mut encoder = snap::Writer::new(compressed);
     encoder.write_all(&uncompressed)?;
     encoder.flush()?;
     Ok(())
@@ -17,6 +17,6 @@ pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()>
 
 pub fn decompress(compressed: &[u8], decompressed: &mut Vec<u8>) -> io::Result<()> {
     decompressed.clear();
-    snap::read::FrameDecoder::new(compressed).read_to_end(decompressed)?;
+    snap::Reader::new(compressed).read_to_end(decompressed)?;
     Ok(())
 }
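A round-trip sketch of the snap 0.2 framed API this hunk reverts to, shaped like the two functions above (assumes the snap 0.2.x crate, where `snap::Writer` and `snap::Reader` are the frame encoder and decoder):

```rust
use std::io::{Read, Write};

fn main() -> std::io::Result<()> {
    let payload = b"tantivy stores documents in compressed blocks".to_vec();

    // Compress: stream the payload through the frame encoder.
    let mut compressed = Vec::new();
    let mut encoder = snap::Writer::new(&mut compressed);
    encoder.write_all(&payload)?;
    encoder.flush()?;
    drop(encoder); // release the borrow on `compressed`

    // Decompress: read the frames back out.
    let mut decompressed = Vec::new();
    snap::Reader::new(&compressed[..]).read_to_end(&mut decompressed)?;
    assert_eq!(decompressed, payload);
    Ok(())
}
```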