Compare commits

...

23 Commits

Author SHA1 Message Date
Paul Masurel
ee6c839ee6 Moving the StoreWriter out of the SegmentSerializer. 2020-03-10 09:53:56 +09:00
Paul Masurel
b3f0ef0878 Avoid writing a new delete file if there were no actual deletes. (#787)
When applying the delete operations in the delete queue, it is possible
that no new documents were actually deleted.

In this case, avoid creating a new delete file and updating the delete
opstamp.
2020-03-08 13:04:21 +09:00
Paul Masurel
04304262ba cargo fmt 2020-03-08 09:58:42 +09:00
Paul Masurel
920ced364a Added a method to persist the RAMDirectory into a different directory. 2020-03-07 17:00:50 +09:00
Paul Masurel
e0499118e2 Minor refactoring 2020-03-07 15:56:03 +09:00
Paul Masurel
50b5efae46 Added derive feature to serde crate 2020-03-06 23:46:29 +09:00
Paul Masurel
486b8fa9c5 Removing serde-derive dependency (#786) 2020-03-06 23:33:58 +09:00
Minoru Osuka
b2baed9bdd Add Lindera to README.md (#785)
* Add Lindera to README.md

* Put lindera in first place
2020-03-03 20:23:59 +09:00
Paul Masurel
b591542c0b Removing err.description() before deprecation. 2020-03-03 09:58:49 +09:00
Paul Masurel
a83fa00ac4 Faster compilation of query-grammar. (#784) 2020-03-02 22:12:42 +09:00
Paul Masurel
7ff5c7c797 Removing the fst feature in the levenshtein_automata crate. 2020-03-02 21:47:05 +09:00
Paul Masurel
1748602691 ignore -> compile_fail 2020-03-02 09:59:48 +09:00
Paul Masurel
6542dd5337 Removing parenthesis. 2020-03-01 09:41:53 +09:00
Nicholas Connor
c64a44b9e1 Slight re-organization to increase contrast of "Getting Started" (#783) 2020-02-28 08:42:38 +09:00
Paul Masurel
fccc5b3bed Closes #758 2020-02-27 17:58:43 +09:00
Paul Masurel
98b9d5c6c4 Closes #780. Will be fixed on the next published release. 2020-02-21 09:41:52 +09:00
Paul Masurel
afd2c1a8ad Merge branch 'master' of github.com:tantivy-search/tantivy 2020-02-19 22:08:44 +09:00
Paul Masurel
81f35a3ceb Bumped tantivy-grammar version 2020-02-19 22:08:31 +09:00
Paul Masurel
7e2e765f4a Bumped tantivy-grammar version 2020-02-19 22:07:54 +09:00
Paul Masurel
7d6cfa58e1 [WIP] Alternative take on boosted queries (#772)
* Alternative take on boosted queries

* Fixing unit test

* Added boosting to the query grammar.

* Made BoostQuery public.

* Added support for boosting field in QueryParser

Closes #547
2020-02-19 11:04:38 +09:00
Paul Masurel
14735ce3aa Update snap version to 1. (#781) 2020-02-17 10:41:44 +09:00
Paul Masurel
72f7cc1569 Closes #777 (#779) 2020-02-17 09:53:38 +09:00
Paul Masurel
abef5c4e74 Updating combine to version 4 (#775) 2020-02-06 23:02:48 +09:00
62 changed files with 1247 additions and 377 deletions

View File

@@ -4,6 +4,8 @@ Tantivy 0.12.0
- Added backward iteration for `TermDictionary` stream. (@halvorboe)
- Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
- Added a configurable maximum number of docs (10M by default) for a segment to be considered for merge (@hntd187, landed by @halvorboe #713)
- Important bugfix for #777, which caused tantivy to retain memory mappings. (diagnosed by @poljar)
- Added support for field boosting. (#547, @fulmicoton)
## How to update?

View File

@@ -5,7 +5,7 @@ authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
description = """Search engine library"""
documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
documentation = "https://docs.rs/tantivy/"
homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy"
readme = "README.md"
@@ -21,17 +21,16 @@ regex ={version = "1.3.0", default-features = false, features = ["std"]}
tantivy-fst = "0.2.1"
memmap = {version = "0.7", optional=true}
lz4 = {version="1.20", optional=true}
snap = {version="0.2"}
snap = "1"
atomicwrites = {version="0.2.2", optional=true}
tempfile = "3.0"
log = "0.4"
serde = "1.0"
serde_derive = "1.0"
serde = {version="1.0", features=["derive"]}
serde_json = "1.0"
num_cpus = "1.2"
fs2={version="0.4", optional=true}
itertools = "0.8"
levenshtein_automata = {version="0.1", features=["fst_automaton"]}
levenshtein_automata = "0.1"
notify = {version="4", optional=true}
uuid = { version = "0.8", features = ["v4", "serde"] }
crossbeam = "0.7"
@@ -40,7 +39,7 @@ owning_ref = "0.4"
stable_deref_trait = "1.0.0"
rust-stemmers = "1.2"
downcast-rs = { version="1.0" }
tantivy-query-grammar = { version="0.11", path="./query-grammar" }
tantivy-query-grammar = { version="0.12", path="./query-grammar" }
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.4"
fnv = "1.0.6"
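For context, the serde dependency change above (dropping the standalone `serde_derive` crate in favor of serde's `derive` feature) pairs with the source changes further down, where `extern crate serde_derive;` is removed and `use serde::{Deserialize, Serialize};` is added where needed. A minimal before/after sketch, with a hypothetical struct for illustration:

// Before: derive macros came from the separate serde_derive proc-macro crate.
// #[macro_use]
// extern crate serde_derive;

// After: with serde = { version = "1.0", features = ["derive"] }, the derive
// macros are imported directly from serde in the modules that use them.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct ExampleMeta {
    max_doc: u32,
}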

View File

@@ -40,7 +40,7 @@ performance for different type of queries / collection.
# Features
- Full-text search
- Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)) and [Japanese](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter))
- Configurable tokenizer (stemming available for 17 Latin languages with third-party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy) and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
- Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
- Tiny startup time (<10ms), perfect for command line tools
- BM25 scoring (the same as Lucene)
@@ -59,18 +59,17 @@ performance for different type of queries / collection.
- Configurable indexing (optional term frequency and position indexing)
- Cheesy logo with a horse
# Non-features
## Non-features
- Distributed search is out of the scope of Tantivy. That being said, Tantivy is a
library upon which one could build a distributed search engine. Serializable/mergeable collector state, for instance,
is within the scope of Tantivy.
# Supported OS and compiler
Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows.
# Getting started
Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows.
- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli) - `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
index documents, and search via the CLI or a small server with a REST API.

View File

@@ -9,11 +9,10 @@
// - import tokenized text straight from json,
// - perform a search on documents with pre-tokenized text
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, Tokenizer};
use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, Tokenizer};
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy-query-grammar"
version = "0.11.0"
version = "0.12.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -13,4 +13,4 @@ keywords = ["search", "information", "retrieval"]
edition = "2018"
[dependencies]
combine = ">=3.6.0,<4.0.0"
combine = {version="4", default-features=false, features=[] }

View File

@@ -1,5 +1,3 @@
#![recursion_limit = "100"]
mod occur;
mod query_grammar;
mod user_input_ast;

View File

@@ -1,150 +1,153 @@
use super::user_input_ast::*;
use super::user_input_ast::{UserInputAST, UserInputBound, UserInputLeaf, UserInputLiteral};
use crate::Occur;
use combine::char::*;
use combine::error::StreamError;
use combine::stream::StreamErrorFor;
use combine::*;
use combine::error::StringStreamError;
use combine::parser::char::{char, digit, letter, space, spaces, string};
use combine::parser::Parser;
use combine::{
attempt, choice, eof, many, many1, one_of, optional, parser, satisfy, skip_many1, value,
};
parser! {
fn field[I]()(I) -> String
where [I: Stream<Item = char>] {
(
letter(),
many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
).skip(char(':')).map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
}
}
parser! {
fn word[I]()(I) -> String
where [I: Stream<Item = char>] {
(
satisfy(|c: char| !c.is_whitespace() && !['-', '`', ':', '{', '}', '"', '[', ']', '(',')'].contains(&c) ),
many(satisfy(|c: char| !c.is_whitespace() && ![':', '{', '}', '"', '[', ']', '(',')'].contains(&c)))
)
fn field<'a>() -> impl Parser<&'a str, Output = String> {
(
letter(),
many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
)
.skip(char(':'))
.map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
.and_then(|s: String|
match s.as_str() {
"OR" => Err(StreamErrorFor::<I>::unexpected_static_message("OR")),
"AND" => Err(StreamErrorFor::<I>::unexpected_static_message("AND")),
"NOT" => Err(StreamErrorFor::<I>::unexpected_static_message("NOT")),
_ => Ok(s)
})
}
}
parser! {
fn literal[I]()(I) -> UserInputLeaf
where [I: Stream<Item = char>]
{
let term_val = || {
let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"'));
phrase.or(word())
};
let term_val_with_field = negative_number().or(term_val());
let term_query =
(field(), term_val_with_field)
.map(|(field_name, phrase)| UserInputLiteral {
field_name: Some(field_name),
phrase,
});
let term_default_field = term_val().map(|phrase| UserInputLiteral {
field_name: None,
phrase,
});
attempt(term_query)
.or(term_default_field)
.map(UserInputLeaf::from)
}
}
parser! {
fn negative_number[I]()(I) -> String
where [I: Stream<Item = char>]
{
(char('-'), many1(satisfy(char::is_numeric)),
optional((char('.'), many1(satisfy(char::is_numeric)))))
.map(|(s1, s2, s3): (char, String, Option<(char, String)>)| {
if let Some(('.', s3)) = s3 {
format!("{}{}.{}", s1, s2, s3)
} else {
format!("{}{}", s1, s2)
}
})
}
}
parser! {
fn spaces1[I]()(I) -> ()
where [I: Stream<Item = char>] {
skip_many1(space())
}
}
parser! {
/// Function that parses a range out of a Stream
/// Supports ranges like:
/// [5 TO 10], {5 TO 10}, [* TO 10], [10 TO *], {10 TO *], >5, <=10
/// [a TO *], [a TO c], [abc TO bcd}
fn range[I]()(I) -> UserInputLeaf
where [I: Stream<Item = char>] {
let range_term_val = || {
word().or(negative_number()).or(char('*').with(value("*".to_string())))
};
// check for unbounded range in the form of <5, <=10, >5, >=5
let elastic_unbounded_range = (choice([attempt(string(">=")),
attempt(string("<=")),
attempt(string("<")),
attempt(string(">"))])
.skip(spaces()),
range_term_val()).
map(|(comparison_sign, bound): (&str, String)|
match comparison_sign {
">=" => (UserInputBound::Inclusive(bound), UserInputBound::Unbounded),
"<=" => (UserInputBound::Unbounded, UserInputBound::Inclusive(bound)),
"<" => (UserInputBound::Unbounded, UserInputBound::Exclusive(bound)),
">" => (UserInputBound::Exclusive(bound), UserInputBound::Unbounded),
// default case
_ => (UserInputBound::Unbounded, UserInputBound::Unbounded)
});
let lower_bound = (one_of("{[".chars()), range_term_val())
.map(|(boundary_char, lower_bound): (char, String)|
if lower_bound == "*" {
UserInputBound::Unbounded
} else if boundary_char == '{' {
UserInputBound::Exclusive(lower_bound)
} else {
UserInputBound::Inclusive(lower_bound)
});
let upper_bound = (range_term_val(), one_of("}]".chars()))
.map(|(higher_bound, boundary_char): (String, char)|
if higher_bound == "*" {
UserInputBound::Unbounded
} else if boundary_char == '}' {
UserInputBound::Exclusive(higher_bound)
} else {
UserInputBound::Inclusive(higher_bound)
});
// return only lower and upper
let lower_to_upper = (lower_bound.
skip((spaces(),
string("TO"),
spaces())),
upper_bound);
(optional(field()).skip(spaces()),
// try elastic first, if it matches, the range is unbounded
attempt(elastic_unbounded_range).or(lower_to_upper))
.map(|(field, (lower, upper))|
// Construct the leaf from extracted field (optional)
// and bounds
UserInputLeaf::Range {
field,
lower,
upper
fn word<'a>() -> impl Parser<&'a str, Output = String> {
(
satisfy(|c: char| {
!c.is_whitespace()
&& !['-', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
}),
many(satisfy(|c: char| {
!c.is_whitespace() && ![':', '^', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
})),
)
.map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
.and_then(|s: String| match s.as_str() {
"OR" | "AND " | "NOT" => Err(StringStreamError::UnexpectedParse),
_ => Ok(s),
})
}
}
fn term_val<'a>() -> impl Parser<&'a str, Output = String> {
let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"'));
phrase.or(word())
}
fn term_query<'a>() -> impl Parser<&'a str, Output = UserInputLiteral> {
let term_val_with_field = negative_number().or(term_val());
(field(), term_val_with_field).map(|(field_name, phrase)| UserInputLiteral {
field_name: Some(field_name),
phrase,
})
}
fn literal<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
let term_default_field = term_val().map(|phrase| UserInputLiteral {
field_name: None,
phrase,
});
attempt(term_query())
.or(term_default_field)
.map(UserInputLeaf::from)
}
fn negative_number<'a>() -> impl Parser<&'a str, Output = String> {
(
char('-'),
many1(digit()),
optional((char('.'), many1(digit()))),
)
.map(|(s1, s2, s3): (char, String, Option<(char, String)>)| {
if let Some(('.', s3)) = s3 {
format!("{}{}.{}", s1, s2, s3)
} else {
format!("{}{}", s1, s2)
}
})
}
fn spaces1<'a>() -> impl Parser<&'a str, Output = ()> {
skip_many1(space())
}
/// Function that parses a range out of a Stream
/// Supports ranges like:
/// [5 TO 10], {5 TO 10}, [* TO 10], [10 TO *], {10 TO *], >5, <=10
/// [a TO *], [a TO c], [abc TO bcd}
fn range<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
let range_term_val = || {
word()
.or(negative_number())
.or(char('*').with(value("*".to_string())))
};
// check for unbounded range in the form of <5, <=10, >5, >=5
let elastic_unbounded_range = (
choice([
attempt(string(">=")),
attempt(string("<=")),
attempt(string("<")),
attempt(string(">")),
])
.skip(spaces()),
range_term_val(),
)
.map(
|(comparison_sign, bound): (&str, String)| match comparison_sign {
">=" => (UserInputBound::Inclusive(bound), UserInputBound::Unbounded),
"<=" => (UserInputBound::Unbounded, UserInputBound::Inclusive(bound)),
"<" => (UserInputBound::Unbounded, UserInputBound::Exclusive(bound)),
">" => (UserInputBound::Exclusive(bound), UserInputBound::Unbounded),
// default case
_ => (UserInputBound::Unbounded, UserInputBound::Unbounded),
},
);
let lower_bound = (one_of("{[".chars()), range_term_val()).map(
|(boundary_char, lower_bound): (char, String)| {
if lower_bound == "*" {
UserInputBound::Unbounded
} else if boundary_char == '{' {
UserInputBound::Exclusive(lower_bound)
} else {
UserInputBound::Inclusive(lower_bound)
}
},
);
let upper_bound = (range_term_val(), one_of("}]".chars())).map(
|(higher_bound, boundary_char): (String, char)| {
if higher_bound == "*" {
UserInputBound::Unbounded
} else if boundary_char == '}' {
UserInputBound::Exclusive(higher_bound)
} else {
UserInputBound::Inclusive(higher_bound)
}
},
);
// return only lower and upper
let lower_to_upper = (
lower_bound.skip((spaces(), string("TO"), spaces())),
upper_bound,
);
(
optional(field()).skip(spaces()),
// try elastic first, if it matches, the range is unbounded
attempt(elastic_unbounded_range).or(lower_to_upper),
)
.map(|(field, (lower, upper))|
// Construct the leaf from extracted field (optional)
// and bounds
UserInputLeaf::Range {
field,
lower,
upper
})
}
fn negate(expr: UserInputAST) -> UserInputAST {
@@ -155,17 +158,48 @@ fn must(expr: UserInputAST) -> UserInputAST {
expr.unary(Occur::Must)
}
parser! {
fn leaf[I]()(I) -> UserInputAST
where [I: Stream<Item = char>] {
char('-').with(leaf()).map(negate)
.or(char('+').with(leaf()).map(must))
.or(char('(').with(ast()).skip(char(')')))
.or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All)))
.or(attempt(string("NOT").skip(spaces1()).with(leaf()).map(negate)))
.or(attempt(range().map(UserInputAST::from)))
.or(literal().map(UserInputAST::from))
}
fn leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
parser(|input| {
char('-')
.with(leaf())
.map(negate)
.or(char('+').with(leaf()).map(must))
.or(char('(').with(ast()).skip(char(')')))
.or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All)))
.or(attempt(
string("NOT").skip(spaces1()).with(leaf()).map(negate),
))
.or(attempt(range().map(UserInputAST::from)))
.or(literal().map(UserInputAST::from))
.parse_stream(input)
.into_result()
})
}
fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f32> {
(many1(digit()), optional((char('.'), many1(digit())))).map(
|(int_part, decimal_part_opt): (String, Option<(char, String)>)| {
let mut float_str = int_part;
if let Some((chr, decimal_str)) = decimal_part_opt {
float_str.push(chr);
float_str.push_str(&decimal_str);
}
float_str.parse::<f32>().unwrap()
},
)
}
fn boost<'a>() -> impl Parser<&'a str, Output = f32> {
(char('^'), positive_float_number()).map(|(_, boost)| boost)
}
fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
(leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
Some(boost) if (boost - 1.0).abs() > std::f32::EPSILON => {
UserInputAST::Boost(Box::new(leaf), boost)
}
_ => leaf,
})
}
#[derive(Clone, Copy)]
@@ -174,13 +208,10 @@ enum BinaryOperand {
And,
}
parser! {
fn binary_operand[I]()(I) -> BinaryOperand
where [I: Stream<Item = char>]
{
string("AND").with(value(BinaryOperand::And))
.or(string("OR").with(value(BinaryOperand::Or)))
}
fn binary_operand<'a>() -> impl Parser<&'a str, Output = BinaryOperand> {
string("AND")
.with(value(BinaryOperand::And))
.or(string("OR").with(value(BinaryOperand::Or)))
}
fn aggregate_binary_expressions(
@@ -208,37 +239,67 @@ fn aggregate_binary_expressions(
}
}
parser! {
pub fn ast[I]()(I) -> UserInputAST
where [I: Stream<Item = char>]
{
let operand_leaf = (binary_operand().skip(spaces()), leaf().skip(spaces()));
let boolean_expr = (leaf().skip(spaces().silent()), many1(operand_leaf)).map(
|(left, right)| aggregate_binary_expressions(left,right));
let whitespace_separated_leaves = many1(leaf().skip(spaces().silent()))
.map(|subqueries: Vec<UserInputAST>|
pub fn ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
let operand_leaf = (
binary_operand().skip(spaces()),
boosted_leaf().skip(spaces()),
);
let boolean_expr = (boosted_leaf().skip(spaces().silent()), many1(operand_leaf))
.map(|(left, right)| aggregate_binary_expressions(left, right));
let whitespace_separated_leaves =
many1(boosted_leaf().skip(spaces().silent())).map(|subqueries: Vec<UserInputAST>| {
if subqueries.len() == 1 {
subqueries.into_iter().next().unwrap()
} else {
UserInputAST::Clause(subqueries.into_iter().collect())
});
let expr = attempt(boolean_expr).or(whitespace_separated_leaves);
spaces().with(expr).skip(spaces())
}
}
});
let expr = attempt(boolean_expr).or(whitespace_separated_leaves);
spaces().with(expr).skip(spaces())
}
parser! {
pub fn parse_to_ast[I]()(I) -> UserInputAST
where [I: Stream<Item = char>]
{
spaces().with(optional(ast()).skip(eof())).map(|opt_ast| opt_ast.unwrap_or_else(UserInputAST::empty_query))
}
pub fn parse_to_ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
spaces()
.with(optional(ast()).skip(eof()))
.map(|opt_ast| opt_ast.unwrap_or_else(UserInputAST::empty_query))
}
#[cfg(test)]
mod test {
use super::*;
use combine::parser::Parser;
pub fn nearly_equals(a: f32, b: f32) -> bool {
(a - b).abs() < 0.0005 * (a + b).abs()
}
fn assert_nearly_equals(expected: f32, val: f32) {
assert!(
nearly_equals(val, expected),
"Got {}, expected {}.",
val,
expected
);
}
#[test]
fn test_positive_float_number() {
fn valid_parse(float_str: &str, expected_val: f32, expected_remaining: &str) {
let (val, remaining) = positive_float_number().parse(float_str).unwrap();
assert_eq!(remaining, expected_remaining);
assert_nearly_equals(val, expected_val);
}
fn error_parse(float_str: &str) {
assert!(positive_float_number().parse(float_str).is_err());
}
valid_parse("1.0", 1.0f32, "");
valid_parse("1", 1.0f32, "");
valid_parse("0.234234 aaa", 0.234234f32, " aaa");
error_parse(".3332");
error_parse("1.");
error_parse("-1.");
}
fn test_parse_query_to_ast_helper(query: &str, expected: &str) {
let query = parse_to_ast().parse(query).unwrap().0;
@@ -272,6 +333,15 @@ mod test {
test_parse_query_to_ast_helper("NOT a", "-(\"a\")");
}
#[test]
fn test_boosting() {
assert!(parse_to_ast().parse("a^2^3").is_err());
assert!(parse_to_ast().parse("a^2^").is_err());
test_parse_query_to_ast_helper("a^3", "(\"a\")^3");
test_parse_query_to_ast_helper("a^3 b^2", "((\"a\")^3 (\"b\")^2)");
test_parse_query_to_ast_helper("a^1", "\"a\"");
}
#[test]
fn test_parse_query_to_ast_binary_op() {
test_parse_query_to_ast_helper("a AND b", "(+(\"a\") +(\"b\"))");

View File

@@ -88,6 +88,7 @@ pub enum UserInputAST {
Clause(Vec<UserInputAST>),
Unary(Occur, Box<UserInputAST>),
Leaf(Box<UserInputLeaf>),
Boost(Box<UserInputAST>, f32),
}
impl UserInputAST {
@@ -154,6 +155,7 @@ impl fmt::Debug for UserInputAST {
write!(formatter, "{}({:?})", occur, subquery)
}
UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
UserInputAST::Boost(ref leaf, boost) => write!(formatter, "({:?})^{}", leaf, boost),
}
}
}

View File

@@ -84,7 +84,8 @@ impl CustomScorer<u64> for ScorerByField {
.u64(self.field)
.ok_or_else(|| {
crate::TantivyError::SchemaError(format!(
"Field requested is not a i64/u64 fast field."
"Field requested ({:?}) is not a i64/u64 fast field.",
self.field
))
})?;
Ok(ScorerByFastFieldReader { ff_reader })
@@ -614,7 +615,10 @@ mod tests {
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
let err = top_collector.for_segment(0, segment);
if let Err(crate::TantivyError::SchemaError(msg)) = err {
assert_eq!(msg, "Field requested is not a i64/u64 fast field.");
assert_eq!(
msg,
"Field requested (Field(1)) is not a i64/u64 fast field."
);
} else {
assert!(false);
}

View File

@@ -1,4 +1,3 @@
use super::segment::create_segment;
use super::segment::Segment;
use crate::core::Executor;
use crate::core::IndexMeta;
@@ -337,7 +336,7 @@ impl Index {
#[doc(hidden)]
pub fn segment(&self, segment_meta: SegmentMeta) -> Segment {
create_segment(self.clone(), segment_meta)
Segment::for_index(self.clone(), segment_meta)
}
/// Creates a new segment.

View File

@@ -4,6 +4,7 @@ use crate::schema::Schema;
use crate::Opstamp;
use census::{Inventory, TrackedObject};
use serde;
use serde::{Deserialize, Serialize};
use serde_json;
use std::collections::HashSet;
use std::fmt;

View File

@@ -23,7 +23,7 @@ fn collect_segment<C: Collector>(
segment_ord: u32,
segment_reader: &SegmentReader,
) -> crate::Result<C::Fruit> {
let mut scorer = weight.scorer(segment_reader)?;
let mut scorer = weight.scorer(segment_reader, 1.0f32)?;
let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?;
if let Some(delete_bitset) = segment_reader.delete_bitset() {
scorer.for_each(&mut |doc, score| {

View File

@@ -24,15 +24,12 @@ impl fmt::Debug for Segment {
}
}
/// Creates a new segment given an `Index` and a `SegmentId`
///
/// The function is here to make it private outside `tantivy`.
/// #[doc(hidden)]
pub fn create_segment(index: Index, meta: SegmentMeta) -> Segment {
Segment { index, meta }
}
impl Segment {
/// Creates a new segment given an `Index` and a `SegmentMeta`
pub(crate) fn for_index(index: Index, meta: SegmentMeta) -> Segment {
Segment { index, meta }
}
/// Returns the index the segment belongs to.
pub fn index(&self) -> &Index {
&self.index

View File

@@ -4,6 +4,7 @@ use uuid::Uuid;
#[cfg(test)]
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use std::error::Error;
use std::str::FromStr;
#[cfg(test)]

View File

@@ -8,6 +8,8 @@ use crc32fast::Hasher;
use std::io;
use std::io::Write;
const FOOTER_MAX_LEN: usize = 10_000;
type CrcHashU32 = u32;
#[derive(Debug, Clone, PartialEq)]
@@ -143,12 +145,23 @@ impl BinarySerializable for VersionedFooter {
}
}
BinarySerializable::serialize(&VInt(buf.len() as u64), writer)?;
assert!(buf.len() <= FOOTER_MAX_LEN);
writer.write_all(&buf[..])?;
Ok(())
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let len = VInt::deserialize(reader)?.0 as usize;
if len > FOOTER_MAX_LEN {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Footer seems invalid as it suggests a footer len of {}. File is corrupted, \
or the index was created with a different & old version of tantivy.",
len
),
));
}
let mut buf = vec![0u8; len];
reader.read_exact(&mut buf[..])?;
let mut cursor = &buf[..];
@@ -221,11 +234,12 @@ mod tests {
use super::CrcHashU32;
use super::FooterProxy;
use crate::common::BinarySerializable;
use crate::common::{BinarySerializable, VInt};
use crate::directory::footer::{Footer, VersionedFooter};
use crate::directory::TerminatingWrite;
use byteorder::{ByteOrder, LittleEndian};
use regex::Regex;
use std::io;
#[test]
fn test_versioned_footer() {
@@ -336,4 +350,20 @@ mod tests {
let res = footer.is_compatible();
assert!(res.is_err());
}
#[test]
fn test_deserialize_too_large_footer() {
let mut buf = vec![];
assert!(FooterProxy::new(&mut buf).terminate().is_ok());
let mut long_len_buf = [0u8; 10];
let num_bytes = VInt(super::FOOTER_MAX_LEN as u64 + 1u64).serialize_into(&mut long_len_buf);
buf[0..num_bytes].copy_from_slice(&long_len_buf[..num_bytes]);
let err = Footer::deserialize(&mut &buf[..]).unwrap_err();
assert_eq!(err.kind(), io::ErrorKind::InvalidData);
assert_eq!(
err.to_string(),
"Footer seems invalid as it suggests a footer len of 10001. File is corrupted, \
or the index was created with a different & old version of tantivy."
);
}
}

View File

@@ -22,6 +22,7 @@ use crate::directory::WatchHandle;
use crate::directory::{TerminatingWrite, WritePtr};
use atomicwrites;
use memmap::Mmap;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::convert::From;
use std::fmt;

View File

@@ -13,6 +13,7 @@ mod footer;
mod managed_directory;
mod ram_directory;
mod read_only_source;
mod spilling_writer;
mod watch_event_router;
/// Errors specific to the directory module.
@@ -22,6 +23,7 @@ pub use self::directory::DirectoryLock;
pub use self::directory::{Directory, DirectoryClone};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub use self::ram_directory::RAMDirectory;
pub(crate) use self::spilling_writer::SpillingWriter;
pub use self::read_only_source::ReadOnlySource;
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
use std::io::{self, BufWriter, Write};
@@ -79,10 +81,16 @@ impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
}
}
impl TerminatingWrite for Vec<u8> {
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
Ok(())
}
}
#[cfg(test)]
impl<'a> TerminatingWrite for &'a mut Vec<u8> {
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
self.flush()
Ok(())
}
}

View File

@@ -144,6 +144,22 @@ impl RAMDirectory {
pub fn total_mem_usage(&self) -> usize {
self.fs.read().unwrap().total_mem_usage()
}
/// Writes a copy of all of the files saved in the RAMDirectory into the target `Directory`.
///
/// All files are written using sequential writes (`Directory::open_write`), even if they were
/// originally written using the `atomic_write` API.
///
/// If an error is encountered, the files may be only partially persisted.
pub fn persist(&self, dest: &mut dyn Directory) -> crate::Result<()> {
let wlock = self.fs.write().unwrap();
for (path, source) in wlock.fs.iter() {
let mut dest_wrt = dest.open_write(path)?;
dest_wrt.write_all(source.as_slice())?;
dest_wrt.terminate()?;
}
Ok(())
}
}
impl Directory for RAMDirectory {
@@ -204,3 +220,28 @@ impl Directory for RAMDirectory {
Ok(self.fs.write().unwrap().watch(watch_callback))
}
}
#[cfg(test)]
mod tests {
use super::RAMDirectory;
use crate::Directory;
use std::io::Write;
use std::path::Path;
#[test]
fn test_persist() {
let msg_atomic: &'static [u8] = b"atomic is the way";
let msg_seq: &'static [u8] = b"sequential is the way";
let path_atomic: &'static Path = Path::new("atomic");
let path_seq: &'static Path = Path::new("seq");
let mut directory = RAMDirectory::create();
assert!(directory.atomic_write(path_atomic, msg_atomic).is_ok());
let mut wrt = directory.open_write(path_seq).unwrap();
assert!(wrt.write_all(msg_seq).is_ok());
assert!(wrt.flush().is_ok());
let mut directory_copy = RAMDirectory::create();
assert!(directory.persist(&mut directory_copy).is_ok());
assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic);
assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq);
}
}
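A short usage sketch for the new `persist` method: build everything in a `RAMDirectory`, then copy the files onto disk. This assumes the default `mmap` feature (for `MmapDirectory`) and an existing target directory; the path is illustrative.

use std::path::Path;
use tantivy::directory::{MmapDirectory, RAMDirectory};

fn persist_to_disk() -> tantivy::Result<()> {
    let ram_directory = RAMDirectory::create();
    // ... write files into `ram_directory`, e.g. by building an index in it ...
    let mut on_disk = MmapDirectory::open(Path::new("/tmp/tantivy-index"))?;
    // Copies every file, including ones originally written via `atomic_write`,
    // using sequential writes on the destination.
    ram_directory.persist(&mut on_disk)?;
    Ok(())
}

The `test_persist` test above exercises the same flow between two `RAMDirectory` instances.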

View File

@@ -0,0 +1,180 @@
use crate::directory::{WritePtr, TerminatingWrite};
use std::io::{self, Write};
enum SpillingState {
Buffer {
buffer: Vec<u8>,
capacity: usize,
write_factory: Box<dyn FnOnce() -> io::Result<WritePtr>>,
},
Spilled(WritePtr),
}
impl SpillingState {
fn new(
limit: usize,
write_factory: Box<dyn FnOnce() -> io::Result<WritePtr>>,
) -> SpillingState {
SpillingState::Buffer {
buffer: Vec::with_capacity(limit),
capacity: limit,
write_factory,
}
}
fn reserve(self, extra_capacity: usize) -> io::Result<SpillingState> {
match self {
SpillingState::Buffer {
buffer,
capacity,
write_factory,
} => {
if capacity >= extra_capacity {
Ok(SpillingState::Buffer {
buffer,
capacity: capacity - extra_capacity,
write_factory,
})
} else {
let mut wrt = write_factory()?;
wrt.write_all(&buffer[..])?;
Ok(SpillingState::Spilled(wrt))
}
}
SpillingState::Spilled(wrt) => Ok(SpillingState::Spilled(wrt)),
}
}
}
/// Writer that buffers its output in memory up to a fixed limit and, once the
/// limit is exceeded, spills the buffered bytes (and all subsequent writes)
/// to the `WritePtr` produced by its write factory.
pub struct SpillingWriter {
state: Option<SpillingState>,
}
impl SpillingWriter {
pub fn new(
limit: usize,
write_factory: Box<dyn FnOnce() -> io::Result<WritePtr>>,
) -> SpillingWriter {
let state = SpillingState::new(limit, write_factory);
SpillingWriter {
state: Some(state)
}
}
pub fn flush_and_finalize(self) -> io::Result<()> {
if let SpillingState::Buffer {
buffer,
write_factory,
..
} = self.state.expect("State cannot be none") {
let mut wrt = write_factory()?;
wrt.write_all(&buffer[..])?;
wrt.flush()?;
wrt.terminate()?;
}
Ok(())
}
pub fn finalize(self) -> io::Result<SpillingResult> {
match self.state.expect("state cannot be None") {
SpillingState::Spilled(mut wrt) => {
wrt.flush()?;
Ok(SpillingResult::Spilled)
}
SpillingState::Buffer { buffer, .. } => Ok(SpillingResult::Buffer(buffer)),
}
}
}
pub enum SpillingResult {
Spilled,
Buffer(Vec<u8>),
}
impl io::Write for SpillingWriter {
fn write(&mut self, payload: &[u8]) -> io::Result<usize> {
self.write_all(payload)?;
Ok(payload.len())
}
fn flush(&mut self) -> io::Result<()> {
if let Some(SpillingState::Spilled(wrt)) = &mut self.state {
wrt.flush()?;
}
Ok(())
}
fn write_all(&mut self, payload: &[u8]) -> io::Result<()> {
let state_opt: Option<io::Result<SpillingState>> = self.state
.take()
.map(|mut state| {
state = state.reserve(payload.len())?;
match &mut state {
SpillingState::Buffer { buffer, .. } => {
buffer.extend_from_slice(payload);
}
SpillingState::Spilled(wrt) => {
wrt.write_all(payload)?;
}
}
Ok(state)
});
self.state = state_opt.transpose()?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::SpillingWriter;
use crate::directory::spilling_writer::SpillingResult;
use crate::directory::RAMDirectory;
use crate::Directory;
use std::io::{self, Write};
use std::path::Path;
#[test]
fn test_no_spilling() {
let ram_directory = RAMDirectory::create();
let mut ram_directory_clone = ram_directory.clone();
let path = Path::new("test");
let write_factory = Box::new(move || {
ram_directory_clone
.open_write(path)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
});
let mut spilling_wrt = SpillingWriter::new(10, write_factory);
assert!(spilling_wrt.write_all(b"abcd").is_ok());
if let SpillingResult::Buffer(buf) = spilling_wrt.finalize().unwrap() {
assert_eq!(buf, b"abcd")
} else {
panic!("spill writer should not have spilled");
}
assert!(!ram_directory.exists(path));
}
#[test]
fn test_spilling() {
let ram_directory = RAMDirectory::create();
let mut ram_directory_clone = ram_directory.clone();
let path = Path::new("test");
let write_factory = Box::new(move || {
ram_directory_clone
.open_write(path)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
});
let mut spilling_wrt = SpillingWriter::new(10, write_factory);
assert!(spilling_wrt.write_all(b"abcd").is_ok());
assert!(spilling_wrt.write_all(b"efghijklmnop").is_ok());
if let SpillingResult::Spilled = spilling_wrt.finalize().unwrap() {
} else {
panic!("spill writer should have spilled");
}
assert_eq!(
ram_directory.atomic_read(path).unwrap(),
b"abcdefghijklmnop"
);
}
}

View File

@@ -155,6 +155,8 @@ pub(crate) fn advance_deletes(
None => BitSet::with_max_value(max_doc),
};
let num_deleted_docs_before = segment.meta().num_deleted_docs();
compute_deleted_bitset(
&mut delete_bitset,
&segment_reader,
@@ -164,6 +166,8 @@ pub(crate) fn advance_deletes(
)?;
// TODO optimize
// It should be possible to do something smarter by manipulating bitsets directly
// to compute this union.
if let Some(seg_delete_bitset) = segment_reader.delete_bitset() {
for doc in 0u32..max_doc {
if seg_delete_bitset.is_deleted(doc) {
@@ -172,8 +176,9 @@ pub(crate) fn advance_deletes(
}
}
let num_deleted_docs = delete_bitset.len();
if num_deleted_docs > 0 {
let num_deleted_docs: u32 = delete_bitset.len() as u32;
if num_deleted_docs > num_deleted_docs_before {
// There are new deletes. We need to write a new delete file.
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
@@ -803,6 +808,46 @@ mod tests {
assert_eq!(batch_opstamp1, 2u64);
}
#[test]
fn test_no_need_to_rewrite_delete_file_if_no_new_deletes() {
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field => "hello1"));
index_writer.add_document(doc!(text_field => "hello2"));
assert!(index_writer.commit().is_ok());
let reader = index.reader().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.segment_reader(0u32).num_deleted_docs(), 0);
index_writer.delete_term(Term::from_field_text(text_field, "hello1"));
assert!(index_writer.commit().is_ok());
assert!(reader.reload().is_ok());
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.segment_reader(0u32).num_deleted_docs(), 1);
let previous_delete_opstamp = index.load_metas().unwrap().segments[0].delete_opstamp();
// All docs containing hello1 have already been removed.
// We should not update the delete meta.
index_writer.delete_term(Term::from_field_text(text_field, "hello1"));
assert!(index_writer.commit().is_ok());
assert!(reader.reload().is_ok());
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.segment_reader(0u32).num_deleted_docs(), 1);
let after_delete_opstamp = index.load_metas().unwrap().segments[0].delete_opstamp();
assert_eq!(after_delete_opstamp, previous_delete_opstamp);
}
#[test]
fn test_ordered_batched_operations() {
// * one delete for `doc!(field=>"a")`

View File

@@ -2,6 +2,7 @@ use crate::common::MAX_DOC_LIMIT;
use crate::core::Segment;
use crate::core::SegmentReader;
use crate::core::SerializableSegment;
use crate::directory::WritePtr;
use crate::docset::DocSet;
use crate::fastfield::BytesFastFieldReader;
use crate::fastfield::DeleteBitSet;
@@ -661,7 +662,8 @@ impl IndexMerger {
Ok(term_ordinal_mappings)
}
fn write_storable_fields(&self, store_writer: &mut StoreWriter) -> crate::Result<()> {
pub fn write_storable_fields(&self, store_wrt: WritePtr) -> crate::Result<()> {
let mut store_writer = StoreWriter::new(store_wrt);
for reader in &self.readers {
let store_reader = reader.get_store_reader();
if reader.num_deleted_docs() > 0 {
@@ -673,6 +675,7 @@ impl IndexMerger {
store_writer.stack(&store_reader)?;
}
}
store_writer.close()?;
Ok(())
}
}
@@ -682,7 +685,6 @@ impl SerializableSegment for IndexMerger {
let term_ord_mappings = self.write_postings(serializer.get_postings_serializer())?;
self.write_fieldnorms(serializer.get_fieldnorms_serializer())?;
self.write_fast_fields(serializer.get_fast_field_serializer(), term_ord_mappings)?;
self.write_storable_fields(serializer.get_store_writer())?;
serializer.close()?;
Ok(self.max_doc)
}

View File

@@ -3,12 +3,10 @@ use crate::core::SegmentComponent;
use crate::fastfield::FastFieldSerializer;
use crate::fieldnorm::FieldNormsSerializer;
use crate::postings::InvertedIndexSerializer;
use crate::store::StoreWriter;
/// Segment serializer is in charge of laying out on disk
/// the data accumulated and sorted by the `SegmentWriter`.
pub struct SegmentSerializer {
store_writer: StoreWriter,
fast_field_serializer: FastFieldSerializer,
fieldnorms_serializer: FieldNormsSerializer,
postings_serializer: InvertedIndexSerializer,
@@ -17,8 +15,6 @@ pub struct SegmentSerializer {
impl SegmentSerializer {
/// Creates a new `SegmentSerializer`.
pub fn for_segment(segment: &mut Segment) -> crate::Result<SegmentSerializer> {
let store_write = segment.open_write(SegmentComponent::STORE)?;
let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?;
let fast_field_serializer = FastFieldSerializer::from_write(fast_field_write)?;
@@ -27,7 +23,6 @@ impl SegmentSerializer {
let postings_serializer = InvertedIndexSerializer::open(segment)?;
Ok(SegmentSerializer {
store_writer: StoreWriter::new(store_write),
fast_field_serializer,
fieldnorms_serializer,
postings_serializer,
@@ -49,16 +44,10 @@ impl SegmentSerializer {
&mut self.fieldnorms_serializer
}
/// Accessor to the `StoreWriter`.
pub fn get_store_writer(&mut self) -> &mut StoreWriter {
&mut self.store_writer
}
/// Finalize the segment serialization.
pub fn close(self) -> crate::Result<()> {
self.fast_field_serializer.close()?;
self.postings_serializer.close()?;
self.store_writer.close()?;
self.fieldnorms_serializer.close()?;
Ok(())
}

View File

@@ -18,7 +18,7 @@ use crate::indexer::SegmentSerializer;
use crate::indexer::{DefaultMergePolicy, MergePolicy};
use crate::indexer::{MergeCandidate, MergeOperation};
use crate::schema::Schema;
use crate::Opstamp;
use crate::{Opstamp, SegmentComponent};
use futures::channel::oneshot;
use futures::executor::{ThreadPool, ThreadPoolBuilder};
use futures::future::Future;
@@ -134,8 +134,10 @@ fn merge(
// ... we just serialize this index merger in our new segment to merge the two segments.
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;
let num_docs = merger.write(segment_serializer)?;
let store_wrt = merged_segment.open_write(SegmentComponent::STORE)?;
merger.write_storable_fields(store_wrt)?;
let num_docs = merger.write(segment_serializer)?;
let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
Ok(SegmentEntry::new(segment_meta, delete_cursor, None))

View File

@@ -11,13 +11,15 @@ use crate::schema::Schema;
use crate::schema::Term;
use crate::schema::Value;
use crate::schema::{Field, FieldEntry};
use crate::store::StoreWriter;
use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
use crate::tokenizer::{TokenStreamChain, Tokenizer};
use crate::DocId;
use crate::Opstamp;
use crate::{DocId, SegmentComponent};
use std::io;
use std::str;
use crate::directory::SpillingWriter;
/// Computes the initial size of the hash table.
///
@@ -43,11 +45,12 @@ fn initial_table_size(per_thread_memory_budget: usize) -> crate::Result<usize> {
pub struct SegmentWriter {
max_doc: DocId,
multifield_postings: MultiFieldPostingsWriter,
segment_serializer: SegmentSerializer,
segment: Segment,
fast_field_writers: FastFieldsWriter,
fieldnorms_writer: FieldNormsWriter,
doc_opstamps: Vec<Opstamp>,
tokenizers: Vec<Option<TextAnalyzer>>,
store_writer: StoreWriter<SpillingWriter>,
}
impl SegmentWriter {
@@ -62,11 +65,10 @@ impl SegmentWriter {
/// - schema
pub fn for_segment(
memory_budget: usize,
mut segment: Segment,
segment: Segment,
schema: &Schema,
) -> crate::Result<SegmentWriter> {
let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
let tokenizers = schema
.fields()
@@ -82,14 +84,22 @@ impl SegmentWriter {
},
)
.collect();
let mut segment_clone = segment.clone();
let spilling_wrt = SpillingWriter::new(1_000, Box::new(move || {
segment_clone
.open_write(SegmentComponent::STORE)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
}));
let store_writer = StoreWriter::new(spilling_wrt);
Ok(SegmentWriter {
max_doc: 0,
multifield_postings,
fieldnorms_writer: FieldNormsWriter::for_schema(schema),
segment_serializer,
segment,
fast_field_writers: FastFieldsWriter::from_schema(schema),
doc_opstamps: Vec::with_capacity(1_000),
tokenizers,
store_writer,
})
}
@@ -99,11 +109,14 @@ impl SegmentWriter {
/// be used afterwards.
pub fn finalize(mut self) -> crate::Result<Vec<u64>> {
self.fieldnorms_writer.fill_up_to_max_doc(self.max_doc);
let spilling_wrt = self.store_writer.close()?;
spilling_wrt.flush_and_finalize()?;
let segment_serializer = SegmentSerializer::for_segment(&mut self.segment)?;
write(
&self.multifield_postings,
&self.fast_field_writers,
&self.fieldnorms_writer,
self.segment_serializer,
segment_serializer,
)?;
Ok(self.doc_opstamps)
}
@@ -246,8 +259,7 @@ impl SegmentWriter {
}
doc.filter_fields(|field| schema.get_field_entry(field).is_stored());
doc.prepare_for_store();
let doc_writer = self.segment_serializer.get_store_writer();
doc_writer.store(&doc)?;
self.store_writer.store(&doc)?;
self.max_doc += 1;
Ok(())
}

View File

@@ -98,9 +98,6 @@
//! [literate programming](https://tantivy-search.github.io/examples/basic_search.html) /
//! [source code](https://github.com/tantivy-search/tantivy/blob/master/examples/basic_search.rs))
#[macro_use]
extern crate serde_derive;
#[cfg_attr(test, macro_use)]
extern crate serde_json;
@@ -173,6 +170,7 @@ pub use crate::schema::{Document, Term};
use std::fmt;
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
/// Index format version.
const INDEX_FORMAT_VERSION: u32 = 1;

View File

@@ -106,7 +106,7 @@ impl BlockSearcher {
/// the target.
///
/// The results should be equivalent to
/// ```ignore
/// ```compile_fail
/// block[..]
// .iter()
// .take_while(|&&val| val < target)

View File

@@ -1,6 +1,7 @@
use crate::core::Searcher;
use crate::core::SegmentReader;
use crate::docset::DocSet;
use crate::query::boost_query::BoostScorer;
use crate::query::explanation::does_not_match;
use crate::query::{Explanation, Query, Scorer, Weight};
use crate::DocId;
@@ -22,12 +23,13 @@ impl Query for AllQuery {
pub struct AllWeight;
impl Weight for AllWeight {
fn scorer(&self, reader: &SegmentReader) -> crate::Result<Box<dyn Scorer>> {
Ok(Box::new(AllScorer {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
let all_scorer = AllScorer {
state: State::NotStarted,
doc: 0u32,
max_doc: reader.max_doc(),
}))
};
Ok(Box::new(BoostScorer::new(all_scorer, boost)))
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
@@ -90,14 +92,12 @@ impl Scorer for AllScorer {
#[cfg(test)]
mod tests {
use super::AllQuery;
use crate::query::Query;
use crate::schema::{Schema, TEXT};
use crate::Index;
#[test]
fn test_all_query() {
fn create_test_index() -> Index {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
@@ -108,13 +108,18 @@ mod tests {
index_writer.commit().unwrap();
index_writer.add_document(doc!(field=>"ccc"));
index_writer.commit().unwrap();
index
}
#[test]
fn test_all_query() {
let index = create_test_index();
let reader = index.reader().unwrap();
reader.reload().unwrap();
let searcher = reader.searcher();
let weight = AllQuery.weight(&searcher, false).unwrap();
{
let reader = searcher.segment_reader(0);
let mut scorer = weight.scorer(reader).unwrap();
let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
assert!(scorer.advance());
assert_eq!(scorer.doc(), 0u32);
assert!(scorer.advance());
@@ -123,10 +128,31 @@ mod tests {
}
{
let reader = searcher.segment_reader(1);
let mut scorer = weight.scorer(reader).unwrap();
let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
assert!(scorer.advance());
assert_eq!(scorer.doc(), 0u32);
assert!(!scorer.advance());
}
}
#[test]
fn test_all_query_with_boost() {
let index = create_test_index();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let weight = AllQuery.weight(&searcher, false).unwrap();
let reader = searcher.segment_reader(0);
{
let mut scorer = weight.scorer(reader, 2.0f32).unwrap();
assert!(scorer.advance());
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 2.0f32);
}
{
let mut scorer = weight.scorer(reader, 1.5f32).unwrap();
assert!(scorer.advance());
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.5f32);
}
}
}

View File

@@ -40,7 +40,7 @@ impl<A> Weight for AutomatonWeight<A>
where
A: Automaton + Send + Sync + 'static,
{
fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);
@@ -58,11 +58,12 @@ where
}
}
let doc_bitset = BitSetDocSet::from(doc_bitset);
Ok(Box::new(ConstScorer::new(doc_bitset)))
let const_scorer = ConstScorer::new(doc_bitset, boost);
Ok(Box::new(const_scorer))
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.scorer(reader)?;
let mut scorer = self.scorer(reader, 1.0f32)?;
if scorer.skip_next(doc) == SkipResult::Reached {
Ok(Explanation::new("AutomatonScorer", 1.0f32))
} else {
@@ -72,3 +73,95 @@ where
}
}
}
#[cfg(test)]
mod tests {
use super::AutomatonWeight;
use crate::query::Weight;
use crate::schema::{Schema, STRING};
use crate::Index;
use tantivy_fst::Automaton;
fn create_index() -> Index {
let mut schema = Schema::builder();
let title = schema.add_text_field("title", STRING);
let index = Index::create_in_ram(schema.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(title=>"abc"));
index_writer.add_document(doc!(title=>"bcd"));
index_writer.add_document(doc!(title=>"abcd"));
assert!(index_writer.commit().is_ok());
index
}
enum State {
Start,
NotMatching,
AfterA,
}
struct PrefixedByA;
impl Automaton for PrefixedByA {
type State = State;
fn start(&self) -> Self::State {
State::Start
}
fn is_match(&self, state: &Self::State) -> bool {
match *state {
State::AfterA => true,
_ => false,
}
}
fn accept(&self, state: &Self::State, byte: u8) -> Self::State {
match *state {
State::Start => {
if byte == b'a' {
State::AfterA
} else {
State::NotMatching
}
}
State::AfterA => State::AfterA,
State::NotMatching => State::NotMatching,
}
}
}
#[test]
fn test_automaton_weight() {
let index = create_index();
let field = index.schema().get_field("title").unwrap();
let automaton_weight = AutomatonWeight::new(field, PrefixedByA);
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let mut scorer = automaton_weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.advance());
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.0f32);
assert!(scorer.advance());
assert_eq!(scorer.doc(), 2u32);
assert_eq!(scorer.score(), 1.0f32);
assert!(!scorer.advance());
}
#[test]
fn test_automaton_weight_boost() {
let index = create_index();
let field = index.schema().get_field("title").unwrap();
let automaton_weight = AutomatonWeight::new(field, PrefixedByA);
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let mut scorer = automaton_weight
.scorer(searcher.segment_reader(0u32), 1.32f32)
.unwrap();
assert!(scorer.advance());
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.32f32);
}
}

View File

@@ -25,7 +25,6 @@ fn compute_tf_cache(average_fieldnorm: f32) -> [f32; 256] {
cache
}
#[derive(Clone)]
pub struct BM25Weight {
idf_explain: Explanation,
weight: f32,
@@ -34,6 +33,15 @@ pub struct BM25Weight {
}
impl BM25Weight {
pub fn boost_by(&self, boost: f32) -> BM25Weight {
BM25Weight {
idf_explain: self.idf_explain.clone(),
weight: self.weight * boost,
cache: self.cache,
average_fieldnorm: self.average_fieldnorm,
}
}
pub fn for_terms(searcher: &Searcher, terms: &[Term]) -> BM25Weight {
assert!(!terms.is_empty(), "BM25 requires at least one term");
let field = terms[0].field();

View File

@@ -55,10 +55,11 @@ impl BooleanWeight {
fn per_occur_scorers(
&self,
reader: &SegmentReader,
boost: f32,
) -> crate::Result<HashMap<Occur, Vec<Box<dyn Scorer>>>> {
let mut per_occur_scorers: HashMap<Occur, Vec<Box<dyn Scorer>>> = HashMap::new();
for &(ref occur, ref subweight) in &self.weights {
let sub_scorer: Box<dyn Scorer> = subweight.scorer(reader)?;
let sub_scorer: Box<dyn Scorer> = subweight.scorer(reader, boost)?;
per_occur_scorers
.entry(*occur)
.or_insert_with(Vec::new)
@@ -70,8 +71,9 @@ impl BooleanWeight {
fn complex_scorer<TScoreCombiner: ScoreCombiner>(
&self,
reader: &SegmentReader,
boost: f32,
) -> crate::Result<Box<dyn Scorer>> {
let mut per_occur_scorers = self.per_occur_scorers(reader)?;
let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;
let should_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::Should)
@@ -112,7 +114,7 @@ impl BooleanWeight {
}
impl Weight for BooleanWeight {
fn scorer(&self, reader: &SegmentReader) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
if self.weights.is_empty() {
Ok(Box::new(EmptyScorer))
} else if self.weights.len() == 1 {
@@ -120,17 +122,17 @@ impl Weight for BooleanWeight {
if occur == Occur::MustNot {
Ok(Box::new(EmptyScorer))
} else {
weight.scorer(reader)
weight.scorer(reader, boost)
}
} else if self.scoring_enabled {
self.complex_scorer::<SumWithCoordsCombiner>(reader)
self.complex_scorer::<SumWithCoordsCombiner>(reader, boost)
} else {
self.complex_scorer::<DoNothingCombiner>(reader)
self.complex_scorer::<DoNothingCombiner>(reader, boost)
}
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader)?;
let mut scorer = self.scorer(reader, 1.0f32)?;
if scorer.skip_next(doc) != SkipResult::Reached {
return Err(does_not_match(doc));
}

View File

@@ -18,6 +18,7 @@ mod tests {
use crate::query::Scorer;
use crate::query::TermQuery;
use crate::schema::*;
use crate::tests::assert_nearly_equals;
use crate::Index;
use crate::{DocAddress, DocId};
@@ -70,7 +71,9 @@ mod tests {
let query = query_parser.parse_query("+a").unwrap();
let searcher = index.reader().unwrap().searcher();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<TermScorer>());
}
@@ -82,13 +85,17 @@ mod tests {
{
let query = query_parser.parse_query("+a +b +c").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<Intersection<TermScorer>>());
}
{
let query = query_parser.parse_query("+a +(b c)").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<Intersection<Box<dyn Scorer>>>());
}
}
@@ -101,7 +108,9 @@ mod tests {
{
let query = query_parser.parse_query("+a b").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<RequiredOptionalScorer<
Box<dyn Scorer>,
Box<dyn Scorer>,
@@ -111,7 +120,9 @@ mod tests {
{
let query = query_parser.parse_query("+a b").unwrap();
let weight = query.weight(&searcher, false).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<TermScorer>());
}
}
@@ -179,6 +190,50 @@ mod tests {
}
}
#[test]
pub fn test_boolean_query_with_weight() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field => "a b c"));
index_writer.add_document(doc!(text_field => "a c"));
index_writer.add_document(doc!(text_field => "b c"));
assert!(index_writer.commit().is_ok());
}
let term_a: Box<dyn Query> = Box::new(TermQuery::new(
Term::from_field_text(text_field, "a"),
IndexRecordOption::WithFreqs,
));
let term_b: Box<dyn Query> = Box::new(TermQuery::new(
Term::from_field_text(text_field, "b"),
IndexRecordOption::WithFreqs,
));
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let boolean_query =
BooleanQuery::from(vec![(Occur::Should, term_a), (Occur::Should, term_b)]);
let boolean_weight = boolean_query.weight(&searcher, true).unwrap();
{
let mut boolean_scorer = boolean_weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(boolean_scorer.advance());
assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals(boolean_scorer.score(), 0.84163445f32);
}
{
let mut boolean_scorer = boolean_weight
.scorer(searcher.segment_reader(0u32), 2.0f32)
.unwrap();
assert!(boolean_scorer.advance());
assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals(boolean_scorer.score(), 1.6832689f32);
}
}
#[test]
pub fn test_intersection_score() {
let (index, text_field) = aux_test_helper();
@@ -249,7 +304,9 @@ mod tests {
let query_parser = QueryParser::for_index(&index, vec![title, text]);
let query = query_parser.parse_query("Оксана Лифенко").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let mut scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
let mut scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
scorer.advance();
let explanation = query.explain(&searcher, DocAddress(0u32, 0u32)).unwrap();

src/query/boost_query.rs (new file, 164 lines)
View File

@@ -0,0 +1,164 @@
use crate::common::BitSet;
use crate::fastfield::DeleteBitSet;
use crate::query::explanation::does_not_match;
use crate::query::{Explanation, Query, Scorer, Weight};
use crate::{DocId, DocSet, Searcher, SegmentReader, SkipResult, Term};
use std::collections::BTreeSet;
use std::fmt;
/// `BoostQuery` is a wrapper over a query used to boost its score.
///
/// The document set matched by the `BoostQuery` is strictly the same as that of the underlying query.
/// The score of each document is the score of the underlying query multiplied by the `boost`
/// factor.
pub struct BoostQuery {
query: Box<dyn Query>,
boost: f32,
}
impl BoostQuery {
/// Builds a boost query.
pub fn new(query: Box<dyn Query>, boost: f32) -> BoostQuery {
BoostQuery { query, boost }
}
}
impl Clone for BoostQuery {
fn clone(&self) -> Self {
BoostQuery {
query: self.query.box_clone(),
boost: self.boost,
}
}
}
impl fmt::Debug for BoostQuery {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Boost(query={:?}, boost={})", self.query, self.boost)
}
}
impl Query for BoostQuery {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> {
let weight_without_boost = self.query.weight(searcher, scoring_enabled)?;
let boosted_weight = if scoring_enabled {
Box::new(BoostWeight::new(weight_without_boost, self.boost))
} else {
weight_without_boost
};
Ok(boosted_weight)
}
fn query_terms(&self, term_set: &mut BTreeSet<Term>) {
self.query.query_terms(term_set)
}
}
pub(crate) struct BoostWeight {
weight: Box<dyn Weight>,
boost: f32,
}
impl BoostWeight {
pub fn new(weight: Box<dyn Weight>, boost: f32) -> Self {
BoostWeight { weight, boost }
}
}
impl Weight for BoostWeight {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
self.weight.scorer(reader, boost * self.boost)
}
fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0f32)?;
if scorer.skip_next(doc) != SkipResult::Reached {
return Err(does_not_match(doc));
}
let mut explanation =
Explanation::new(format!("Boost x{} of ...", self.boost), scorer.score());
let underlying_explanation = self.weight.explain(reader, doc)?;
explanation.add_detail(underlying_explanation);
Ok(explanation)
}
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
self.weight.count(reader)
}
}
pub(crate) struct BoostScorer<S: Scorer> {
underlying: S,
boost: f32,
}
impl<S: Scorer> BoostScorer<S> {
pub fn new(underlying: S, boost: f32) -> BoostScorer<S> {
BoostScorer { underlying, boost }
}
}
impl<S: Scorer> DocSet for BoostScorer<S> {
fn advance(&mut self) -> bool {
self.underlying.advance()
}
fn skip_next(&mut self, target: DocId) -> SkipResult {
self.underlying.skip_next(target)
}
fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
self.underlying.fill_buffer(buffer)
}
fn doc(&self) -> u32 {
self.underlying.doc()
}
fn size_hint(&self) -> u32 {
self.underlying.size_hint()
}
fn append_to_bitset(&mut self, bitset: &mut BitSet) {
self.underlying.append_to_bitset(bitset)
}
fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
self.underlying.count(delete_bitset)
}
fn count_including_deleted(&mut self) -> u32 {
self.underlying.count_including_deleted()
}
}
impl<S: Scorer> Scorer for BoostScorer<S> {
fn score(&mut self) -> f32 {
self.underlying.score() * self.boost
}
}
#[cfg(test)]
mod tests {
use super::BoostQuery;
use crate::query::{AllQuery, Query};
use crate::schema::Schema;
use crate::{DocAddress, Document, Index};
#[test]
fn test_boost_query_explain() {
let schema = Schema::builder().build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(Document::new());
assert!(index_writer.commit().is_ok());
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let query = BoostQuery::new(Box::new(AllQuery), 0.2);
let explanation = query.explain(&searcher, DocAddress(0, 0u32)).unwrap();
assert_eq!(
explanation.to_pretty_json(),
"{\n \"value\": 0.2,\n \"description\": \"Boost x0.2 of ...\",\n \"details\": [\n {\n \"value\": 1.0,\n \"description\": \"AllQuery\"\n }\n ]\n}"
)
}
}

View File

@@ -33,7 +33,7 @@ impl Query for EmptyQuery {
/// It is useful for tests and handling edge cases.
pub struct EmptyWeight;
impl Weight for EmptyWeight {
fn scorer(&self, _reader: &SegmentReader) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, _reader: &SegmentReader, _boost: f32) -> crate::Result<Box<dyn Scorer>> {
Ok(Box::new(EmptyScorer))
}

View File

@@ -1,4 +1,5 @@
use crate::{DocId, TantivyError};
use serde::Serialize;
pub(crate) fn does_not_match(doc: DocId) -> TantivyError {
TantivyError::InvalidArgument(format!("Document #({}) does not match", doc))

View File

@@ -9,7 +9,7 @@ use std::ops::Range;
/// The range of Levenshtein distances for which we will build DFAs for our terms.
/// The computation is exponential, so it is best to keep it to low single digits.
const VALID_LEVENSHTEIN_DISTANCE_RANGE: Range<u8> = (0..3);
const VALID_LEVENSHTEIN_DISTANCE_RANGE: Range<u8> = 0..3;
static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Lazy::new(|| {
let mut lev_builder_cache = HashMap::new();

View File

@@ -7,6 +7,7 @@ mod automaton_weight;
mod bitset;
mod bm25;
mod boolean_query;
mod boost_query;
mod empty_query;
mod exclude;
mod explanation;
@@ -37,6 +38,7 @@ pub use self::all_query::{AllQuery, AllScorer, AllWeight};
pub use self::automaton_weight::AutomatonWeight;
pub use self::bitset::BitSetDocSet;
pub use self::boolean_query::BooleanQuery;
pub use self::boost_query::BoostQuery;
pub use self::empty_query::{EmptyQuery, EmptyScorer, EmptyWeight};
pub use self::exclude::Exclude;
pub use self::explanation::Explanation;

View File

@@ -7,7 +7,7 @@ pub use self::phrase_scorer::PhraseScorer;
pub use self::phrase_weight::PhraseWeight;
#[cfg(test)]
mod tests {
pub mod tests {
use super::*;
use crate::collector::tests::{TEST_COLLECTOR_WITHOUT_SCORE, TEST_COLLECTOR_WITH_SCORE};
@@ -15,10 +15,10 @@ mod tests {
use crate::error::TantivyError;
use crate::schema::{Schema, Term, TEXT};
use crate::tests::assert_nearly_equals;
use crate::DocAddress;
use crate::DocId;
use crate::{DocAddress, DocSet};
fn create_index(texts: &[&'static str]) -> Index {
pub fn create_index(texts: &[&'static str]) -> Index {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
@@ -102,30 +102,6 @@ mod tests {
assert!(test_query(vec!["g", "a"]).is_empty());
}
#[test]
pub fn test_phrase_count() {
let index = create_index(&["a c", "a a b d a b c", " a b"]);
let schema = index.schema();
let text_field = schema.get_field("text").unwrap();
let searcher = index.reader().unwrap().searcher();
let phrase_query = PhraseQuery::new(vec![
Term::from_field_text(text_field, "a"),
Term::from_field_text(text_field, "b"),
]);
let phrase_weight = phrase_query.phrase_weight(&searcher, true).unwrap();
let mut phrase_scorer = phrase_weight
.phrase_scorer(searcher.segment_reader(0u32))
.unwrap()
.unwrap();
assert!(phrase_scorer.advance());
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.phrase_count(), 2);
assert!(phrase_scorer.advance());
assert_eq!(phrase_scorer.doc(), 2);
assert_eq!(phrase_scorer.phrase_count(), 1);
assert!(!phrase_scorer.advance());
}
#[test]
pub fn test_phrase_query_no_positions() {
let mut schema_builder = Schema::builder();

View File

@@ -37,11 +37,12 @@ impl PhraseWeight {
reader.get_fieldnorms_reader(field)
}
pub fn phrase_scorer(
fn phrase_scorer(
&self,
reader: &SegmentReader,
boost: f32,
) -> Result<Option<PhraseScorer<SegmentPostings>>> {
let similarity_weight = self.similarity_weight.clone();
let similarity_weight = self.similarity_weight.boost_by(boost);
let fieldnorm_reader = self.fieldnorm_reader(reader);
if reader.has_deletes() {
let mut term_postings_list = Vec::new();
@@ -84,8 +85,8 @@ impl PhraseWeight {
}
impl Weight for PhraseWeight {
fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader)? {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(Box::new(scorer))
} else {
Ok(Box::new(EmptyScorer))
@@ -93,7 +94,7 @@ impl Weight for PhraseWeight {
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let scorer_opt = self.phrase_scorer(reader)?;
let scorer_opt = self.phrase_scorer(reader, 1.0f32)?;
if scorer_opt.is_none() {
return Err(does_not_match(doc));
}
@@ -109,3 +110,34 @@ impl Weight for PhraseWeight {
Ok(explanation)
}
}
#[cfg(test)]
mod tests {
use super::super::tests::create_index;
use crate::query::PhraseQuery;
use crate::{DocSet, Term};
#[test]
pub fn test_phrase_count() {
let index = create_index(&["a c", "a a b d a b c", " a b"]);
let schema = index.schema();
let text_field = schema.get_field("text").unwrap();
let searcher = index.reader().unwrap().searcher();
let phrase_query = PhraseQuery::new(vec![
Term::from_field_text(text_field, "a"),
Term::from_field_text(text_field, "b"),
]);
let phrase_weight = phrase_query.phrase_weight(&searcher, true).unwrap();
let mut phrase_scorer = phrase_weight
.phrase_scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap()
.unwrap();
assert!(phrase_scorer.advance());
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.phrase_count(), 2);
assert!(phrase_scorer.advance());
assert_eq!(phrase_scorer.doc(), 2);
assert_eq!(phrase_scorer.phrase_count(), 1);
assert!(!phrase_scorer.advance());
}
}

View File

@@ -21,6 +21,17 @@ pub enum LogicalLiteral {
pub enum LogicalAST {
Clause(Vec<(Occur, LogicalAST)>),
Leaf(Box<LogicalLiteral>),
Boost(Box<LogicalAST>, f32),
}
impl LogicalAST {
pub fn boost(self, boost: f32) -> LogicalAST {
if (boost - 1.0f32).abs() < std::f32::EPSILON {
self
} else {
LogicalAST::Boost(Box::new(self), boost)
}
}
}
fn occur_letter(occur: Occur) -> &'static str {
@@ -47,6 +58,7 @@ impl fmt::Debug for LogicalAST {
}
Ok(())
}
LogicalAST::Boost(ref ast, boost) => write!(formatter, "{:?}^{}", ast, boost),
LogicalAST::Leaf(ref literal) => write!(formatter, "{:?}", literal),
}
}

View File

@@ -1,6 +1,5 @@
use super::logical_ast::*;
use crate::core::Index;
use crate::query::AllQuery;
use crate::query::BooleanQuery;
use crate::query::EmptyQuery;
use crate::query::Occur;
@@ -8,11 +7,13 @@ use crate::query::PhraseQuery;
use crate::query::Query;
use crate::query::RangeQuery;
use crate::query::TermQuery;
use crate::query::{AllQuery, BoostQuery};
use crate::schema::{Facet, IndexRecordOption};
use crate::schema::{Field, Schema};
use crate::schema::{FieldType, Term};
use crate::tokenizer::TokenizerManager;
use std::borrow::Cow;
use std::collections::HashMap;
use std::num::{ParseFloatError, ParseIntError};
use std::ops::Bound;
use std::str::FromStr;
@@ -144,7 +145,6 @@ fn trim_ast(logical_ast: LogicalAST) -> Option<LogicalAST> {
///
/// * must terms: By prefixing a term with a `+`, a term can be made required for the search.
///
///
/// * phrase terms: Quoted terms become phrase searches on fields that have positions indexed.
/// e.g., `title:"Barack Obama"` will only find documents that have "barack" immediately followed
/// by "obama".
@@ -158,12 +158,20 @@ fn trim_ast(logical_ast: LogicalAST) -> Option<LogicalAST> {
///
/// * all docs query: A plain `*` will match all documents in the index.
///
/// Parts of the query can be boosted by appending `^boostfactor`.
/// For instance, `"SRE"^2.0 OR devops^0.4` will score documents containing `SRE` higher than
/// documents containing `devops`. Negative boosts are not allowed.
///
/// It is also possible to define a boost for a specific field at the query parser level.
/// (See [`set_field_boost(...)`](#method.set_field_boost).) Typically you may want to boost a
/// title field.
#[derive(Clone)]
pub struct QueryParser {
schema: Schema,
default_fields: Vec<Field>,
conjunction_by_default: bool,
tokenizer_manager: TokenizerManager,
boost: HashMap<Field, f32>,
}
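A hedged sketch of the `^boostfactor` syntax described above (the index, default fields, and query string are illustrative):

use tantivy::query::{Query, QueryParser, QueryParserError};
use tantivy::schema::Field;
use tantivy::Index;

fn boosted_user_query(
    index: &Index,
    default_fields: Vec<Field>,
) -> Result<Box<dyn Query>, QueryParserError> {
    let query_parser = QueryParser::for_index(index, default_fields);
    // "SRE" matches are boosted by 2.0 while "devops" matches are damped to 0.4,
    // mirroring the example from the doc comment above.
    query_parser.parse_query(r#""SRE"^2.0 OR devops^0.4"#)
}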
impl QueryParser {
@@ -181,6 +189,7 @@ impl QueryParser {
default_fields,
tokenizer_manager,
conjunction_by_default: false,
boost: Default::default(),
}
}
@@ -201,6 +210,17 @@ impl QueryParser {
self.conjunction_by_default = true;
}
/// Sets a boost for a specific field.
///
/// The parsed query will automatically boost this field.
///
/// If the query defines a boost through the query language (e.g., `country:France^3.0`),
/// the two boosts (the one defined in the query, and the one defined in the `QueryParser`)
/// are multiplied together.
pub fn set_field_boost(&mut self, field: Field, boost: f32) {
self.boost.insert(field, boost);
}
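A hedged sketch of a parser-level field boost combining with a boost from the query language (the field names "title" and "body" are assumptions):

use tantivy::query::QueryParser;
use tantivy::Index;

fn parser_with_title_boost(index: &Index) -> QueryParser {
    let schema = index.schema();
    let title = schema.get_field("title").unwrap();
    let body = schema.get_field("body").unwrap();
    let mut query_parser = QueryParser::for_index(index, vec![title, body]);
    // Every clause targeting `title` is boosted by 2.0. A query such as
    // `title:diagnostics^3.0` therefore ends up with an overall boost of 2.0 * 3.0 = 6.0.
    query_parser.set_field_boost(title, 2.0f32);
    query_parser
}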
/// Parse a query
///
/// Note that `parse_query` returns an error if the input
@@ -407,6 +427,10 @@ impl QueryParser {
self.compute_logical_ast_with_occur(*subquery)?;
Ok((Occur::compose(left_occur, right_occur), logical_sub_queries))
}
UserInputAST::Boost(ast, boost) => {
let (occur, ast_without_occur) = self.compute_logical_ast_with_occur(*ast)?;
Ok((occur, ast_without_occur.boost(boost)))
}
UserInputAST::Leaf(leaf) => {
let result_ast = self.compute_logical_ast_from_leaf(*leaf)?;
Ok((Occur::Should, result_ast))
@@ -414,6 +438,10 @@ impl QueryParser {
}
}
fn field_boost(&self, field: Field) -> f32 {
self.boost.get(&field).cloned().unwrap_or(1.0f32)
}
fn compute_logical_ast_from_leaf(
&self,
leaf: UserInputLeaf,
@@ -439,7 +467,9 @@ impl QueryParser {
let mut asts: Vec<LogicalAST> = Vec::new();
for (field, phrase) in term_phrases {
if let Some(ast) = self.compute_logical_ast_for_leaf(field, &phrase)? {
asts.push(LogicalAST::Leaf(Box::new(ast)));
// Apply the field-specific boost defined at the query parser level, if any.
let boost = self.field_boost(field);
asts.push(LogicalAST::Leaf(Box::new(ast)).boost(boost));
}
}
let result_ast: LogicalAST = if asts.len() == 1 {
@@ -459,14 +489,16 @@ impl QueryParser {
let mut clauses = fields
.iter()
.map(|&field| {
let boost = self.field_boost(field);
let field_entry = self.schema.get_field_entry(field);
let value_type = field_entry.field_type().value_type();
Ok(LogicalAST::Leaf(Box::new(LogicalLiteral::Range {
let logical_ast = LogicalAST::Leaf(Box::new(LogicalLiteral::Range {
field,
value_type,
lower: self.resolve_bound(field, &lower)?,
upper: self.resolve_bound(field, &upper)?,
})))
}));
Ok(logical_ast.boost(boost))
})
.collect::<Result<Vec<_>, QueryParserError>>()?;
let result_ast = if clauses.len() == 1 {
@@ -519,6 +551,11 @@ fn convert_to_query(logical_ast: LogicalAST) -> Box<dyn Query> {
Some(LogicalAST::Leaf(trimmed_logical_literal)) => {
convert_literal_to_query(*trimmed_logical_literal)
}
Some(LogicalAST::Boost(ast, boost)) => {
let query = convert_to_query(*ast);
let boosted_query = BoostQuery::new(query, boost);
Box::new(boosted_query)
}
None => Box::new(EmptyQuery),
}
}
@@ -538,7 +575,7 @@ mod test {
use crate::Index;
use matches::assert_matches;
fn make_query_parser() -> QueryParser {
fn make_schema() -> Schema {
let mut schema_builder = Schema::builder();
let text_field_indexing = TextFieldIndexing::default()
.set_tokenizer("en_with_stop_words")
@@ -546,8 +583,8 @@ mod test {
let text_options = TextOptions::default()
.set_indexing_options(text_field_indexing)
.set_stored();
let title = schema_builder.add_text_field("title", TEXT);
let text = schema_builder.add_text_field("text", TEXT);
schema_builder.add_text_field("title", TEXT);
schema_builder.add_text_field("text", TEXT);
schema_builder.add_i64_field("signed", INDEXED);
schema_builder.add_u64_field("unsigned", INDEXED);
schema_builder.add_text_field("notindexed_text", STORED);
@@ -558,8 +595,15 @@ mod test {
schema_builder.add_date_field("date", INDEXED);
schema_builder.add_f64_field("float", INDEXED);
schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let default_fields = vec![title, text];
schema_builder.build()
}
fn make_query_parser() -> QueryParser {
let schema = make_schema();
let default_fields: Vec<Field> = vec!["title", "text"]
.into_iter()
.flat_map(|field_name| schema.get_field(field_name))
.collect();
let tokenizer_manager = TokenizerManager::default();
tokenizer_manager.register(
"en_with_stop_words",
@@ -601,6 +645,45 @@ mod test {
);
}
#[test]
pub fn test_parse_query_with_boost() {
let mut query_parser = make_query_parser();
let schema = make_schema();
let text_field = schema.get_field("text").unwrap();
query_parser.set_field_boost(text_field, 2.0f32);
let query = query_parser.parse_query("text:hello").unwrap();
assert_eq!(
format!("{:?}", query),
"Boost(query=TermQuery(Term(field=1,bytes=[104, 101, 108, 108, 111])), boost=2)"
);
}
#[test]
pub fn test_parse_query_range_with_boost() {
let mut query_parser = make_query_parser();
let schema = make_schema();
let title_field = schema.get_field("title").unwrap();
query_parser.set_field_boost(title_field, 2.0f32);
let query = query_parser.parse_query("title:[A TO B]").unwrap();
assert_eq!(
format!("{:?}", query),
"Boost(query=RangeQuery { field: Field(0), value_type: Str, left_bound: Included([97]), right_bound: Included([98]) }, boost=2)"
);
}
#[test]
pub fn test_parse_query_with_default_boost_and_custom_boost() {
let mut query_parser = make_query_parser();
let schema = make_schema();
let text_field = schema.get_field("text").unwrap();
query_parser.set_field_boost(text_field, 2.0f32);
let query = query_parser.parse_query("text:hello^2").unwrap();
assert_eq!(
format!("{:?}", query),
"Boost(query=Boost(query=TermQuery(Term(field=1,bytes=[104, 101, 108, 108, 111])), boost=2), boost=2)"
);
}
#[test]
pub fn test_parse_nonindexed_field_yields_error() {
let query_parser = make_query_parser();

View File

@@ -289,7 +289,7 @@ impl RangeWeight {
}
impl Weight for RangeWeight {
fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);
@@ -307,11 +307,11 @@ impl Weight for RangeWeight {
}
}
let doc_bitset = BitSetDocSet::from(doc_bitset);
Ok(Box::new(ConstScorer::new(doc_bitset)))
Ok(Box::new(ConstScorer::new(doc_bitset, boost)))
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.scorer(reader)?;
let mut scorer = self.scorer(reader, 1.0f32)?;
if scorer.skip_next(doc) != SkipResult::Reached {
return Err(does_not_match(doc));
}

View File

@@ -115,8 +115,8 @@ mod tests {
let req = vec![1, 3, 7];
let mut reqoptscorer: RequiredOptionalScorer<_, _, SumCombiner> =
RequiredOptionalScorer::new(
ConstScorer::new(VecDocSet::from(req.clone())),
ConstScorer::new(VecDocSet::from(vec![])),
ConstScorer::from(VecDocSet::from(req.clone())),
ConstScorer::from(VecDocSet::from(vec![])),
);
let mut docs = vec![];
while reqoptscorer.advance() {
@@ -129,8 +129,8 @@ mod tests {
fn test_reqopt_scorer() {
let mut reqoptscorer: RequiredOptionalScorer<_, _, SumCombiner> =
RequiredOptionalScorer::new(
ConstScorer::new(VecDocSet::from(vec![1, 3, 7, 8, 9, 10, 13, 15])),
ConstScorer::new(VecDocSet::from(vec![1, 2, 7, 11, 12, 15])),
ConstScorer::new(VecDocSet::from(vec![1, 3, 7, 8, 9, 10, 13, 15]), 1.0f32),
ConstScorer::new(VecDocSet::from(vec![1, 2, 7, 11, 12, 15]), 1.0f32),
);
{
assert!(reqoptscorer.advance());
@@ -183,8 +183,8 @@ mod tests {
test_skip_against_unoptimized(
|| {
Box::new(RequiredOptionalScorer::<_, _, DoNothingCombiner>::new(
ConstScorer::new(VecDocSet::from(req_docs.clone())),
ConstScorer::new(VecDocSet::from(opt_docs.clone())),
ConstScorer::from(VecDocSet::from(req_docs.clone())),
ConstScorer::from(VecDocSet::from(opt_docs.clone())),
))
},
skip_docs,

View File

@@ -49,16 +49,14 @@ pub struct ConstScorer<TDocSet: DocSet> {
impl<TDocSet: DocSet> ConstScorer<TDocSet> {
/// Creates a new `ConstScorer`.
pub fn new(docset: TDocSet) -> ConstScorer<TDocSet> {
ConstScorer {
docset,
score: 1f32,
}
pub fn new(docset: TDocSet, score: f32) -> ConstScorer<TDocSet> {
ConstScorer { docset, score }
}
}
/// Sets the constant score to a different value.
pub fn set_score(&mut self, score: Score) {
self.score = score;
impl<TDocSet: DocSet> From<TDocSet> for ConstScorer<TDocSet> {
fn from(docset: TDocSet) -> Self {
ConstScorer::new(docset, 1.0f32)
}
}
@@ -90,6 +88,6 @@ impl<TDocSet: DocSet> DocSet for ConstScorer<TDocSet> {
impl<TDocSet: DocSet + 'static> Scorer for ConstScorer<TDocSet> {
fn score(&mut self) -> Score {
1f32
self.score
}
}

View File

@@ -39,7 +39,7 @@ mod tests {
);
let term_weight = term_query.weight(&searcher, true).unwrap();
let segment_reader = searcher.segment_reader(0);
let mut term_scorer = term_weight.scorer(segment_reader).unwrap();
let mut term_scorer = term_weight.scorer(segment_reader, 1.0f32).unwrap();
assert!(term_scorer.advance());
assert_eq!(term_scorer.doc(), 0);
assert_eq!(term_scorer.score(), 0.28768212);

View File

@@ -18,13 +18,13 @@ pub struct TermWeight {
}
impl Weight for TermWeight {
fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
let term_scorer = self.scorer_specialized(reader)?;
fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
let term_scorer = self.scorer_specialized(reader, boost)?;
Ok(Box::new(term_scorer))
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.scorer_specialized(reader)?;
let mut scorer = self.scorer_specialized(reader, 1.0f32)?;
if scorer.skip_next(doc) != SkipResult::Reached {
return Err(does_not_match(doc));
}
@@ -33,7 +33,7 @@ impl Weight for TermWeight {
fn count(&self, reader: &SegmentReader) -> Result<u32> {
if let Some(delete_bitset) = reader.delete_bitset() {
Ok(self.scorer(reader)?.count(delete_bitset))
Ok(self.scorer(reader, 1.0f32)?.count(delete_bitset))
} else {
let field = self.term.field();
Ok(reader
@@ -58,11 +58,11 @@ impl TermWeight {
}
}
fn scorer_specialized(&self, reader: &SegmentReader) -> Result<TermScorer> {
fn scorer_specialized(&self, reader: &SegmentReader, boost: f32) -> Result<TermScorer> {
let field = self.term.field();
let inverted_index = reader.inverted_index(field);
let fieldnorm_reader = reader.get_fieldnorms_reader(field);
let similarity_weight = self.similarity_weight.clone();
let similarity_weight = self.similarity_weight.boost_by(boost);
let postings_opt: Option<SegmentPostings> =
inverted_index.read_postings(&self.term, self.index_record_option);
if let Some(segment_postings) = postings_opt {

View File

@@ -145,26 +145,6 @@ where
}
}
fn count_including_deleted(&mut self) -> u32 {
let mut count = self.bitsets[self.cursor..HORIZON_NUM_TINYBITSETS]
.iter()
.map(|bitset| bitset.len())
.sum::<u32>();
for bitset in self.bitsets.iter_mut() {
bitset.clear();
}
while self.refill() {
count += self.bitsets.iter().map(|bitset| bitset.len()).sum::<u32>();
for bitset in self.bitsets.iter_mut() {
bitset.clear();
}
}
self.cursor = HORIZON_NUM_TINYBITSETS;
count
}
// TODO implement `count` efficiently.
fn skip_next(&mut self, target: DocId) -> SkipResult {
if !self.advance() {
return SkipResult::End;
@@ -243,6 +223,8 @@ where
}
}
// TODO implement `count` efficiently.
fn doc(&self) -> DocId {
self.doc
}
@@ -250,6 +232,24 @@ where
fn size_hint(&self) -> u32 {
0u32
}
fn count_including_deleted(&mut self) -> u32 {
let mut count = self.bitsets[self.cursor..HORIZON_NUM_TINYBITSETS]
.iter()
.map(|bitset| bitset.len())
.sum::<u32>();
for bitset in self.bitsets.iter_mut() {
bitset.clear();
}
while self.refill() {
count += self.bitsets.iter().map(|bitset| bitset.len()).sum::<u32>();
for bitset in self.bitsets.iter_mut() {
bitset.clear();
}
}
self.cursor = HORIZON_NUM_TINYBITSETS;
count
}
}
impl<TScorer, TScoreCombiner> Scorer for Union<TScorer, TScoreCombiner>
@@ -290,7 +290,7 @@ mod tests {
vals.iter()
.cloned()
.map(VecDocSet::from)
.map(ConstScorer::new)
.map(|docset| ConstScorer::new(docset, 1.0f32))
.collect::<Vec<ConstScorer<VecDocSet>>>(),
)
};
@@ -339,7 +339,7 @@ mod tests {
.iter()
.map(|docs| docs.clone())
.map(VecDocSet::from)
.map(ConstScorer::new)
.map(|docset| ConstScorer::new(docset, 1.0f32))
.collect::<Vec<_>>(),
));
res
@@ -369,8 +369,8 @@ mod tests {
#[test]
fn test_union_skip_corner_case3() {
let mut docset = Union::<_, DoNothingCombiner>::from(vec![
ConstScorer::new(VecDocSet::from(vec![0u32, 5u32])),
ConstScorer::new(VecDocSet::from(vec![1u32, 4u32])),
ConstScorer::from(VecDocSet::from(vec![0u32, 5u32])),
ConstScorer::from(VecDocSet::from(vec![1u32, 4u32])),
]);
assert!(docset.advance());
assert_eq!(docset.doc(), 0u32);

View File

@@ -1,7 +1,7 @@
use super::Scorer;
use crate::core::SegmentReader;
use crate::query::Explanation;
use crate::{DocId, Result};
use crate::DocId;
/// A Weight is the specialization of a Query
/// for a given set of segments.
@@ -9,15 +9,18 @@ use crate::{DocId, Result};
/// See [`Query`](./trait.Query.html).
pub trait Weight: Send + Sync + 'static {
/// Returns the scorer for the given segment.
///
/// `boost` is a multiplier to apply to the score.
///
/// See [`Query`](./trait.Query.html).
fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>>;
fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>>;
/// Returns an `Explanation` for the given document.
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation>;
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation>;
/// Returns the number of documents within the given `SegmentReader`.
fn count(&self, reader: &SegmentReader) -> Result<u32> {
let mut scorer = self.scorer(reader)?;
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
let mut scorer = self.scorer(reader, 1.0f32)?;
if let Some(delete_bitset) = reader.delete_bitset() {
Ok(scorer.count(delete_bitset))
} else {

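For illustration, a caller-side sketch of the new signature (the query and searcher are assumed to exist); the boost passed to `scorer` multiplies every score the scorer emits:

use tantivy::query::{Query, Scorer};
use tantivy::{DocSet, Result, Searcher};

fn collect_doubled_scores(query: &dyn Query, searcher: &Searcher) -> Result<Vec<f32>> {
    let weight = query.weight(searcher, true)?;
    let mut scores = Vec::new();
    for segment_reader in searcher.segment_readers() {
        // A boost of 2.0 doubles every score relative to the default boost of 1.0.
        let mut scorer = weight.scorer(segment_reader, 2.0f32)?;
        while scorer.advance() {
            scores.push(scorer.score());
        }
    }
    Ok(scores)
}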
View File

@@ -68,7 +68,9 @@ impl<T> Pool<T> {
/// After publish, all newly acquired `Searcher`s will be
/// of the new generation.
pub fn publish_new_generation(&self, items: Vec<T>) {
assert!(!items.is_empty());
let next_generation = self.next_generation.fetch_add(1, Ordering::SeqCst) + 1;
let num_items = items.len();
for item in items {
let gen_item = GenerationItem {
item,
@@ -77,6 +79,23 @@ impl<T> Pool<T> {
self.queue.push(gen_item);
}
self.advertise_generation(next_generation);
// Purge the possibly obsolete searchers from the previous generation.
//
// Assuming that, at this point, no searcher is held by the user for more than duration T,
// this guarantees that an obsolete searcher (and its associated mmap) will not be
// uselessly held for more than duration T.
//
// Proof: At this point, obsolete searchers that are still held by the user will be held
// for less than T. When released, they will be dropped, as their generation is detected
// as obsolete.
//
// We still need to ensure that the obsolete searchers sitting in the pool get removed.
// The queue currently contains up to 2n searchers, in any random order.
//
// Half of them are obsolete. By requesting `(n+1)` fresh searchers, we ensure that every
// searcher in the queue will be inspected.
for _ in 0..=num_items {
let _ = self.acquire();
}
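// Numeric illustration of the reasoning above: with `num_items = 3`, the queue holds at
// most 6 searchers, of which at most 3 are obsolete. The `0..=num_items` loop above
// performs 3 + 1 = 4 acquisitions which, per that argument, is enough to guarantee that
// every queued searcher, including every obsolete one, gets inspected.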
}
/// At the exit of this method,

View File

@@ -4,6 +4,7 @@ use crate::common::VInt;
use crate::tokenizer::PreTokenizedString;
use crate::DateTime;
use itertools::Itertools;
use serde;
use std::io::{self, Read, Write};
/// Tantivy's Document is the object that can
@@ -16,7 +17,7 @@ use std::io::{self, Read, Write};
/// Documents are really just a list of couple `(field, value)`.
/// In this list, one field may appear more than once.
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, Default)]
pub struct Document {
field_values: Vec<FieldValue>,
}

View File

@@ -125,7 +125,7 @@ impl Facet {
/// This function is the inverse of Facet::from(&str).
pub fn to_path_string(&self) -> String {
format!("{}", self.to_string())
format!("{}", self)
}
}

View File

@@ -1,11 +1,14 @@
use crate::common::BinarySerializable;
use serde;
use std::io;
use std::io::Read;
use std::io::Write;
/// `Field` is represented by an unsigned 32-bit integer type
/// The schema holds the mapping between field names and `Field` objects.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
#[derive(
Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
pub struct Field(u32);
impl Field {

View File

@@ -1,12 +1,13 @@
use crate::common::BinarySerializable;
use crate::schema::Field;
use crate::schema::Value;
use serde;
use std::io;
use std::io::Read;
use std::io::Write;
/// `FieldValue` holds together a `Field` and its `Value`.
#[derive(Debug, Clone, Ord, PartialEq, Eq, PartialOrd, Serialize, Deserialize)]
#[derive(Debug, Clone, Ord, PartialEq, Eq, PartialOrd, serde::Serialize, serde::Deserialize)]
pub struct FieldValue {
field: Field,
value: Value,

View File

@@ -1,3 +1,5 @@
use serde::{Deserialize, Serialize};
/// `IndexRecordOption` describes the amount of information associated
/// with a given indexed field.
///

View File

@@ -1,4 +1,5 @@
use crate::schema::flags::{FastFlag, IndexedFlag, SchemaFlagList, StoredFlag};
use serde::{Deserialize, Serialize};
use std::ops::BitOr;
/// Express whether a field is single-value or multi-valued.

View File

@@ -1,4 +1,5 @@
use crate::schema::Value;
use serde::Serialize;
use std::collections::BTreeMap;
/// Internal representation of a document used for JSON

View File

@@ -1,6 +1,7 @@
use crate::schema::flags::SchemaFlagList;
use crate::schema::flags::StoredFlag;
use crate::schema::IndexRecordOption;
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use std::ops::BitOr;

View File

@@ -11,6 +11,7 @@ under-count actual resultant space usage by up to 4095 bytes per file.
use crate::schema::Field;
use crate::SegmentComponent;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Indicates space usage in bytes

View File

@@ -9,7 +9,7 @@ pub const COMPRESSION: &str = "snappy";
pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
compressed.clear();
let mut encoder = snap::Writer::new(compressed);
let mut encoder = snap::write::FrameEncoder::new(compressed);
encoder.write_all(&uncompressed)?;
encoder.flush()?;
Ok(())
@@ -17,6 +17,6 @@ pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()>
pub fn decompress(compressed: &[u8], decompressed: &mut Vec<u8>) -> io::Result<()> {
decompressed.clear();
snap::Reader::new(compressed).read_to_end(decompressed)?;
snap::read::FrameDecoder::new(compressed).read_to_end(decompressed)?;
Ok(())
}

View File

@@ -3,8 +3,6 @@ use super::skiplist::SkipListBuilder;
use super::StoreReader;
use crate::common::CountingWriter;
use crate::common::{BinarySerializable, VInt};
use crate::directory::TerminatingWrite;
use crate::directory::WritePtr;
use crate::schema::Document;
use crate::DocId;
use std::io::{self, Write};
@@ -19,20 +17,20 @@ const BLOCK_SIZE: usize = 16_384;
///
/// The skip list index, on the other hand, is built in memory.
///
pub struct StoreWriter {
pub struct StoreWriter<W: io::Write> {
doc: DocId,
offset_index_writer: SkipListBuilder<u64>,
writer: CountingWriter<WritePtr>,
writer: CountingWriter<W>,
intermediary_buffer: Vec<u8>,
current_block: Vec<u8>,
}
impl StoreWriter {
impl<W: io::Write> StoreWriter<W> {
/// Create a store writer.
///
/// The store writer will write blocks on disc as
/// documents are added.
pub fn new(writer: WritePtr) -> StoreWriter {
pub fn new(writer: W) -> StoreWriter<W> {
StoreWriter {
doc: 0,
offset_index_writer: SkipListBuilder::new(4),
@@ -102,7 +100,7 @@ impl StoreWriter {
///
/// Compress the last unfinished block if any,
/// and serializes the skip list index on disc.
pub fn close(mut self) -> io::Result<()> {
pub fn close(mut self) -> io::Result<W> {
if !self.current_block.is_empty() {
self.write_and_compress_block()?;
}
@@ -110,6 +108,9 @@ impl StoreWriter {
self.offset_index_writer.write(&mut self.writer)?;
header_offset.serialize(&mut self.writer)?;
self.doc.serialize(&mut self.writer)?;
self.writer.terminate()
self.writer.flush()?;
let (wrt, _) = self.writer.finish()?;
Ok(wrt)
}
}
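A crate-internal sketch of the new generic form (the store module is not part of the public API, and the surrounding context is assumed): an in-memory `Vec<u8>` now works as the write target, and `close` hands the inner writer back.

use std::io;

fn build_store_in_memory() -> io::Result<Vec<u8>> {
    // `Vec<u8>` implements `io::Write`, so it can serve as the store's target.
    let store_writer = StoreWriter::new(Vec::new());
    // ... documents would be appended here before closing ...
    // `close` flushes the last block, writes the skip list, and returns the buffer.
    store_writer.close()
}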

View File

@@ -1,6 +1,7 @@
use super::{Token, TokenFilter, TokenStream};
use crate::tokenizer::BoxTokenStream;
use rust_stemmers::{self, Algorithm};
use serde::{Deserialize, Serialize};
/// Available stemmer languages.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Copy, Clone)]

View File

@@ -1,4 +1,5 @@
use crate::tokenizer::{BoxTokenStream, Token, TokenStream, TokenStreamChain};
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
/// Struct representing pre-tokenized text

View File

@@ -1,4 +1,5 @@
use crate::tokenizer::TokenStreamChain;
use serde::{Deserialize, Serialize};
/// The tokenizer module contains all of the tools used to process
/// text in `tantivy`.
use std::borrow::{Borrow, BorrowMut};