Compare commits

..

1 Commit

Author: Paul Masurel
SHA1: 79894657df
Message: Address #656. Broke the reference loop to make sure that the watch_router can be dropped, and the thread exits.
Date: 2019-09-30 12:54:02 +09:00
102 changed files with 947 additions and 1539 deletions
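The fix itself is spread across the files below and is not reproduced in full here. As a rough, hypothetical illustration of the technique named in the commit message (breaking a reference loop so that a background watch thread can observe the drop and exit), one side of the cycle typically holds a `Weak` handle instead of an `Arc`; the names below are made up and are not tantivy's actual `watch_router` code:

```rust
use std::sync::{Arc, Mutex, Weak};
use std::thread;
use std::time::Duration;

// Hypothetical stand-in for the watch router; not tantivy's real type.
struct WatchRouter {
    callbacks: Mutex<Vec<Box<dyn Fn() + Send>>>,
}

fn spawn_watch_thread(router: &Arc<WatchRouter>) -> thread::JoinHandle<()> {
    // The thread keeps only a Weak reference, so it does not keep the router
    // alive. Once the last strong Arc is dropped, upgrade() returns None and
    // the loop breaks, letting the thread exit.
    let weak: Weak<WatchRouter> = Arc::downgrade(router);
    thread::spawn(move || loop {
        match weak.upgrade() {
            Some(router) => {
                for callback in router.callbacks.lock().unwrap().iter() {
                    callback();
                }
            }
            None => break,
        }
        thread::sleep(Duration::from_millis(100));
    })
}
```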

View File

@@ -47,7 +47,6 @@ matrix:
before_install:
- set -e
- rustup self update
- rustup component add rustfmt
install:
- sh ci/install.sh
@@ -61,7 +60,6 @@ before_script:
script:
- bash ci/script.sh
- cargo fmt --all -- --check
before_deploy:
- sh ci/before_deploy.sh

View File

@@ -2,19 +2,6 @@ Tantivy 0.11.0
=====================
- Added f64 field. Internally reuse u64 code the same way i64 does (@fdb-hiroshima)
- Various bugfixes in the query parser.
- Better handling of hyphens in query parser. (#609)
- Better handling of whitespaces.
- Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
- API change around `Box<BoxableTokenizer>`. See detail in #629
- Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
- Add footer with some metadata to index files. #605 (@fdb-hiroshima)
## How to update?
- `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
- Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
an error and handling the `Result` is required.
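As a minimal sketch of what the 0.11 notes above mean in user code; the constructor names (`SchemaBuilder::add_f64_field`, `RegexQuery::from_pattern`) are assumptions based on the 0.11-era API and are not confirmed by this diff:

```rust
use tantivy::query::RegexQuery;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};

fn update_example() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    // New f64 field support, internally reusing the u64 fast-field code
    // (assumed method name).
    let _weight = schema_builder.add_f64_field("weight", FAST | INDEXED);
    let _schema = schema_builder.build();

    // The regex is now compiled when the query is built, so construction is
    // fallible and the Result must be handled (assumed constructor).
    let _query = RegexQuery::from_pattern("se[ae]", title)?;
    Ok(())
}
```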
Tantivy 0.10.2

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.11.0"
version = "0.10.2"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -15,9 +15,8 @@ edition = "2018"
[dependencies]
base64 = "0.10.0"
byteorder = "1.0"
crc32fast = "1.2.0"
once_cell = "1.0"
regex ={version = "1.3.0", default-features = false, features = ["std"]}
once_cell = "0.2"
regex = "1.0"
tantivy-fst = "0.1"
memmap = {version = "0.7", optional=true}
lz4 = {version="1.20", optional=true}
@@ -25,6 +24,8 @@ snap = {version="0.2"}
atomicwrites = {version="0.2.2", optional=true}
tempfile = "3.0"
log = "0.4"
combine = ">=3.6.0,<4.0.0"
tempdir = "0.3"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
@@ -35,14 +36,13 @@ levenshtein_automata = {version="0.1", features=["fst_automaton"]}
notify = {version="4", optional=true}
bit-set = "0.5"
uuid = { version = "0.7.2", features = ["v4", "serde"] }
crossbeam = "0.7"
crossbeam = "0.5"
futures = "0.1"
futures-cpupool = "0.1"
owning_ref = "0.4"
stable_deref_trait = "1.0.0"
rust-stemmers = "1.1"
downcast-rs = { version="1.0" }
tantivy-query-grammar = { path="./query-grammar" }
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.2"
fnv = "1.0.6"
@@ -81,16 +81,13 @@ failpoints = ["fail/failpoints"]
unstable = [] # useful for benches.
wasm-bindgen = ["uuid/wasm-bindgen"]
[workspace]
members = ["query-grammar"]
[badges]
travis-ci = { repository = "tantivy-search/tantivy" }
[dev-dependencies.fail]
version = "0.3"
features = ["failpoints"]
# Following the "fail" crate best practises, we isolate
# tests that define specific behavior in fail check points
# in a different binary.

View File

@@ -1,3 +0,0 @@
test:
echo "Run test only... No examples."
cargo test --tests --lib

View File

@@ -7,7 +7,7 @@ set -ex
main() {
if [ ! -z $CODECOV ]; then
echo "Codecov"
cargo build --verbose && cargo coverage --verbose --all && bash <(curl -s https://codecov.io/bash) -s target/kcov
cargo build --verbose && cargo coverage --verbose && bash <(curl -s https://codecov.io/bash) -s target/kcov
else
echo "Build"
cross build --target $TARGET
@@ -15,8 +15,7 @@ main() {
return
fi
echo "Test"
cross test --target $TARGET --no-default-features --features mmap
cross test --target $TARGET --no-default-features --features mmap query-grammar
cross test --target $TARGET --no-default-features --features mmap -- --test-threads 1
fi
for example in $(ls examples/*.rs)
do

View File

@@ -5,23 +5,26 @@
//
// We will :
// - define our schema
// - create an index in a directory
// - index a few documents into our index
// - search for the best document matching a basic query
// - retrieve the best document's original content.
// = create an index in a directory
// - index few documents in our index
// - search for the best document matchings "sea whale"
// - retrieve the best document original content.
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;
use tantivy::Index;
use tantivy::ReloadPolicy;
use tempdir::TempDir;
fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the
// sake of this example
let index_path = TempDir::new()?;
let index_path = TempDir::new("tantivy_example_dir")?;
// # Defining the schema
//
@@ -30,7 +33,7 @@ fn main() -> tantivy::Result<()> {
// and for each field, its type and "the way it should
// be indexed".
// First we need to define a schema ...
// first we need to define a schema ...
let mut schema_builder = Schema::builder();
// Our first field is title.
@@ -45,7 +48,7 @@ fn main() -> tantivy::Result<()> {
//
// `STORED` means that the field will also be saved
// in a compressed, row-oriented key-value store.
// This store is useful for reconstructing the
// This store is useful to reconstruct the
// documents that were selected during the search phase.
schema_builder.add_text_field("title", TEXT | STORED);
@@ -54,7 +57,8 @@ fn main() -> tantivy::Result<()> {
// need to be able to be able to retrieve it
// for our application.
//
// We can make our index lighter by omitting the `STORED` flag.
// We can make our index lighter and
// by omitting `STORED` flag.
schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build();
@@ -67,7 +71,7 @@ fn main() -> tantivy::Result<()> {
// with our schema in the directory.
let index = Index::create_in_dir(&index_path, schema.clone())?;
// To insert a document we will need an index writer.
// To insert document we need an index writer.
// There must be only one writer at a time.
// This single `IndexWriter` is already
// multithreaded.
@@ -145,8 +149,8 @@ fn main() -> tantivy::Result<()> {
// At this point our documents are not searchable.
//
//
// We need to call `.commit()` explicitly to force the
// `index_writer` to finish processing the documents in the queue,
// We need to call .commit() explicitly to force the
// index_writer to finish processing the documents in the queue,
// flush the current index to the disk, and advertise
// the existence of new documents.
//
@@ -158,14 +162,14 @@ fn main() -> tantivy::Result<()> {
// persistently indexed.
//
// In the scenario of a crash or a power failure,
// tantivy behaves as if it has rolled back to its last
// tantivy behaves as if has rolled back to its last
// commit.
// # Searching
//
// ### Searcher
//
// A reader is required first in order to search an index.
// A reader is required to get search the index.
// It acts as a `Searcher` pool that reloads itself,
// depending on a `ReloadPolicy`.
//
@@ -181,7 +185,7 @@ fn main() -> tantivy::Result<()> {
// We now need to acquire a searcher.
//
// A searcher points to a snapshotted, immutable version of the index.
// A searcher points to snapshotted, immutable version of the index.
//
// Some search experience might require more than
// one query. Using the same searcher ensures that all of these queries will run on the
@@ -201,7 +205,7 @@ fn main() -> tantivy::Result<()> {
// in both title and body.
let query_parser = QueryParser::for_index(&index, vec![title, body]);
// `QueryParser` may fail if the query is not in the right
// QueryParser may fail if the query is not in the right
// format. For user facing applications, this can be a problem.
// A ticket has been opened regarding this problem.
let query = query_parser.parse_query("sea whale")?;
@@ -217,7 +221,7 @@ fn main() -> tantivy::Result<()> {
//
// We are not interested in all of the documents but
// only in the top 10. Keeping track of our top 10 best documents
// is the role of the `TopDocs` collector.
// is the role of the TopDocs.
// We can now perform our query.
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
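The hunks above mostly show the commentary of `basic_search`; for reference, a compact sketch of the complete workflow those comments describe (writer, commit, reader, query parser, `TopDocs`), using the `use tantivy::{doc, Index, ReloadPolicy}` style imports that appear in this diff and an in-RAM index so the snippet stays self-contained:

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index, ReloadPolicy};

fn tiny_search() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let body = schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema.clone());

    // There must be only one IndexWriter at a time; it is internally multithreaded.
    let mut index_writer = index.writer(50_000_000)?;
    index_writer.add_document(doc!(
        title => "The Old Man and the Sea",
        body => "He was an old man who fished alone in a skiff in the Gulf Stream."
    ));
    // Documents only become searchable after an explicit commit.
    index_writer.commit()?;

    // A reader acts as a pool of searchers over an immutable snapshot of the index.
    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::OnCommit)
        .try_into()?;
    let searcher = reader.searcher();

    let query_parser = QueryParser::for_index(&index, vec![title, body]);
    let query = query_parser.parse_query("sea whale")?;

    // Keep only the ten best documents.
    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
    for (_score, doc_address) in top_docs {
        let retrieved_doc = searcher.doc(doc_address)?;
        println!("{}", schema.to_json(&retrieved_doc));
    }
    Ok(())
}
```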

View File

@@ -9,12 +9,15 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, SegmentReader, TantivyError};
use tantivy::SegmentReader;
use tantivy::{Index, TantivyError};
#[derive(Default)]
struct Stats {

View File

@@ -2,11 +2,14 @@
//
// In this example, we'll see how to define a tokenizer pipeline
// by aligning a bunch of `TokenFilter`.
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::NgramTokenizer;
use tantivy::{doc, Index};
use tantivy::Index;
fn main() -> tantivy::Result<()> {
// # Defining the schema

View File

@@ -8,10 +8,13 @@
//
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::{doc, Index, IndexReader};
use tantivy::Index;
use tantivy::IndexReader;
// A simple helper function to fetch a single document
// given its id from our index.

View File

@@ -12,16 +12,17 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::FacetCollector;
use tantivy::query::AllQuery;
use tantivy::schema::*;
use tantivy::{doc, Index};
use tempfile::TempDir;
use tantivy::Index;
fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the
// sake of this example
let index_path = TempDir::new()?;
let index_path = TempDir::new("tantivy_facet_example_dir")?;
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("name", TEXT | STORED);
@@ -73,3 +74,5 @@ fn main() -> tantivy::Result<()> {
Ok(())
}
use tempdir::TempDir;

View File

@@ -2,10 +2,14 @@
//
// Below is an example of creating an indexed integer field in your schema
// You can use RangeQuery to get a Count of all occurrences in a given range.
#[macro_use]
extern crate tantivy;
use tantivy::collector::Count;
use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INDEXED};
use tantivy::{doc, Index, Result};
use tantivy::Index;
use tantivy::Result;
fn run() -> Result<()> {
// For the sake of simplicity, this schema will only have 1 field

View File

@@ -9,8 +9,11 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::schema::*;
use tantivy::{doc, DocId, DocSet, Index, Postings};
use tantivy::Index;
use tantivy::{DocId, DocSet, Postings};
fn main() -> tantivy::Result<()> {
// We first create a schema for the sake of the

View File

@@ -25,11 +25,14 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index, IndexWriter, Opstamp};
use tantivy::Opstamp;
use tantivy::{Index, IndexWriter};
fn main() -> tantivy::Result<()> {
// # Defining the schema
@@ -46,9 +49,10 @@ fn main() -> tantivy::Result<()> {
thread::spawn(move || {
// we index 100 times the document... for the sake of the example.
for i in 0..100 {
let opstamp = index_writer_clone_1
.read().unwrap() //< A read lock is sufficient here.
.add_document(
let opstamp = {
// A read lock is sufficient here.
let index_writer_rlock = index_writer_clone_1.read().unwrap();
index_writer_rlock.add_document(
doc!(
title => "Of Mice and Men",
body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
@@ -59,7 +63,8 @@ fn main() -> tantivy::Result<()> {
fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool"
));
))
};
println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
thread::sleep(Duration::from_millis(20));
}

View File

@@ -7,16 +7,19 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::{doc, Index, Snippet, SnippetGenerator};
use tempfile::TempDir;
use tantivy::Index;
use tantivy::{Snippet, SnippetGenerator};
use tempdir::TempDir;
fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the
// sake of this example
let index_path = TempDir::new()?;
let index_path = TempDir::new("tantivy_example_dir")?;
// # Defining the schema
let mut schema_builder = Schema::builder();

View File

@@ -11,11 +11,13 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::*;
use tantivy::{doc, Index};
use tantivy::Index;
fn main() -> tantivy::Result<()> {
// this example assumes you understand the content in `basic_search`

View File

@@ -1,16 +0,0 @@
[package]
name = "tantivy-query-grammar"
version = "0.11.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
description = """Search engine library"""
documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"
[dependencies]
combine = ">=3.6.0,<4.0.0"

View File

@@ -1,17 +0,0 @@
#![recursion_limit = "100"]
mod occur;
mod query_grammar;
mod user_input_ast;
use combine::parser::Parser;
pub use crate::occur::Occur;
use crate::query_grammar::parse_to_ast;
pub use crate::user_input_ast::{UserInputAST, UserInputBound, UserInputLeaf, UserInputLiteral};
pub struct Error;
pub fn parse_query(query: &str) -> Result<UserInputAST, Error> {
let (user_input_ast, _remaining) = parse_to_ast().parse(query).map_err(|_| Error)?;
Ok(user_input_ast)
}

View File

@@ -1,380 +0,0 @@
use super::user_input_ast::*;
use crate::Occur;
use combine::char::*;
use combine::error::StreamError;
use combine::stream::StreamErrorFor;
use combine::*;
parser! {
fn field[I]()(I) -> String
where [I: Stream<Item = char>] {
(
letter(),
many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
).skip(char(':')).map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
}
}
parser! {
fn word[I]()(I) -> String
where [I: Stream<Item = char>] {
(
satisfy(|c: char| !c.is_whitespace() && !['-', '`', ':', '{', '}', '"', '[', ']', '(',')'].contains(&c) ),
many(satisfy(|c: char| !c.is_whitespace() && ![':', '{', '}', '"', '[', ']', '(',')'].contains(&c)))
)
.map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
.and_then(|s: String|
match s.as_str() {
"OR" => Err(StreamErrorFor::<I>::unexpected_static_message("OR")),
"AND" => Err(StreamErrorFor::<I>::unexpected_static_message("AND")),
"NOT" => Err(StreamErrorFor::<I>::unexpected_static_message("NOT")),
_ => Ok(s)
})
}
}
parser! {
fn literal[I]()(I) -> UserInputLeaf
where [I: Stream<Item = char>]
{
let term_val = || {
let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"'));
phrase.or(word())
};
let term_val_with_field = negative_number().or(term_val());
let term_query =
(field(), term_val_with_field)
.map(|(field_name, phrase)| UserInputLiteral {
field_name: Some(field_name),
phrase,
});
let term_default_field = term_val().map(|phrase| UserInputLiteral {
field_name: None,
phrase,
});
attempt(term_query)
.or(term_default_field)
.map(UserInputLeaf::from)
}
}
parser! {
fn negative_number[I]()(I) -> String
where [I: Stream<Item = char>]
{
(char('-'), many1(satisfy(char::is_numeric)),
optional((char('.'), many1(satisfy(char::is_numeric)))))
.map(|(s1, s2, s3): (char, String, Option<(char, String)>)| {
if let Some(('.', s3)) = s3 {
format!("{}{}.{}", s1, s2, s3)
} else {
format!("{}{}", s1, s2)
}
})
}
}
parser! {
fn spaces1[I]()(I) -> ()
where [I: Stream<Item = char>] {
skip_many1(space())
}
}
parser! {
/// Function that parses a range out of a Stream
/// Supports ranges like:
/// [5 TO 10], {5 TO 10}, [* TO 10], [10 TO *], {10 TO *], >5, <=10
/// [a TO *], [a TO c], [abc TO bcd}
fn range[I]()(I) -> UserInputLeaf
where [I: Stream<Item = char>] {
let range_term_val = || {
word().or(negative_number()).or(char('*').with(value("*".to_string())))
};
// check for unbounded range in the form of <5, <=10, >5, >=5
let elastic_unbounded_range = (choice([attempt(string(">=")),
attempt(string("<=")),
attempt(string("<")),
attempt(string(">"))])
.skip(spaces()),
range_term_val()).
map(|(comparison_sign, bound): (&str, String)|
match comparison_sign {
">=" => (UserInputBound::Inclusive(bound), UserInputBound::Unbounded),
"<=" => (UserInputBound::Unbounded, UserInputBound::Inclusive(bound)),
"<" => (UserInputBound::Unbounded, UserInputBound::Exclusive(bound)),
">" => (UserInputBound::Exclusive(bound), UserInputBound::Unbounded),
// default case
_ => (UserInputBound::Unbounded, UserInputBound::Unbounded)
});
let lower_bound = (one_of("{[".chars()), range_term_val())
.map(|(boundary_char, lower_bound): (char, String)|
if lower_bound == "*" {
UserInputBound::Unbounded
} else if boundary_char == '{' {
UserInputBound::Exclusive(lower_bound)
} else {
UserInputBound::Inclusive(lower_bound)
});
let upper_bound = (range_term_val(), one_of("}]".chars()))
.map(|(higher_bound, boundary_char): (String, char)|
if higher_bound == "*" {
UserInputBound::Unbounded
} else if boundary_char == '}' {
UserInputBound::Exclusive(higher_bound)
} else {
UserInputBound::Inclusive(higher_bound)
});
// return only lower and upper
let lower_to_upper = (lower_bound.
skip((spaces(),
string("TO"),
spaces())),
upper_bound);
(optional(field()).skip(spaces()),
// try elastic first, if it matches, the range is unbounded
attempt(elastic_unbounded_range).or(lower_to_upper))
.map(|(field, (lower, upper))|
// Construct the leaf from extracted field (optional)
// and bounds
UserInputLeaf::Range {
field,
lower,
upper
})
}
}
fn negate(expr: UserInputAST) -> UserInputAST {
expr.unary(Occur::MustNot)
}
fn must(expr: UserInputAST) -> UserInputAST {
expr.unary(Occur::Must)
}
parser! {
fn leaf[I]()(I) -> UserInputAST
where [I: Stream<Item = char>] {
char('-').with(leaf()).map(negate)
.or(char('+').with(leaf()).map(must))
.or(char('(').with(ast()).skip(char(')')))
.or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All)))
.or(attempt(string("NOT").skip(spaces1()).with(leaf()).map(negate)))
.or(attempt(range().map(UserInputAST::from)))
.or(literal().map(UserInputAST::from))
}
}
#[derive(Clone, Copy)]
enum BinaryOperand {
Or,
And,
}
parser! {
fn binary_operand[I]()(I) -> BinaryOperand
where [I: Stream<Item = char>]
{
string("AND").with(value(BinaryOperand::And))
.or(string("OR").with(value(BinaryOperand::Or)))
}
}
fn aggregate_binary_expressions(
left: UserInputAST,
others: Vec<(BinaryOperand, UserInputAST)>,
) -> UserInputAST {
let mut dnf: Vec<Vec<UserInputAST>> = vec![vec![left]];
for (operator, operand_ast) in others {
match operator {
BinaryOperand::And => {
if let Some(last) = dnf.last_mut() {
last.push(operand_ast);
}
}
BinaryOperand::Or => {
dnf.push(vec![operand_ast]);
}
}
}
if dnf.len() == 1 {
UserInputAST::and(dnf.into_iter().next().unwrap()) //< safe
} else {
let conjunctions = dnf.into_iter().map(UserInputAST::and).collect();
UserInputAST::or(conjunctions)
}
}
parser! {
pub fn ast[I]()(I) -> UserInputAST
where [I: Stream<Item = char>]
{
let operand_leaf = (binary_operand().skip(spaces()), leaf().skip(spaces()));
let boolean_expr = (leaf().skip(spaces().silent()), many1(operand_leaf)).map(
|(left, right)| aggregate_binary_expressions(left,right));
let whitespace_separated_leaves = many1(leaf().skip(spaces().silent()))
.map(|subqueries: Vec<UserInputAST>|
if subqueries.len() == 1 {
subqueries.into_iter().next().unwrap()
} else {
UserInputAST::Clause(subqueries.into_iter().collect())
});
let expr = attempt(boolean_expr).or(whitespace_separated_leaves);
spaces().with(expr).skip(spaces())
}
}
parser! {
pub fn parse_to_ast[I]()(I) -> UserInputAST
where [I: Stream<Item = char>]
{
spaces().with(optional(ast()).skip(eof())).map(|opt_ast| opt_ast.unwrap_or_else(UserInputAST::empty_query))
}
}
#[cfg(test)]
mod test {
use super::*;
fn test_parse_query_to_ast_helper(query: &str, expected: &str) {
let query = parse_to_ast().parse(query).unwrap().0;
let query_str = format!("{:?}", query);
assert_eq!(query_str, expected);
}
fn test_is_parse_err(query: &str) {
assert!(parse_to_ast().parse(query).is_err());
}
#[test]
fn test_parse_empty_to_ast() {
test_parse_query_to_ast_helper("", "<emptyclause>");
}
#[test]
fn test_parse_query_to_ast_hyphen() {
test_parse_query_to_ast_helper("\"www-form-encoded\"", "\"www-form-encoded\"");
test_parse_query_to_ast_helper("www-form-encoded", "\"www-form-encoded\"");
test_parse_query_to_ast_helper("www-form-encoded", "\"www-form-encoded\"");
}
#[test]
fn test_parse_query_to_ast_not_op() {
assert_eq!(
format!("{:?}", parse_to_ast().parse("NOT")),
"Err(UnexpectedParse)"
);
test_parse_query_to_ast_helper("NOTa", "\"NOTa\"");
test_parse_query_to_ast_helper("NOT a", "-(\"a\")");
}
#[test]
fn test_parse_query_to_ast_binary_op() {
test_parse_query_to_ast_helper("a AND b", "(+(\"a\") +(\"b\"))");
test_parse_query_to_ast_helper("a OR b", "(?(\"a\") ?(\"b\"))");
test_parse_query_to_ast_helper("a OR b AND c", "(?(\"a\") ?((+(\"b\") +(\"c\"))))");
test_parse_query_to_ast_helper("a AND b AND c", "(+(\"a\") +(\"b\") +(\"c\"))");
assert_eq!(
format!("{:?}", parse_to_ast().parse("a OR b aaa")),
"Err(UnexpectedParse)"
);
assert_eq!(
format!("{:?}", parse_to_ast().parse("a AND b aaa")),
"Err(UnexpectedParse)"
);
assert_eq!(
format!("{:?}", parse_to_ast().parse("aaa a OR b ")),
"Err(UnexpectedParse)"
);
assert_eq!(
format!("{:?}", parse_to_ast().parse("aaa ccc a OR b ")),
"Err(UnexpectedParse)"
);
}
#[test]
fn test_parse_elastic_query_ranges() {
test_parse_query_to_ast_helper("title: >a", "title:{\"a\" TO \"*\"}");
test_parse_query_to_ast_helper("title:>=a", "title:[\"a\" TO \"*\"}");
test_parse_query_to_ast_helper("title: <a", "title:{\"*\" TO \"a\"}");
test_parse_query_to_ast_helper("title:<=a", "title:{\"*\" TO \"a\"]");
test_parse_query_to_ast_helper("title:<=bsd", "title:{\"*\" TO \"bsd\"]");
test_parse_query_to_ast_helper("weight: >70", "weight:{\"70\" TO \"*\"}");
test_parse_query_to_ast_helper("weight:>=70", "weight:[\"70\" TO \"*\"}");
test_parse_query_to_ast_helper("weight: <70", "weight:{\"*\" TO \"70\"}");
test_parse_query_to_ast_helper("weight:<=70", "weight:{\"*\" TO \"70\"]");
test_parse_query_to_ast_helper("weight: >60.7", "weight:{\"60.7\" TO \"*\"}");
test_parse_query_to_ast_helper("weight: <= 70", "weight:{\"*\" TO \"70\"]");
test_parse_query_to_ast_helper("weight: <= 70.5", "weight:{\"*\" TO \"70.5\"]");
}
#[test]
fn test_range_parser() {
// testing the range() parser separately
let res = range().parse("title: <hello").unwrap().0;
let expected = UserInputLeaf::Range {
field: Some("title".to_string()),
lower: UserInputBound::Unbounded,
upper: UserInputBound::Exclusive("hello".to_string()),
};
let res2 = range().parse("title:{* TO hello}").unwrap().0;
assert_eq!(res, expected);
assert_eq!(res2, expected);
let expected_weight = UserInputLeaf::Range {
field: Some("weight".to_string()),
lower: UserInputBound::Inclusive("71.2".to_string()),
upper: UserInputBound::Unbounded,
};
let res3 = range().parse("weight: >=71.2").unwrap().0;
let res4 = range().parse("weight:[71.2 TO *}").unwrap().0;
assert_eq!(res3, expected_weight);
assert_eq!(res4, expected_weight);
}
#[test]
fn test_parse_query_to_triming_spaces() {
test_parse_query_to_ast_helper(" abc", "\"abc\"");
test_parse_query_to_ast_helper("abc ", "\"abc\"");
test_parse_query_to_ast_helper("( a OR abc)", "(?(\"a\") ?(\"abc\"))");
test_parse_query_to_ast_helper("(a OR abc)", "(?(\"a\") ?(\"abc\"))");
test_parse_query_to_ast_helper("(a OR abc)", "(?(\"a\") ?(\"abc\"))");
test_parse_query_to_ast_helper("a OR abc ", "(?(\"a\") ?(\"abc\"))");
test_parse_query_to_ast_helper("(a OR abc )", "(?(\"a\") ?(\"abc\"))");
test_parse_query_to_ast_helper("(a OR abc) ", "(?(\"a\") ?(\"abc\"))");
}
#[test]
fn test_parse_query_to_ast() {
test_parse_query_to_ast_helper("abc", "\"abc\"");
test_parse_query_to_ast_helper("a b", "(\"a\" \"b\")");
test_parse_query_to_ast_helper("+(a b)", "+((\"a\" \"b\"))");
test_parse_query_to_ast_helper("+d", "+(\"d\")");
test_parse_query_to_ast_helper("+(a b) +d", "(+((\"a\" \"b\")) +(\"d\"))");
test_parse_query_to_ast_helper("(+a +b) d", "((+(\"a\") +(\"b\")) \"d\")");
test_parse_query_to_ast_helper("(+a)", "+(\"a\")");
test_parse_query_to_ast_helper("(+a +b)", "(+(\"a\") +(\"b\"))");
test_parse_query_to_ast_helper("abc:toto", "abc:\"toto\"");
test_parse_query_to_ast_helper("abc:1.1", "abc:\"1.1\"");
test_parse_query_to_ast_helper("+abc:toto", "+(abc:\"toto\")");
test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+(abc:\"toto\") -(\"titi\"))");
test_parse_query_to_ast_helper("-abc:toto", "-(abc:\"toto\")");
test_parse_query_to_ast_helper("abc:a b", "(abc:\"a\" \"b\")");
test_parse_query_to_ast_helper("abc:\"a b\"", "abc:\"a b\"");
test_parse_query_to_ast_helper("foo:[1 TO 5]", "foo:[\"1\" TO \"5\"]");
test_parse_query_to_ast_helper("[1 TO 5]", "[\"1\" TO \"5\"]");
test_parse_query_to_ast_helper("foo:{a TO z}", "foo:{\"a\" TO \"z\"}");
test_parse_query_to_ast_helper("foo:[1 TO toto}", "foo:[\"1\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[* TO toto}", "foo:{\"*\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[1 TO *}", "foo:[\"1\" TO \"*\"}");
test_parse_query_to_ast_helper("foo:[1.1 TO *}", "foo:[\"1.1\" TO \"*\"}");
test_is_parse_err("abc + ");
}
}

View File

@@ -10,10 +10,12 @@ use crate::SegmentReader;
/// documents match the query.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::Count;
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
@@ -123,4 +125,5 @@ mod tests {
assert_eq!(count_collector.harvest(), 2);
}
}
}

View File

@@ -81,10 +81,12 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
///
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::FacetCollector;
/// use tantivy::query::AllQuery;
/// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{doc, Index, Result};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
@@ -599,18 +601,19 @@ mod tests {
);
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use crate::collector::FacetCollector;
use crate::query::AllQuery;
use crate::schema::{Facet, Schema};
use crate::Index;
use rand::seq::SliceRandom;
use rand::thread_rng;
use collector::FacetCollector;
use query::AllQuery;
use rand::{thread_rng, Rng};
use schema::Facet;
use schema::Schema;
use test::Bencher;
use Index;
#[bench]
fn bench_facet_collector(b: &mut Bencher) {
@@ -627,7 +630,7 @@ mod bench {
}
}
// 40425 docs
docs[..].shuffle(&mut thread_rng());
thread_rng().shuffle(&mut docs[..]);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for doc in docs {
@@ -636,7 +639,7 @@ mod bench {
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let searcher = reader.searcher();
let searcher = index.searcher();
let facet_collector = FacetCollector::for_field(facet_field);
searcher.search(&AllQuery, &facet_collector).unwrap();
});

View File

@@ -35,6 +35,7 @@ The resulting `Fruit` will then be a typed tuple with each collector's original
in their respective position.
```rust
# extern crate tantivy;
# use tantivy::schema::*;
# use tantivy::*;
# use tantivy::query::*;

View File

@@ -105,10 +105,12 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
/// [Combining several collectors section of the collector documentation](./index.html#combining-several-collectors).
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::{Count, TopDocs, MultiCollector};
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {

View File

@@ -13,7 +13,6 @@ use crate::Result;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
use std::fmt;
/// The Top Score Collector keeps track of the K documents
/// sorted by their score.
@@ -23,10 +22,13 @@ use std::fmt;
/// is `O(n log K)`.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::DocAddress;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::TopDocs;
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, DocAddress, Index, Result};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
@@ -66,12 +68,6 @@ use std::fmt;
/// ```
pub struct TopDocs(TopCollector<Score>);
impl fmt::Debug for TopDocs {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "TopDocs({})", self.0.limit())
}
}
impl TopDocs {
/// Creates a top score collector, with a number of documents equal to "limit".
///
@@ -84,8 +80,10 @@ impl TopDocs {
/// Set top-K to rank documents by a given fast field.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, Result, DocAddress};
/// # use tantivy::{Index, Result, DocAddress};
/// # use tantivy::query::{Query, QueryParser};
/// use tantivy::Searcher;
/// use tantivy::collector::TopDocs;
@@ -123,7 +121,7 @@ impl TopDocs {
/// ///
/// /// `field` is required to be a FAST field.
/// fn docs_sorted_by_rating(searcher: &Searcher,
/// query: &dyn Query,
/// query: &Query,
/// sort_by_field: Field)
/// -> Result<Vec<(u64, DocAddress)>> {
///
@@ -192,8 +190,10 @@ impl TopDocs {
/// learning-to-rank model over various features
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress, DocId, Score};
/// # use tantivy::{Index, DocAddress, DocId, Score};
/// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
@@ -295,8 +295,10 @@ impl TopDocs {
/// # Example
///
/// ```rust
/// # #[macro_use]
/// # extern crate tantivy;
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress, DocId};
/// # use tantivy::{Index, DocAddress, DocId};
/// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
@@ -582,7 +584,7 @@ mod tests {
query_field: Field,
schema: Schema,
mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
) -> (Index, Box<dyn Query>) {
) -> (Index, Box<Query>) {
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
@@ -592,4 +594,5 @@ mod tests {
let query = query_parser.parse_query(query).unwrap();
(index, query)
}
}

View File

@@ -2,7 +2,7 @@ use crate::common::BinarySerializable;
use crate::common::CountingWriter;
use crate::common::VInt;
use crate::directory::ReadOnlySource;
use crate::directory::{TerminatingWrite, WritePtr};
use crate::directory::WritePtr;
use crate::schema::Field;
use crate::space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage;
@@ -42,7 +42,7 @@ pub struct CompositeWrite<W = WritePtr> {
offsets: HashMap<FileAddr, u64>,
}
impl<W: TerminatingWrite + Write> CompositeWrite<W> {
impl<W: Write> CompositeWrite<W> {
/// Crate a new API writer that writes a composite file
/// in a given write.
pub fn wrap(w: W) -> CompositeWrite<W> {
@@ -91,7 +91,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
let footer_len = (self.write.written_bytes() - footer_offset) as u32;
footer_len.serialize(&mut self.write)?;
self.write.terminate()
self.write.flush()?;
Ok(())
}
}
@@ -230,4 +231,5 @@ mod test {
}
}
}
}

View File

@@ -1,5 +1,3 @@
use crate::directory::AntiCallToken;
use crate::directory::TerminatingWrite;
use std::io;
use std::io::Write;
@@ -44,13 +42,6 @@ impl<W: Write> Write for CountingWriter<W> {
}
}
impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.underlying.terminate_ref(token)
}
}
#[cfg(test)]
mod test {

View File

@@ -124,24 +124,26 @@ pub fn f64_to_u64(val: f64) -> u64 {
/// Reverse the mapping given by [`i64_to_u64`](./fn.i64_to_u64.html).
#[inline(always)]
pub fn u64_to_f64(val: u64) -> f64 {
f64::from_bits(if val & HIGHEST_BIT != 0 {
val ^ HIGHEST_BIT
} else {
!val
})
f64::from_bits(
if val & HIGHEST_BIT != 0 {
val ^ HIGHEST_BIT
} else {
!val
}
)
}
#[cfg(test)]
pub(crate) mod test {
pub use super::serialize::test::fixed_size_test;
use super::{compute_num_bits, f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
use super::{compute_num_bits, i64_to_u64, u64_to_i64, f64_to_u64, u64_to_f64};
use std::f64;
fn test_i64_converter_helper(val: i64) {
assert_eq!(u64_to_i64(i64_to_u64(val)), val);
}
fn test_f64_converter_helper(val: f64) {
assert_eq!(u64_to_f64(f64_to_u64(val)), val);
}
@@ -170,8 +172,7 @@ pub(crate) mod test {
#[test]
fn test_f64_order() {
assert!(!(f64_to_u64(f64::NEG_INFINITY)..f64_to_u64(f64::INFINITY))
.contains(&f64_to_u64(f64::NAN))); //nan is not a number
assert!(!(f64_to_u64(f64::NEG_INFINITY)..f64_to_u64(f64::INFINITY)).contains(&f64_to_u64(f64::NAN))); //nan is not a number
assert!(f64_to_u64(1.5) > f64_to_u64(1.0)); //same exponent, different mantissa
assert!(f64_to_u64(2.0) > f64_to_u64(1.0)); //same mantissa, different exponent
assert!(f64_to_u64(2.0) > f64_to_u64(1.5)); //different exponent and mantissa

View File

@@ -199,7 +199,10 @@ pub mod test {
fn test_serialize_string() {
assert_eq!(serialize_test(String::from("")), 1);
assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4);
assert_eq!(serialize_test(String::from("富士さん見える。")), 1 + 3 * 8);
assert_eq!(
serialize_test(String::from("富士さん見える。")),
1 + 3 * 8
);
}
#[test]

View File

@@ -26,10 +26,9 @@ use crate::IndexWriter;
use crate::Result;
use num_cpus;
use std::borrow::BorrowMut;
use std::collections::HashSet;
use std::fmt;
#[cfg(feature = "mmap")]
use std::path::{Path, PathBuf};
use std::path::Path;
use std::sync::Arc;
fn load_metas(directory: &dyn Directory, inventory: &SegmentMetaInventory) -> Result<IndexMeta> {
@@ -174,11 +173,11 @@ impl Index {
}
/// Helper to access the tokenizer associated to a specific field.
pub fn tokenizer_for_field(&self, field: Field) -> Result<BoxedTokenizer> {
pub fn tokenizer_for_field(&self, field: Field) -> Result<Box<dyn BoxedTokenizer>> {
let field_entry = self.schema.get_field_entry(field);
let field_type = field_entry.field_type();
let tokenizer_manager: &TokenizerManager = self.tokenizers();
let tokenizer_name_opt: Option<BoxedTokenizer> = match field_type {
let tokenizer_name_opt: Option<Box<dyn BoxedTokenizer>> = match field_type {
FieldType::Str(text_options) => text_options
.get_indexing_options()
.map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
@@ -217,22 +216,8 @@ impl Index {
Index::open(mmap_directory)
}
/// Returns the list of the segment metas tracked by the index.
///
/// Such segments can of course be part of the index,
/// but also they could be segments being currently built or in the middle of a merge
/// operation.
pub fn list_all_segment_metas(&self) -> Vec<SegmentMeta> {
self.inventory.all()
}
/// Creates a new segment_meta (Advanced user only).
///
/// As long as the `SegmentMeta` lives, the files associated with the
/// `SegmentMeta` are guaranteed to not be garbage collected, regardless of
/// whether the segment is recorded as part of the index or not.
pub fn new_segment_meta(&self, segment_id: SegmentId, max_doc: u32) -> SegmentMeta {
self.inventory.new_segment_meta(segment_id, max_doc)
pub(crate) fn inventory(&self) -> &SegmentMetaInventory {
&self.inventory
}
/// Open the index using the provided directory
@@ -369,11 +354,6 @@ impl Index {
.map(SegmentMeta::id)
.collect())
}
/// Returns the set of corrupted files
pub fn validate_checksum(&self) -> Result<HashSet<PathBuf>> {
self.directory.list_damaged().map_err(Into::into)
}
}
impl fmt::Debug for Index {
@@ -479,13 +459,13 @@ mod tests {
use super::*;
use std::path::PathBuf;
use tempfile::TempDir;
use tempdir::TempDir;
#[test]
fn test_index_on_commit_reload_policy_mmap() {
let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap();
let tempdir = TempDir::new().unwrap();
let tempdir = TempDir::new("index").unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
@@ -524,7 +504,7 @@ mod tests {
fn test_index_on_commit_reload_policy_different_directories() {
let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap();
let tempdir = TempDir::new().unwrap();
let tempdir = TempDir::new("index").unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let write_index = Index::create_in_dir(&tempdir_path, schema).unwrap();
let read_index = Index::open_in_dir(&tempdir_path).unwrap();
@@ -601,4 +581,5 @@ mod tests {
assert_eq!(searcher.num_docs(), 8_000);
assert!(mem_right_after_merge_finished < mem_right_after_commit);
}
}

View File

@@ -30,6 +30,7 @@ impl SegmentMetaInventory {
.collect::<Vec<_>>()
}
#[doc(hidden)]
pub fn new_segment_meta(&self, segment_id: SegmentId, max_doc: u32) -> SegmentMeta {
let inner = InnerSegmentMeta {
segment_id,

View File

@@ -4,8 +4,6 @@ use uuid::Uuid;
#[cfg(test)]
use once_cell::sync::Lazy;
use std::error::Error;
use std::str::FromStr;
#[cfg(test)]
use std::sync::atomic;
@@ -54,51 +52,15 @@ impl SegmentId {
/// and the rest is random.
///
/// Picking the first 8 chars is ok to identify
/// segments in a display message (e.g. a5c4dfcb).
/// segments in a display message.
pub fn short_uuid_string(&self) -> String {
(&self.0.to_simple_ref().to_string()[..8]).to_string()
}
/// Returns a segment uuid string.
///
/// It consists in 32 lowercase hexadecimal chars
/// (e.g. a5c4dfcbdfe645089129e308e26d5523)
pub fn uuid_string(&self) -> String {
self.0.to_simple_ref().to_string()
}
/// Build a `SegmentId` string from the full uuid string.
///
/// E.g. "a5c4dfcbdfe645089129e308e26d5523"
pub fn from_uuid_string(uuid_string: &str) -> Result<SegmentId, SegmentIdParseError> {
FromStr::from_str(uuid_string)
}
}
/// Error type used when parsing a `SegmentId` from a string fails.
pub struct SegmentIdParseError(uuid::parser::ParseError);
impl Error for SegmentIdParseError {}
impl fmt::Debug for SegmentIdParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Display for SegmentIdParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl FromStr for SegmentId {
type Err = SegmentIdParseError;
fn from_str(uuid_string: &str) -> Result<Self, SegmentIdParseError> {
let uuid = Uuid::parse_str(uuid_string).map_err(SegmentIdParseError)?;
Ok(SegmentId(uuid))
}
}
impl fmt::Debug for SegmentId {
@@ -118,18 +80,3 @@ impl Ord for SegmentId {
self.0.as_bytes().cmp(other.0.as_bytes())
}
}
#[cfg(test)]
mod tests {
use super::SegmentId;
#[test]
fn test_to_uuid_string() {
let full_uuid = "a5c4dfcbdfe645089129e308e26d5523";
let segment_id = SegmentId::from_uuid_string(full_uuid).unwrap();
assert_eq!(segment_id.uuid_string(), full_uuid);
assert_eq!(segment_id.short_uuid_string(), "a5c4dfcb");
// one extra char
assert!(SegmentId::from_uuid_string("a5c4dfcbdfe645089129e308e26d5523b").is_err());
}
}

View File

@@ -48,14 +48,14 @@ impl RetryPolicy {
///
/// It is transparently associated to a lock file, that gets deleted
/// on `Drop.` The lock is released automatically on `Drop`.
pub struct DirectoryLock(Box<dyn Send + Sync + 'static>);
pub struct DirectoryLock(Box<dyn Drop + Send + Sync + 'static>);
struct DirectoryLockGuard {
directory: Box<dyn Directory>,
path: PathBuf,
}
impl<T: Send + Sync + 'static> From<Box<T>> for DirectoryLock {
impl<T: Drop + Send + Sync + 'static> From<Box<T>> for DirectoryLock {
fn from(underlying: Box<T>) -> Self {
DirectoryLock(underlying)
}
@@ -118,8 +118,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
///
/// Specifically, subsequent writes or flushes should
/// have no effect on the returned `ReadOnlySource` object.
///
/// You should only use this to read files create with [`open_write`]
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
/// Removes a file
@@ -159,8 +157,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// atomic_write.
///
/// This should only be used for small files.
///
/// You should only use this to read files create with [`atomic_write`]
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
/// Atomically replace the content of a file with data.

View File

@@ -1,213 +0,0 @@
use crate::directory::read_only_source::ReadOnlySource;
use crate::directory::{AntiCallToken, TerminatingWrite};
use byteorder::{ByteOrder, LittleEndian};
use crc32fast::Hasher;
use std::io;
use std::io::Write;
const COMMON_FOOTER_SIZE: usize = 4 * 5;
#[derive(Debug, Clone, PartialEq)]
pub struct Footer {
pub tantivy_version: (u32, u32, u32),
pub meta: String,
pub versioned_footer: VersionedFooter,
}
impl Footer {
pub fn new(versioned_footer: VersionedFooter) -> Self {
let tantivy_version = (
env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
);
Footer {
tantivy_version,
meta: format!(
"tantivy {}.{}.{}, index v{}",
tantivy_version.0,
tantivy_version.1,
tantivy_version.2,
versioned_footer.version()
),
versioned_footer,
}
}
pub fn to_bytes(&self) -> Vec<u8> {
let mut res = self.versioned_footer.to_bytes();
res.extend_from_slice(self.meta.as_bytes());
let len = res.len();
res.resize(len + COMMON_FOOTER_SIZE, 0);
let mut common_footer = &mut res[len..];
LittleEndian::write_u32(&mut common_footer, self.meta.len() as u32);
LittleEndian::write_u32(&mut common_footer[4..], self.tantivy_version.0);
LittleEndian::write_u32(&mut common_footer[8..], self.tantivy_version.1);
LittleEndian::write_u32(&mut common_footer[12..], self.tantivy_version.2);
LittleEndian::write_u32(&mut common_footer[16..], (len + COMMON_FOOTER_SIZE) as u32);
res
}
pub fn from_bytes(data: &[u8]) -> Result<Self, io::Error> {
let len = data.len();
if len < COMMON_FOOTER_SIZE + 4 {
// 4 bytes for index version, stored in versioned footer
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!("File corrupted. The footer len must be over 24, while the entire file len is {}", len)
)
);
}
let size = LittleEndian::read_u32(&data[len - 4..]) as usize;
if len < size as usize {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!(
"File corrupted. The footer len is {}, while the entire file len is {}",
size, len
),
));
}
let footer = &data[len - size as usize..];
let meta_len = LittleEndian::read_u32(&footer[size - 20..]) as usize;
let tantivy_major = LittleEndian::read_u32(&footer[size - 16..]);
let tantivy_minor = LittleEndian::read_u32(&footer[size - 12..]);
let tantivy_patch = LittleEndian::read_u32(&footer[size - 8..]);
Ok(Footer {
tantivy_version: (tantivy_major, tantivy_minor, tantivy_patch),
meta: String::from_utf8_lossy(&footer[size - meta_len - 20..size - 20]).into_owned(),
versioned_footer: VersionedFooter::from_bytes(&footer[..size - meta_len - 20])?,
})
}
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
let footer = Footer::from_bytes(source.as_slice())?;
let reader = source.slice_to(source.as_slice().len() - footer.size());
Ok((footer, reader))
}
pub fn size(&self) -> usize {
self.versioned_footer.size() as usize + self.meta.len() + 20
}
}
#[derive(Debug, Clone, PartialEq)]
pub enum VersionedFooter {
UnknownVersion { version: u32, size: u32 },
V0(u32), // crc
}
impl VersionedFooter {
pub fn to_bytes(&self) -> Vec<u8> {
match self {
VersionedFooter::V0(crc) => {
let mut res = vec![0; 8];
LittleEndian::write_u32(&mut res, 0);
LittleEndian::write_u32(&mut res[4..], *crc);
res
}
VersionedFooter::UnknownVersion { .. } => {
panic!("Unsupported index should never get serialized");
}
}
}
pub fn from_bytes(footer: &[u8]) -> Result<Self, io::Error> {
assert!(footer.len() >= 4);
let version = LittleEndian::read_u32(footer);
match version {
0 => {
if footer.len() == 8 {
Ok(VersionedFooter::V0(LittleEndian::read_u32(&footer[4..])))
} else {
Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!(
"File corrupted. The versioned footer len is {}, while it should be 8",
footer.len()
),
))
}
}
version => Ok(VersionedFooter::UnknownVersion {
version,
size: footer.len() as u32,
}),
}
}
pub fn size(&self) -> u32 {
match self {
VersionedFooter::V0(_) => 8,
VersionedFooter::UnknownVersion { size, .. } => *size,
}
}
pub fn version(&self) -> u32 {
match self {
VersionedFooter::V0(_) => 0,
VersionedFooter::UnknownVersion { version, .. } => *version,
}
}
pub fn crc(&self) -> Option<u32> {
match self {
VersionedFooter::V0(crc) => Some(*crc),
VersionedFooter::UnknownVersion { .. } => None,
}
}
}
pub(crate) struct FooterProxy<W: TerminatingWrite> {
/// always Some except after terminate call
hasher: Option<Hasher>,
/// always Some except after terminate call
writer: Option<W>,
}
impl<W: TerminatingWrite> FooterProxy<W> {
pub fn new(writer: W) -> Self {
FooterProxy {
hasher: Some(Hasher::new()),
writer: Some(writer),
}
}
}
impl<W: TerminatingWrite> Write for FooterProxy<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let count = self.writer.as_mut().unwrap().write(buf)?;
self.hasher.as_mut().unwrap().update(&buf[..count]);
Ok(count)
}
fn flush(&mut self) -> io::Result<()> {
self.writer.as_mut().unwrap().flush()
}
}
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
let crc = self.hasher.take().unwrap().finalize();
let footer = Footer::new(VersionedFooter::V0(crc)).to_bytes();
let mut writer = self.writer.take().unwrap();
writer.write_all(&footer)?;
writer.terminate()
}
}
#[cfg(test)]
mod tests {
use crate::directory::footer::{Footer, VersionedFooter};
#[test]
fn test_serialize_deserialize_footer() {
let crc = 123456;
let footer = Footer::new(VersionedFooter::V0(crc));
let footer_bytes = footer.to_bytes();
assert_eq!(Footer::from_bytes(&footer_bytes).unwrap(), footer);
}
}
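Reading `to_bytes` and `from_bytes` above, the on-disk layout implemented by this footer module (removed in this comparison, per the hunk header) can be summarised as follows:

```rust
// Footer layout produced by Footer::to_bytes (all integers u32, little-endian):
//
//   [ versioned footer: version | crc ]                       // 8 bytes for the V0 variant
//   [ meta string, UTF-8 ]                                    // e.g. "tantivy 0.11.0, index v0"
//   [ meta_len | major | minor | patch | total_footer_len ]   // 20-byte common footer
//
// Footer::from_bytes walks this backwards: it reads total_footer_len from the
// last 4 bytes, then the version triplet and meta_len, then the meta string,
// and hands the remaining prefix to VersionedFooter::from_bytes.
```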

View File

@@ -1,6 +1,5 @@
use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::DirectoryLock;
use crate::directory::Lock;
use crate::directory::META_LOCK;
@@ -9,7 +8,6 @@ use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption;
use crate::Directory;
use crate::Result;
use crc32fast::Hasher;
use serde_json;
use std::collections::HashSet;
use std::io;
@@ -209,59 +207,17 @@ impl ManagedDirectory {
}
Ok(())
}
/// Verify checksum of a managed file
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
let reader = self.directory.open_read(path)?;
let (footer, data) = Footer::extract_footer(reader)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
let mut hasher = Hasher::new();
hasher.update(data.as_slice());
let crc = hasher.finalize();
Ok(footer
.versioned_footer
.crc()
.map(|v| v == crc)
.unwrap_or(false))
}
/// List files for which checksum does not match content
pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
let mut hashset = HashSet::new();
let managed_paths = self
.meta_informations
.read()
.expect("Managed directory rlock poisoned in list damaged.")
.managed_paths
.clone();
for path in managed_paths.into_iter() {
if !self.validate_checksum(&path)? {
hashset.insert(path);
}
}
Ok(hashset)
}
}
impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?;
let (_footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
Ok(reader)
self.directory.open_read(path)
}
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(io::BufWriter::new(Box::new(FooterProxy::new(
self.directory
.open_write(path)?
.into_inner()
.map_err(|_| ())
.expect("buffer should be empty"),
))))
self.directory.open_write(path)
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
@@ -303,16 +259,15 @@ impl Clone for ManagedDirectory {
#[cfg(test)]
mod tests_mmap_specific {
use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite};
use crate::directory::{Directory, ManagedDirectory, MmapDirectory};
use std::collections::HashSet;
use std::fs::OpenOptions;
use std::io::Write;
use std::path::{Path, PathBuf};
use tempfile::TempDir;
use tempdir::TempDir;
#[test]
fn test_managed_directory() {
let tempdir = TempDir::new().unwrap();
let tempdir = TempDir::new("tantivy-test").unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let test_path1: &'static Path = Path::new("some_path_for_test");
@@ -320,8 +275,8 @@ mod tests_mmap_specific {
{
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let write_file = managed_directory.open_write(test_path1).unwrap();
write_file.terminate().unwrap();
let mut write_file = managed_directory.open_write(test_path1).unwrap();
write_file.flush().unwrap();
managed_directory
.atomic_write(test_path2, &[0u8, 1u8])
.unwrap();
@@ -349,15 +304,15 @@ mod tests_mmap_specific {
fn test_managed_directory_gc_while_mmapped() {
let test_path1: &'static Path = Path::new("some_path_for_test");
let tempdir = TempDir::new().unwrap();
let tempdir = TempDir::new("index").unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let living_files = HashSet::new();
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap();
write.terminate().unwrap();
managed_directory
.atomic_write(test_path1, &vec![0u8, 1u8])
.unwrap();
assert!(managed_directory.exists(test_path1));
let _mmap_read = managed_directory.open_read(test_path1).unwrap();
@@ -376,38 +331,4 @@ mod tests_mmap_specific {
}
}
#[test]
fn test_checksum() {
let test_path1: &'static Path = Path::new("some_path_for_test");
let test_path2: &'static Path = Path::new("other_test_path");
let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap();
write.terminate().unwrap();
let mut write = managed_directory.open_write(test_path2).unwrap();
write.write_all(&[3u8, 4u8, 5u8]).unwrap();
write.terminate().unwrap();
assert!(managed_directory.list_damaged().unwrap().is_empty());
let mut corrupted_path = tempdir_path.clone();
corrupted_path.push(test_path2);
let mut file = OpenOptions::new()
.write(true)
.open(&corrupted_path)
.unwrap();
file.write_all(&[255u8]).unwrap();
file.flush().unwrap();
drop(file);
let damaged = managed_directory.list_damaged().unwrap();
assert_eq!(damaged.len(), 1);
assert!(damaged.contains(test_path2));
}
}

View File

@@ -11,7 +11,6 @@ use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::read_only_source::BoxedData;
use crate::directory::AntiCallToken;
use crate::directory::Directory;
use crate::directory::DirectoryLock;
use crate::directory::Lock;
@@ -19,7 +18,7 @@ use crate::directory::ReadOnlySource;
use crate::directory::WatchCallback;
use crate::directory::WatchCallbackList;
use crate::directory::WatchHandle;
use crate::directory::{TerminatingWrite, WritePtr};
use crate::directory::WritePtr;
use atomicwrites;
use memmap::Mmap;
use std::collections::HashMap;
@@ -37,7 +36,7 @@ use std::sync::Mutex;
use std::sync::RwLock;
use std::sync::Weak;
use std::thread;
use tempfile::TempDir;
use tempdir::TempDir;
/// Create a default io error given a string.
pub(crate) fn make_io_err(msg: String) -> io::Error {
@@ -254,7 +253,7 @@ impl MmapDirectoryInner {
}
}
if let Some(watch_wrapper) = self.watcher.write().unwrap().as_mut() {
Ok(watch_wrapper.watch(watch_callback))
return Ok(watch_wrapper.watch(watch_callback));
} else {
unreachable!("At this point, watch wrapper is supposed to be initialized");
}
@@ -283,7 +282,7 @@ impl MmapDirectory {
/// This is mostly useful to test the MmapDirectory itself.
/// For your unit tests, prefer the RAMDirectory.
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
let tempdir = TempDir::new().map_err(OpenDirectoryError::IoError)?;
let tempdir = TempDir::new("index").map_err(OpenDirectoryError::IoError)?;
let tempdir_path = PathBuf::from(tempdir.path());
MmapDirectory::new(tempdir_path, Some(tempdir))
}
@@ -401,12 +400,6 @@ impl Seek for SafeFileWriter {
}
}
impl TerminatingWrite for SafeFileWriter {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
@@ -637,7 +630,7 @@ mod tests {
fn test_watch_wrapper() {
let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone();
let tmp_dir = tempfile::TempDir::new().unwrap();
let tmp_dir: TempDir = tempdir::TempDir::new("test_watch_wrapper").unwrap();
let tmp_dirpath = tmp_dir.path().to_owned();
let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
let tmp_file = tmp_dirpath.join("coucou");

View File

@@ -9,7 +9,6 @@ mod mmap_directory;
mod directory;
mod directory_lock;
mod footer;
mod managed_directory;
mod ram_directory;
mod read_only_source;
@@ -25,49 +24,18 @@ pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource;
pub(crate) use self::watch_event_router::WatchCallbackList;
pub use self::watch_event_router::{WatchCallback, WatchHandle};
use std::io::{self, BufWriter, Write};
use std::io::{BufWriter, Write};
#[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory;
pub use self::managed_directory::ManagedDirectory;
/// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
pub struct AntiCallToken(());
/// Trait used to indicate when no more write need to be done on a writer
pub trait TerminatingWrite: Write {
/// Indicate that the writer will no longer be used. Internally call terminate_ref.
fn terminate(mut self) -> io::Result<()>
where
Self: Sized,
{
self.terminate_ref(AntiCallToken(()))
}
/// You should implement this function to define custom behavior.
/// This function should flush any buffer it may hold.
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()>;
}
impl<W: TerminatingWrite + ?Sized> TerminatingWrite for Box<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.as_mut().terminate_ref(token)
}
}
impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
fn terminate_ref(&mut self, a: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.get_mut().terminate_ref(a)
}
}
/// Write object for Directory.
///
/// `WritePtr` are required to implement both Write
/// and Seek.
pub type WritePtr = BufWriter<Box<dyn TerminatingWrite>>;
pub type WritePtr = BufWriter<Box<dyn Write>>;
#[cfg(test)]
mod tests;

View File

@@ -1,9 +1,8 @@
use crate::core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList;
use crate::directory::WritePtr;
use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use crate::directory::{TerminatingWrite, WritePtr};
use fail::fail_point;
use std::collections::HashMap;
use std::fmt;
@@ -72,12 +71,6 @@ impl Write for VecWriter {
}
}
impl TerminatingWrite for VecWriter {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
#[derive(Default)]
struct InnerDirectory {
fs: HashMap<PathBuf, ReadOnlySource>,
@@ -184,7 +177,7 @@ impl Directory for RAMDirectory {
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
io::ErrorKind::Other,
msg.unwrap_or_else(|| "Undefined".to_string())
msg.unwrap_or("Undefined".to_string())
)));
let path_buf = PathBuf::from(path);

View File

@@ -127,7 +127,7 @@ fn test_watch(directory: &mut dyn Directory) {
assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data_2")
.is_ok());
for _ in 0..1_000 {
for _ in 0..100 {
if counter.load(Ordering::SeqCst) > i {
break;
}

View File

@@ -152,4 +152,5 @@ mod tests {
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(2, counter.load(Ordering::SeqCst));
}
}

View File

@@ -429,6 +429,7 @@ mod tests {
}
}
}
}
#[cfg(all(test, feature = "unstable"))]
@@ -436,9 +437,9 @@ mod bench {
use super::tests::FIELD;
use super::tests::{generate_permutation, SCHEMA};
use super::*;
use crate::common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::FastFieldReader;
use common::CompositeFile;
use directory::{Directory, RAMDirectory, WritePtr};
use fastfield::FastFieldReader;
use std::collections::HashMap;
use std::path::Path;
use test::{self, Bencher};
@@ -536,4 +537,5 @@ mod bench {
});
}
}
}

View File

@@ -5,8 +5,8 @@ use crate::postings::UnorderedTermId;
use crate::schema::{Document, Field};
use crate::termdict::TermOrdinal;
use crate::DocId;
use fnv::FnvHashMap;
use itertools::Itertools;
use std::collections::HashMap;
use std::io;
/// Writer for multi-valued (as in, more than one value per document)
@@ -102,7 +102,7 @@ impl MultiValueIntFastFieldWriter {
pub fn serialize(
&self,
serializer: &mut FastFieldSerializer,
mapping_opt: Option<&FnvHashMap<UnorderedTermId, TermOrdinal>>,
mapping_opt: Option<&HashMap<UnorderedTermId, TermOrdinal>>,
) -> io::Result<()> {
{
// writing the offset index

View File

@@ -6,7 +6,6 @@ use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use crate::postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldType, Schema};
use crate::termdict::TermOrdinal;
use fnv::FnvHashMap;
use std::collections::HashMap;
use std::io;
@@ -32,9 +31,7 @@ impl FastFieldsWriter {
_ => 0u64,
};
match *field_entry.field_type() {
FieldType::I64(ref int_options)
| FieldType::U64(ref int_options)
| FieldType::F64(ref int_options) => {
FieldType::I64(ref int_options) | FieldType::U64(ref int_options) | FieldType::F64(ref int_options) => {
match int_options.get_fastfield_cardinality() {
Some(Cardinality::SingleValue) => {
let mut fast_field_writer = IntFastFieldWriter::new(field);
@@ -117,7 +114,7 @@ impl FastFieldsWriter {
pub fn serialize(
&self,
serializer: &mut FastFieldSerializer,
mapping: &HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>,
mapping: &HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>,
) -> io::Result<()> {
for field_writer in &self.single_value_writers {
field_writer.serialize(serializer)?;

View File

@@ -8,7 +8,6 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::SegmentReader;
use crate::directory::DirectoryLock;
use crate::directory::TerminatingWrite;
use crate::docset::DocSet;
use crate::error::TantivyError;
use crate::fastfield::write_delete_bitset;
@@ -169,7 +168,6 @@ pub(crate) fn advance_deletes(
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
write_delete_bitset(&delete_bitset, &mut delete_file)?;
delete_file.terminate()?;
}
}
segment_entry.set_meta(segment.meta().clone());
@@ -211,7 +209,10 @@ fn index_documents(
assert!(num_docs > 0);
let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?;
let segment_meta = segment.index().new_segment_meta(segment_id, num_docs);
let segment_meta = segment
.index()
.inventory()
.new_segment_meta(segment_id, num_docs);
let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());
@@ -449,10 +450,12 @@ impl IndexWriter {
/// by clearing and resubmitting necessary documents
///
/// ```rust
/// use tantivy::collector::TopDocs;
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::query::QueryParser;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::*;
/// use tantivy::{doc, Index};
/// use tantivy::Index;
///
/// fn main() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
@@ -758,6 +761,7 @@ mod tests {
use crate::Index;
use crate::ReloadPolicy;
use crate::Term;
use fail;
#[test]
fn test_operations_group() {
@@ -1179,4 +1183,5 @@ mod tests {
assert!(clear_again.is_ok());
assert!(commit_again.is_ok());
}
}

View File

@@ -134,4 +134,5 @@ mod tests {
}
assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);
}
}

View File

@@ -126,7 +126,9 @@ fn perform_merge(
let num_docs = merger.write(segment_serializer)?;
let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
let segment_meta = index
.inventory()
.new_segment_meta(merged_segment.id(), num_docs);
let after_merge_segment_entry = SegmentEntry::new(segment_meta.clone(), delete_cursor, None);
Ok(after_merge_segment_entry)
@@ -280,7 +282,7 @@ impl SegmentUpdater {
fn list_files(&self) -> HashSet<PathBuf> {
let mut files = HashSet::new();
files.insert(META_FILEPATH.to_path_buf());
for segment_meta in self.0.index.list_all_segment_metas() {
for segment_meta in self.0.index.inventory().all() {
files.extend(segment_meta.list_files());
}
files

View File

@@ -49,7 +49,7 @@ pub struct SegmentWriter {
fast_field_writers: FastFieldsWriter,
fieldnorms_writer: FieldNormsWriter,
doc_opstamps: Vec<Opstamp>,
tokenizers: Vec<Option<BoxedTokenizer>>,
tokenizers: Vec<Option<Box<dyn BoxedTokenizer>>>,
}
impl SegmentWriter {
@@ -296,4 +296,5 @@ mod tests {
assert_eq!(initial_table_size(10_000_000).unwrap(), 17);
assert_eq!(initial_table_size(1_000_000_000).unwrap(), 19);
}
}

View File

@@ -3,6 +3,7 @@
#![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))]
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
#![warn(missing_docs)]
#![recursion_limit = "80"]
//! # `tantivy`
//!
@@ -10,17 +11,26 @@
//! Think `Lucene`, but in Rust.
//!
//! ```rust
//! # extern crate tempdir;
//! #
//! #[macro_use]
//! extern crate tantivy;
//!
//! // ...
//!
//! # use std::path::Path;
//! # use tempfile::TempDir;
//! # use tempdir::TempDir;
//! # use tantivy::Index;
//! # use tantivy::schema::*;
//! # use tantivy::{Score, DocAddress};
//! # use tantivy::collector::TopDocs;
//! # use tantivy::query::QueryParser;
//! # use tantivy::schema::*;
//! # use tantivy::{doc, DocAddress, Index, Score};
//! #
//! # fn main() {
//! # // Let's create a temporary directory for the
//! # // sake of this example
//! # if let Ok(dir) = TempDir::new() {
//! # if let Ok(dir) = TempDir::new("tantivy_example_dir") {
//! # run_example(dir.path()).unwrap();
//! # dir.close().unwrap();
//! # }
@@ -161,16 +171,16 @@ pub use self::snippet::{Snippet, SnippetGenerator};
mod docset;
pub use self::docset::{DocSet, SkipResult};
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
pub use crate::core::SegmentComponent;
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
pub use crate::core::{InvertedIndexReader, SegmentReader};
pub use crate::directory::Directory;
pub use crate::indexer::IndexWriter;
pub use crate::postings::Postings;
pub use crate::reader::LeasedItem;
pub use crate::schema::{Document, Term};
pub use crate::common::{i64_to_u64, u64_to_i64, f64_to_u64, u64_to_f64};
/// Expose the current version of tantivy, as well
/// as whether it was compiled with the simd compression.
pub fn version() -> &'static str {
@@ -839,8 +849,7 @@ mod tests {
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 50_000_000).unwrap();
{
let document =
doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64, fast_field_float=>4f64);
let document = doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64, fast_field_float=>4f64);
index_writer.add_document(document);
index_writer.commit().unwrap();
}

View File

@@ -22,9 +22,11 @@
///
/// # Example
///
/// ```rust
/// ```
/// #[macro_use]
/// extern crate tantivy;
///
/// use tantivy::schema::{Schema, TEXT, FAST};
/// use tantivy::doc;
///
/// //...
///
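For reference, a short sketch of a typical `doc!` invocation; the schema and field names here are illustrative rather than taken from this diff:

use tantivy::doc;
use tantivy::schema::{Schema, FAST, TEXT};

let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT);
let year = schema_builder.add_u64_field("year", FAST);
let _schema = schema_builder.build();

// `doc!` expands to a `Document` with one field value per `field => value` pair.
let document = doc!(
    title => "The Old Man and the Sea",
    year => 1951u64
);
assert_eq!(document.field_values().len(), 2);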

View File

@@ -274,15 +274,13 @@ pub mod tests {
mod bench {
use super::*;
use rand::rngs::StdRng;
use rand::Rng;
use rand::SeedableRng;
use rand::{Rng, XorShiftRng};
use test::Bencher;
fn generate_array_with_seed(n: usize, ratio: f64, seed_val: u8) -> Vec<u32> {
let mut seed: [u8; 32] = [0; 32];
seed[31] = seed_val;
let mut rng = StdRng::from_seed(seed);
let seed: &[u8; 16] = &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, seed_val];
let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed);
(0u32..).filter(|_| rng.gen_bool(ratio)).take(n).collect()
}

View File

@@ -622,23 +622,23 @@ pub mod tests {
assert!(!postings_unopt.advance());
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::tests::*;
use crate::docset::SkipResult;
use crate::query::Intersection;
use crate::schema::IndexRecordOption;
use crate::tests;
use crate::DocSet;
use docset::SkipResult;
use query::Intersection;
use schema::IndexRecordOption;
use test::{self, Bencher};
use tests;
use DocSet;
#[bench]
fn bench_segment_postings(b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
@@ -652,8 +652,7 @@ mod bench {
#[bench]
fn bench_segment_intersection(b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
let segment_postings_a = segment_reader
@@ -683,8 +682,7 @@ mod bench {
}
fn bench_skip_next(p: f64, b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
let docs = tests::sample(segment_reader.num_docs(), p);
@@ -739,8 +737,7 @@ mod bench {
#[bench]
fn bench_iterate_segment_postings(b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
let n: u32 = test::black_box(17);

View File

@@ -12,7 +12,6 @@ use crate::tokenizer::TokenStream;
use crate::tokenizer::{Token, MAX_TOKEN_LEN};
use crate::DocId;
use crate::Result;
use fnv::FnvHashMap;
use std::collections::HashMap;
use std::io;
use std::marker::PhantomData;
@@ -128,12 +127,12 @@ impl MultiFieldPostingsWriter {
pub fn serialize(
&self,
serializer: &mut InvertedIndexSerializer,
) -> Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
) -> Result<HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>> {
let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
self.term_index.iter().collect();
term_offsets.sort_unstable_by_key(|&(k, _, _)| k);
let mut unordered_term_mappings: HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>> =
let mut unordered_term_mappings: HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>> =
HashMap::new();
let field_offsets = make_field_partition(&term_offsets);
@@ -148,7 +147,7 @@ impl MultiFieldPostingsWriter {
let unordered_term_ids = term_offsets[start..stop]
.iter()
.map(|&(_, _, bucket)| bucket);
let mapping: FnvHashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
let mapping: HashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
.enumerate()
.map(|(term_ord, unord_term_id)| {
(unord_term_id as UnorderedTermId, term_ord as TermOrdinal)

View File

@@ -141,7 +141,10 @@ impl<'a> FieldSerializer<'a> {
FieldType::Str(ref text_options) => {
if let Some(text_indexing_options) = text_options.get_indexing_options() {
let index_option = text_indexing_options.index_option();
(index_option.has_freq(), index_option.has_positions())
(
index_option.is_termfreq_enabled(),
index_option.is_position_enabled(),
)
} else {
(false, false)
}

View File

@@ -310,7 +310,6 @@ mod bench {
use super::super::MemoryArena;
use super::ExpUnrolledLinkedList;
use byteorder::{NativeEndian, WriteBytesExt};
use std::iter;
use test::Bencher;
const NUM_STACK: usize = 10_000;
@@ -336,10 +335,11 @@ mod bench {
fn bench_push_stack(bench: &mut Bencher) {
bench.iter(|| {
let mut heap = MemoryArena::new();
let mut stacks: Vec<ExpUnrolledLinkedList> =
iter::repeat_with(ExpUnrolledLinkedList::new)
.take(NUM_STACK)
.collect();
let mut stacks = Vec::with_capacity(100);
for _ in 0..NUM_STACK {
let mut stack = ExpUnrolledLinkedList::new();
stacks.push(stack);
}
for s in 0..NUM_STACK {
for i in 0u32..STACK_SIZE {
let t = s * 392017 % NUM_STACK;

View File

@@ -130,4 +130,5 @@ mod tests {
assert!(!scorer.advance());
}
}
}

View File

@@ -8,13 +8,15 @@ use crate::termdict::{TermDictionary, TermStreamer};
use crate::DocId;
use crate::TantivyError;
use crate::{Result, SkipResult};
use std::sync::Arc;
use tantivy_fst::Automaton;
/// A weight struct for Fuzzy Term and Regex Queries
pub struct AutomatonWeight<A> {
pub struct AutomatonWeight<A>
where
A: Automaton + Send + Sync + 'static,
{
field: Field,
automaton: Arc<A>,
automaton: A,
}
impl<A> AutomatonWeight<A>
@@ -22,16 +24,12 @@ where
A: Automaton + Send + Sync + 'static,
{
/// Create a new AutomatonWeight
pub fn new<IntoArcA: Into<Arc<A>>>(field: Field, automaton: IntoArcA) -> AutomatonWeight<A> {
AutomatonWeight {
field,
automaton: automaton.into(),
}
pub fn new(field: Field, automaton: A) -> AutomatonWeight<A> {
AutomatonWeight { field, automaton }
}
fn automaton_stream<'a>(&'a self, term_dict: &'a TermDictionary) -> TermStreamer<'a, &'a A> {
let automaton: &A = &*self.automaton;
let term_stream_builder = term_dict.search(automaton);
let term_stream_builder = term_dict.search(&self.automaton);
term_stream_builder.into_stream()
}
}
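At the user level, holding the automaton behind an `Arc` is what lets a compiled regex be shared across queries instead of being rebuilt each time; roughly, with the `RegexQuery::from_regex` constructor that appears later in this diff (assuming the `tantivy_fst` crate is available to the caller, as in the accompanying tests):

use std::sync::Arc;
use tantivy::query::RegexQuery;
use tantivy::schema::{Schema, TEXT};
use tantivy_fst::Regex;

let mut schema_builder = Schema::builder();
let country = schema_builder.add_text_field("country", TEXT);
let _schema = schema_builder.build();

// Compile once, reuse the same automaton in several queries.
let pattern = Arc::new(Regex::new("jap[ao]n").unwrap());
let query_a = RegexQuery::from_regex(pattern.clone(), country);
let query_b = RegexQuery::from_regex(pattern, country);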

View File

@@ -216,6 +216,7 @@ mod tests {
assert!(!docset.advance());
}
}
}
#[cfg(all(test, feature = "unstable"))]
@@ -223,12 +224,13 @@ mod bench {
use super::BitSet;
use super::BitSetDocSet;
use crate::test;
use crate::tests;
use crate::DocSet;
use test;
use tests;
use DocSet;
#[bench]
fn bench_bitset_1pct_insert(b: &mut test::Bencher) {
use tests;
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
b.iter(|| {
let mut bitset = BitSet::with_max_value(1_000_000);
@@ -240,6 +242,7 @@ mod bench {
#[bench]
fn bench_bitset_1pct_clone(b: &mut test::Bencher) {
use tests;
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
let mut bitset = BitSet::with_max_value(1_000_000);
for el in els {

View File

@@ -137,4 +137,5 @@ mod tests {
fn test_idf() {
assert_nearly_equals(idf(1, 2), 0.6931472);
}
}

View File

@@ -247,7 +247,9 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![title, text]);
let query = query_parser.parse_query("Оксана Лифенко").unwrap();
let query = query_parser
.parse_query("Оксана Лифенко")
.unwrap();
let weight = query.weight(&searcher, true).unwrap();
let mut scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
scorer.advance();

View File

@@ -175,4 +175,5 @@ mod tests {
sample_skip,
);
}
}

View File

@@ -1,4 +1,3 @@
use crate::error::TantivyError::InvalidArgument;
use crate::query::{AutomatonWeight, Query, Weight};
use crate::schema::Term;
use crate::Result;
@@ -6,16 +5,11 @@ use crate::Searcher;
use levenshtein_automata::{LevenshteinAutomatonBuilder, DFA};
use once_cell::sync::Lazy;
use std::collections::HashMap;
use std::ops::Range;
/// The range of Levenshtein distances for which we will build DFAs for our terms.
/// The computation is exponential, so it is best to keep it to low single digits.
const VALID_LEVENSHTEIN_DISTANCE_RANGE: Range<u8> = (0..3);
static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Lazy::new(|| {
let mut lev_builder_cache = HashMap::new();
// TODO make population lazy on a `(distance, val)` basis
for distance in VALID_LEVENSHTEIN_DISTANCE_RANGE {
for distance in 0..3 {
for &transposition in &[false, true] {
let lev_automaton_builder = LevenshteinAutomatonBuilder::new(distance, transposition);
lev_builder_cache.insert((distance, transposition), lev_automaton_builder);
@@ -28,10 +22,12 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// containing a specific term that is within
/// Levenshtein distance
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result, Term};
/// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::FuzzyTermQuery;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result, Term};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
@@ -104,18 +100,10 @@ impl FuzzyTermQuery {
}
fn specialized_weight(&self) -> Result<AutomatonWeight<DFA>> {
// LEV_BUILDER is a HashMap, whose `get` method returns an Option
match LEV_BUILDER.get(&(self.distance, false)) {
// Unwrap the option and build the Ok(AutomatonWeight)
Some(automaton_builder) => {
let automaton = automaton_builder.build_dfa(self.term.text());
Ok(AutomatonWeight::new(self.term.field(), automaton))
}
None => Err(InvalidArgument(format!(
"Levenshtein distance of {} is not allowed. Choose a value in the {:?} range",
self.distance, VALID_LEVENSHTEIN_DISTANCE_RANGE
))),
}
let automaton = LEV_BUILDER.get(&(self.distance, false))
.unwrap() // TODO return an error
.build_dfa(self.term.text());
Ok(AutomatonWeight::new(self.term.field(), automaton))
}
}
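The `LEV_BUILDER` static above leans on `once_cell::sync::Lazy` to build its cache of automaton builders on first access. A tiny, generic sketch of that pattern (the map contents are made up for illustration):

use once_cell::sync::Lazy;
use std::collections::HashMap;

// Built exactly once, the first time SQUARES is dereferenced; later reads just hit the map.
static SQUARES: Lazy<HashMap<u32, u32>> =
    Lazy::new(|| (0..10u32).map(|i| (i, i * i)).collect());

assert_eq!(SQUARES.get(&3), Some(&9));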

View File

@@ -45,7 +45,7 @@ pub fn intersect_scorers(mut scorers: Vec<Box<dyn Scorer>>) -> Box<dyn Scorer> {
})
}
/// Creates a `DocSet` that iterates through the intersection of two or more `DocSet`s.
/// Creates a `DocSet` that iterates through the intersection of two `DocSet`s.
pub struct Intersection<TDocSet: DocSet, TOtherDocSet: DocSet = Box<dyn Scorer>> {
left: TDocSet,
right: TDocSet,

View File

@@ -5,7 +5,7 @@ use Score;
use SkipResult;
/// Creates a `DocSet` that iterates through the intersection of two `DocSet`s.
/// Creates a `DocSet` that iterates through the intersection of two `DocSet`s.
pub struct IntersectionTwoTerms<TDocSet> {
left: TDocSet,
right: TDocSet

View File

@@ -12,6 +12,7 @@ mod exclude;
mod explanation;
mod fuzzy_query;
mod intersection;
mod occur;
mod phrase_query;
mod query;
mod query_parser;
@@ -42,6 +43,7 @@ pub use self::exclude::Exclude;
pub use self::explanation::Explanation;
pub use self::fuzzy_query::FuzzyTermQuery;
pub use self::intersection::intersect_scorers;
pub use self::occur::Occur;
pub use self::phrase_query::PhraseQuery;
pub use self::query::Query;
pub use self::query_parser::QueryParser;
@@ -53,7 +55,6 @@ pub use self::scorer::ConstScorer;
pub use self::scorer::Scorer;
pub use self::term_query::TermQuery;
pub use self::weight::Weight;
pub use tantivy_query_grammar::Occur;
#[cfg(test)]
mod tests {

View File

@@ -1,6 +1,3 @@
use std::fmt;
use std::fmt::Write;
/// Defines whether a term in a query must be present,
/// should be present or must not be present.
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
@@ -21,38 +18,32 @@ impl Occur {
/// - `Should` => '?'
/// - `Must` => '+'
/// - `MustNot` => '-'
fn to_char(self) -> char {
pub fn to_char(self) -> char {
match self {
Occur::Should => '?',
Occur::Must => '+',
Occur::MustNot => '-',
}
}
}
/// Compose two occur values.
pub fn compose(left: Occur, right: Occur) -> Occur {
match left {
Occur::Should => right,
Occur::Must => {
if right == Occur::MustNot {
Occur::MustNot
} else {
Occur::Must
}
/// Compose two occur values.
pub fn compose_occur(left: Occur, right: Occur) -> Occur {
match left {
Occur::Should => right,
Occur::Must => {
if right == Occur::MustNot {
Occur::MustNot
} else {
Occur::Must
}
Occur::MustNot => {
if right == Occur::MustNot {
Occur::Must
} else {
Occur::MustNot
}
}
Occur::MustNot => {
if right == Occur::MustNot {
Occur::Must
} else {
Occur::MustNot
}
}
}
}
impl fmt::Display for Occur {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_char(self.to_char())
}
}
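Whether it is spelled as the free `compose_occur` or as the associated `Occur::compose` in the newer layout, the composition rules above boil down to a small truth table. A hedged sketch using the associated-function spelling:

use tantivy::query::Occur;

// `Should` is the neutral element, `Must` dominates it, and `MustNot`
// flips: composing two negations yields a `Must`.
assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);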

View File

@@ -18,6 +18,7 @@ pub enum LogicalLiteral {
All,
}
#[derive(Clone)]
pub enum LogicalAST {
Clause(Vec<(Occur, LogicalAST)>),
Leaf(Box<LogicalLiteral>),

View File

@@ -1,4 +1,6 @@
mod query_grammar;
mod query_parser;
mod user_input_ast;
pub mod logical_ast;
pub use self::query_parser::QueryParser;

View File

@@ -0,0 +1,284 @@
use super::query_grammar;
use super::user_input_ast::*;
use crate::query::occur::Occur;
use crate::query::query_parser::user_input_ast::UserInputBound;
use combine::char::*;
use combine::error::StreamError;
use combine::stream::StreamErrorFor;
use combine::*;
parser! {
fn field[I]()(I) -> String
where [I: Stream<Item = char>] {
(
letter(),
many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
).map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
}
}
parser! {
fn word[I]()(I) -> String
where [I: Stream<Item = char>] {
many1(satisfy(|c: char| c.is_alphanumeric() || c=='.'))
.and_then(|s: String| {
match s.as_str() {
"OR" => Err(StreamErrorFor::<I>::unexpected_static_message("OR")),
"AND" => Err(StreamErrorFor::<I>::unexpected_static_message("AND")),
"NOT" => Err(StreamErrorFor::<I>::unexpected_static_message("NOT")),
_ => Ok(s)
}
})
}
}
parser! {
fn literal[I]()(I) -> UserInputLeaf
where [I: Stream<Item = char>]
{
let term_val = || {
let phrase = (char('"'), many1(satisfy(|c| c != '"')), char('"')).map(|(_, s, _)| s);
phrase.or(word())
};
let term_val_with_field = negative_number().or(term_val());
let term_query =
(field(), char(':'), term_val_with_field).map(|(field_name, _, phrase)| UserInputLiteral {
field_name: Some(field_name),
phrase,
});
let term_default_field = term_val().map(|phrase| UserInputLiteral {
field_name: None,
phrase,
});
attempt(term_query)
.or(term_default_field)
.map(UserInputLeaf::from)
}
}
parser! {
fn negative_number[I]()(I) -> String
where [I: Stream<Item = char>]
{
(char('-'), many1(satisfy(char::is_numeric)))
.map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
}
}
parser! {
fn spaces1[I]()(I) -> ()
where [I: Stream<Item = char>] {
skip_many1(space())
}
}
parser! {
fn range[I]()(I) -> UserInputLeaf
where [I: Stream<Item = char>] {
let term_val = || {
word().or(negative_number()).or(char('*').map(|_| "*".to_string()))
};
let lower_bound = {
let excl = (char('{'), term_val()).map(|(_, w)| UserInputBound::Exclusive(w));
let incl = (char('['), term_val()).map(|(_, w)| UserInputBound::Inclusive(w));
attempt(excl).or(incl)
};
let upper_bound = {
let excl = (term_val(), char('}')).map(|(w, _)| UserInputBound::Exclusive(w));
let incl = (term_val(), char(']')).map(|(w, _)| UserInputBound::Inclusive(w));
attempt(excl).or(incl)
};
(
optional((field(), char(':')).map(|x| x.0)),
lower_bound,
spaces(),
string("TO"),
spaces(),
upper_bound,
).map(|(field, lower, _, _, _, upper)| UserInputLeaf::Range {
field,
lower,
upper
})
}
}
parser! {
fn leaf[I]()(I) -> UserInputAST
where [I: Stream<Item = char>] {
(char('-'), leaf()).map(|(_, expr)| expr.unary(Occur::MustNot) )
.or((char('+'), leaf()).map(|(_, expr)| expr.unary(Occur::Must) ))
.or((char('('), parse_to_ast(), char(')')).map(|(_, expr, _)| expr))
.or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All) ))
.or(attempt(
(string("NOT"), spaces1(), leaf()).map(|(_, _, expr)| expr.unary(Occur::MustNot))
)
)
.or(attempt(
range().map(UserInputAST::from)
)
)
.or(literal().map(|leaf| UserInputAST::Leaf(Box::new(leaf))))
}
}
enum BinaryOperand {
Or,
And,
}
parser! {
fn binary_operand[I]()(I) -> BinaryOperand
where [I: Stream<Item = char>] {
(spaces1(),
(
string("AND").map(|_| BinaryOperand::And)
.or(string("OR").map(|_| BinaryOperand::Or))
),
spaces1()).map(|(_, op,_)| op)
}
}
enum Element {
SingleEl(UserInputAST),
NormalDisjunctive(Vec<Vec<UserInputAST>>),
}
impl Element {
pub fn into_dnf(self) -> Vec<Vec<UserInputAST>> {
match self {
Element::NormalDisjunctive(conjunctions) => conjunctions,
Element::SingleEl(el) => vec![vec![el]],
}
}
}
parser! {
pub fn parse_to_ast[I]()(I) -> UserInputAST
where [I: Stream<Item = char>]
{
(
attempt(
chainl1(
leaf().map(Element::SingleEl),
binary_operand().map(|op: BinaryOperand|
move |left: Element, right: Element| {
let mut dnf = left.into_dnf();
if let Element::SingleEl(el) = right {
match op {
BinaryOperand::And => {
if let Some(last) = dnf.last_mut() {
last.push(el);
}
}
BinaryOperand::Or => {
dnf.push(vec!(el));
}
}
} else {
unreachable!("Please report.")
}
Element::NormalDisjunctive(dnf)
}
)
)
.map(query_grammar::Element::into_dnf)
.map(|fnd| {
if fnd.len() == 1 {
UserInputAST::and(fnd.into_iter().next().unwrap()) //< safe
} else {
let conjunctions = fnd
.into_iter()
.map(UserInputAST::and)
.collect();
UserInputAST::or(conjunctions)
}
})
)
.or(
sep_by(leaf(), spaces())
.map(|subqueries: Vec<UserInputAST>| {
if subqueries.len() == 1 {
subqueries.into_iter().next().unwrap()
} else {
UserInputAST::Clause(subqueries.into_iter().collect())
}
})
)
)
}
}
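Because the grammar above is built with `combine`, the whole parser is an ordinary value you call `.parse()` on. A minimal sketch with a hypothetical `debug_parse` helper (usable from inside this module, since the grammar itself is not public):

use combine::Parser;

// Hypothetical helper: parse a user query and print either the AST or the error.
fn debug_parse(query: &str) {
    match parse_to_ast().parse(query) {
        Ok((ast, _rest)) => println!("parsed: {:?}", ast),
        Err(err) => println!("syntax error: {:?}", err),
    }
}

// debug_parse("title:diary OR cow"); // would print something like (?(title:"diary") ?("cow"))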
#[cfg(test)]
mod test {
use super::*;
fn test_parse_query_to_ast_helper(query: &str, expected: &str) {
let query = parse_to_ast().parse(query).unwrap().0;
let query_str = format!("{:?}", query);
assert_eq!(query_str, expected);
}
fn test_is_parse_err(query: &str) {
assert!(parse_to_ast().parse(query).is_err());
}
#[test]
fn test_parse_query_to_ast_not_op() {
assert_eq!(
format!("{:?}", parse_to_ast().parse("NOT")),
"Err(UnexpectedParse)"
);
test_parse_query_to_ast_helper("NOTa", "\"NOTa\"");
test_parse_query_to_ast_helper("NOT a", "-(\"a\")");
}
#[test]
fn test_parse_query_to_ast_binary_op() {
test_parse_query_to_ast_helper("a AND b", "(+(\"a\") +(\"b\"))");
test_parse_query_to_ast_helper("a OR b", "(?(\"a\") ?(\"b\"))");
test_parse_query_to_ast_helper("a OR b AND c", "(?(\"a\") ?((+(\"b\") +(\"c\"))))");
test_parse_query_to_ast_helper("a AND b AND c", "(+(\"a\") +(\"b\") +(\"c\"))");
assert_eq!(
format!("{:?}", parse_to_ast().parse("a OR b aaa")),
"Err(UnexpectedParse)"
);
assert_eq!(
format!("{:?}", parse_to_ast().parse("a AND b aaa")),
"Err(UnexpectedParse)"
);
assert_eq!(
format!("{:?}", parse_to_ast().parse("aaa a OR b ")),
"Err(UnexpectedParse)"
);
assert_eq!(
format!("{:?}", parse_to_ast().parse("aaa ccc a OR b ")),
"Err(UnexpectedParse)"
);
}
#[test]
fn test_parse_query_to_ast() {
test_parse_query_to_ast_helper("+(a b) +d", "(+((\"a\" \"b\")) +(\"d\"))");
test_parse_query_to_ast_helper("(+a +b) d", "((+(\"a\") +(\"b\")) \"d\")");
test_parse_query_to_ast_helper("(+a)", "+(\"a\")");
test_parse_query_to_ast_helper("(+a +b)", "(+(\"a\") +(\"b\"))");
test_parse_query_to_ast_helper("abc:toto", "abc:\"toto\"");
test_parse_query_to_ast_helper("abc:1.1", "abc:\"1.1\"");
test_parse_query_to_ast_helper("+abc:toto", "+(abc:\"toto\")");
test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+(abc:\"toto\") -(\"titi\"))");
test_parse_query_to_ast_helper("-abc:toto", "-(abc:\"toto\")");
test_parse_query_to_ast_helper("abc:a b", "(abc:\"a\" \"b\")");
test_parse_query_to_ast_helper("abc:\"a b\"", "abc:\"a b\"");
test_parse_query_to_ast_helper("foo:[1 TO 5]", "foo:[\"1\" TO \"5\"]");
test_parse_query_to_ast_helper("[1 TO 5]", "[\"1\" TO \"5\"]");
test_parse_query_to_ast_helper("foo:{a TO z}", "foo:{\"a\" TO \"z\"}");
test_parse_query_to_ast_helper("foo:[1 TO toto}", "foo:[\"1\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[* TO toto}", "foo:[\"*\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[1 TO *}", "foo:[\"1\" TO \"*\"}");
test_parse_query_to_ast_helper("foo:[1.1 TO *}", "foo:[\"1.1\" TO \"*\"}");
test_is_parse_err("abc + ");
}
}

View File

@@ -1,5 +1,9 @@
use super::logical_ast::*;
use super::query_grammar::parse_to_ast;
use super::user_input_ast::*;
use crate::core::Index;
use crate::query::occur::compose_occur;
use crate::query::query_parser::logical_ast::LogicalAST;
use crate::query::AllQuery;
use crate::query::BooleanQuery;
use crate::query::EmptyQuery;
@@ -12,58 +16,44 @@ use crate::schema::IndexRecordOption;
use crate::schema::{Field, Schema};
use crate::schema::{FieldType, Term};
use crate::tokenizer::TokenizerManager;
use combine::Parser;
use std::borrow::Cow;
use std::num::{ParseFloatError, ParseIntError};
use std::num::{ParseIntError, ParseFloatError};
use std::ops::Bound;
use std::str::FromStr;
use tantivy_query_grammar::{UserInputAST, UserInputBound, UserInputLeaf};
/// Possible error that may happen when parsing a query.
#[derive(Debug, PartialEq, Eq, Fail)]
#[derive(Debug, PartialEq, Eq)]
pub enum QueryParserError {
/// Error in the query syntax
#[fail(display = "Syntax Error")]
SyntaxError,
/// `FieldDoesNotExist(field_name: String)`
/// The query references a field that is not in the schema
#[fail(display = "Field does not exist: '{:?}'", _0)]
FieldDoesNotExist(String),
/// The query contains a term for a `u64` or `i64`-field, but the value
/// is neither.
#[fail(display = "Expected a valid integer: '{:?}'", _0)]
ExpectedInt(ParseIntError),
/// The query contains a term for a `f64`-field, but the value
/// is not a f64.
#[fail(display = "Expected a valid float: '{:?}'", _0)]
ExpectedFloat(ParseFloatError),
/// Queries that are only "excluding" (e.g. -title:pop) are forbidden.
#[fail(display = "Invalid query: Only excluding terms given")]
AllButQueryForbidden,
/// If no default field is declared, running a query without any
/// field specified is forbidden.
#[fail(display = "No default field declared and no field specified in query")]
NoDefaultFieldDeclared,
/// The field searched for is not declared
/// as indexed in the schema.
#[fail(display = "The field '{:?}' is not declared as indexed", _0)]
FieldNotIndexed(String),
/// A phrase query was requested for a field that does not
/// have any positions indexed.
#[fail(display = "The field '{:?}' does not have positions indexed", _0)]
FieldDoesNotHavePositionsIndexed(String),
/// The tokenizer for the given field is unknown
/// The two argument strings are the name of the field and the name of the tokenizer
#[fail(
display = "The tokenizer '{:?}' for the field '{:?}' is unknown",
_0, _1
)]
UnknownTokenizer(String, String),
/// The query contains a range query with a phrase as one of the bounds.
/// Only terms can be used as bounds.
#[fail(display = "A range query cannot have a phrase as one of the bounds")]
RangeMustNotHavePhrase,
/// The format for the date field is not RFC 3339 compliant.
#[fail(display = "The date field has an invalid format")]
DateFormatError(chrono::ParseError),
}
@@ -218,8 +208,9 @@ impl QueryParser {
/// Parse the user query into an AST.
fn parse_query_to_logical_ast(&self, query: &str) -> Result<LogicalAST, QueryParserError> {
let user_input_ast =
tantivy_query_grammar::parse_query(query).map_err(|_| QueryParserError::SyntaxError)?;
let (user_input_ast, _remaining) = parse_to_ast()
.parse(query)
.map_err(|_| QueryParserError::SyntaxError)?;
self.compute_logical_ast(user_input_ast)
}
@@ -364,7 +355,6 @@ impl QueryParser {
match *bound {
UserInputBound::Inclusive(_) => Ok(Bound::Included(term)),
UserInputBound::Exclusive(_) => Ok(Bound::Excluded(term)),
UserInputBound::Unbounded => Ok(Bound::Unbounded),
}
}
@@ -394,7 +384,7 @@ impl QueryParser {
let mut logical_sub_queries: Vec<(Occur, LogicalAST)> = Vec::new();
for sub_query in sub_queries {
let (occur, sub_ast) = self.compute_logical_ast_with_occur(sub_query)?;
let new_occur = Occur::compose(default_occur, occur);
let new_occur = compose_occur(default_occur, occur);
logical_sub_queries.push((new_occur, sub_ast));
}
Ok((Occur::Should, LogicalAST::Clause(logical_sub_queries)))
@@ -402,7 +392,7 @@ impl QueryParser {
UserInputAST::Unary(left_occur, subquery) => {
let (right_occur, logical_sub_queries) =
self.compute_logical_ast_with_occur(*subquery)?;
Ok((Occur::compose(left_occur, right_occur), logical_sub_queries))
Ok((compose_occur(left_occur, right_occur), logical_sub_queries))
}
UserInputAST::Leaf(leaf) => {
let result_ast = self.compute_logical_ast_from_leaf(*leaf)?;
@@ -624,7 +614,7 @@ mod test {
pub fn test_parse_query_untokenized() {
test_parse_query_to_logical_ast_helper(
"nottokenized:\"wordone wordtwo\"",
"Term(field=7,bytes=[119, 111, 114, 100, 111, 110, \
"Term([0, 0, 0, 7, 119, 111, 114, 100, 111, 110, \
101, 32, 119, 111, 114, 100, 116, 119, 111])",
false,
);
@@ -668,7 +658,7 @@ mod test {
.is_ok());
test_parse_query_to_logical_ast_helper(
"unsigned:2324",
"Term(field=3,bytes=[0, 0, 0, 0, 0, 0, 9, 20])",
"Term([0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 9, 20])",
false,
);
@@ -686,22 +676,22 @@ mod test {
}
#[test]
pub fn test_parse_query_to_ast_single_term() {
pub fn test_parse_query_to_ast_disjunction() {
test_parse_query_to_logical_ast_helper(
"title:toto",
"Term(field=0,bytes=[116, 111, 116, 111])",
"Term([0, 0, 0, 0, 116, 111, 116, 111])",
false,
);
test_parse_query_to_logical_ast_helper(
"+title:toto",
"Term(field=0,bytes=[116, 111, 116, 111])",
"Term([0, 0, 0, 0, 116, 111, 116, 111])",
false,
);
test_parse_query_to_logical_ast_helper(
"+title:toto -titi",
"(+Term(field=0,bytes=[116, 111, 116, 111]) \
-(Term(field=0,bytes=[116, 105, 116, 105]) \
Term(field=1,bytes=[116, 105, 116, 105])))",
"(+Term([0, 0, 0, 0, 116, 111, 116, 111]) \
-(Term([0, 0, 0, 0, 116, 105, 116, 105]) \
Term([0, 0, 0, 1, 116, 105, 116, 105])))",
false,
);
assert_eq!(
@@ -710,67 +700,49 @@ mod test {
.unwrap(),
QueryParserError::AllButQueryForbidden
);
}
#[test]
pub fn test_parse_query_to_ast_two_terms() {
test_parse_query_to_logical_ast_helper(
"title:a b",
"(Term(field=0,bytes=[97]) (Term(field=0,bytes=[98]) Term(field=1,bytes=[98])))",
"(Term([0, 0, 0, 0, 97]) (Term([0, 0, 0, 0, 98]) \
Term([0, 0, 0, 1, 98])))",
false,
);
test_parse_query_to_logical_ast_helper(
"title:\"a b\"",
"\"[(0, Term(field=0,bytes=[97])), \
(1, Term(field=0,bytes=[98]))]\"",
"\"[(0, Term([0, 0, 0, 0, 97])), \
(1, Term([0, 0, 0, 0, 98]))]\"",
false,
);
}
#[test]
pub fn test_parse_query_to_ast_ranges() {
test_parse_query_to_logical_ast_helper(
"title:[a TO b]",
"(Included(Term(field=0,bytes=[97])) TO Included(Term(field=0,bytes=[98])))",
"(Included(Term([0, 0, 0, 0, 97])) TO \
Included(Term([0, 0, 0, 0, 98])))",
false,
);
test_parse_query_to_logical_ast_helper(
"[a TO b]",
"((Included(Term(field=0,bytes=[97])) TO \
Included(Term(field=0,bytes=[98]))) \
(Included(Term(field=1,bytes=[97])) TO \
Included(Term(field=1,bytes=[98]))))",
"((Included(Term([0, 0, 0, 0, 97])) TO \
Included(Term([0, 0, 0, 0, 98]))) \
(Included(Term([0, 0, 0, 1, 97])) TO \
Included(Term([0, 0, 0, 1, 98]))))",
false,
);
test_parse_query_to_logical_ast_helper(
"title:{titi TO toto}",
"(Excluded(Term(field=0,bytes=[116, 105, 116, 105])) TO \
Excluded(Term(field=0,bytes=[116, 111, 116, 111])))",
"(Excluded(Term([0, 0, 0, 0, 116, 105, 116, 105])) TO \
Excluded(Term([0, 0, 0, 0, 116, 111, 116, 111])))",
false,
);
test_parse_query_to_logical_ast_helper(
"title:{* TO toto}",
"(Unbounded TO Excluded(Term(field=0,bytes=[116, 111, 116, 111])))",
"(Unbounded TO \
Excluded(Term([0, 0, 0, 0, 116, 111, 116, 111])))",
false,
);
test_parse_query_to_logical_ast_helper(
"title:{titi TO *}",
"(Excluded(Term(field=0,bytes=[116, 105, 116, 105])) TO Unbounded)",
"(Excluded(Term([0, 0, 0, 0, 116, 105, 116, 105])) TO Unbounded)",
false,
);
test_parse_query_to_logical_ast_helper(
"signed:{-5 TO 3}",
"(Excluded(Term(field=2,bytes=[127, 255, 255, 255, 255, 255, 255, 251])) TO \
Excluded(Term(field=2,bytes=[128, 0, 0, 0, 0, 0, 0, 3])))",
false,
);
test_parse_query_to_logical_ast_helper(
"float:{-1.5 TO 1.5}",
"(Excluded(Term(field=10,bytes=[64, 7, 255, 255, 255, 255, 255, 255])) TO \
Excluded(Term(field=10,bytes=[191, 248, 0, 0, 0, 0, 0, 0])))",
false,
);
test_parse_query_to_logical_ast_helper("*", "*", false);
}
@@ -872,19 +844,19 @@ mod test {
pub fn test_parse_query_to_ast_conjunction() {
test_parse_query_to_logical_ast_helper(
"title:toto",
"Term(field=0,bytes=[116, 111, 116, 111])",
"Term([0, 0, 0, 0, 116, 111, 116, 111])",
true,
);
test_parse_query_to_logical_ast_helper(
"+title:toto",
"Term(field=0,bytes=[116, 111, 116, 111])",
"Term([0, 0, 0, 0, 116, 111, 116, 111])",
true,
);
test_parse_query_to_logical_ast_helper(
"+title:toto -titi",
"(+Term(field=0,bytes=[116, 111, 116, 111]) \
-(Term(field=0,bytes=[116, 105, 116, 105]) \
Term(field=1,bytes=[116, 105, 116, 105])))",
"(+Term([0, 0, 0, 0, 116, 111, 116, 111]) \
-(Term([0, 0, 0, 0, 116, 105, 116, 105]) \
Term([0, 0, 0, 1, 116, 105, 116, 105])))",
true,
);
assert_eq!(
@@ -895,25 +867,16 @@ mod test {
);
test_parse_query_to_logical_ast_helper(
"title:a b",
"(+Term(field=0,bytes=[97]) \
+(Term(field=0,bytes=[98]) \
Term(field=1,bytes=[98])))",
"(+Term([0, 0, 0, 0, 97]) \
+(Term([0, 0, 0, 0, 98]) \
Term([0, 0, 0, 1, 98])))",
true,
);
test_parse_query_to_logical_ast_helper(
"title:\"a b\"",
"\"[(0, Term(field=0,bytes=[97])), \
(1, Term(field=0,bytes=[98]))]\"",
"\"[(0, Term([0, 0, 0, 0, 97])), \
(1, Term([0, 0, 0, 0, 98]))]\"",
true,
);
}
#[test]
pub fn test_query_parser_hyphen() {
test_parse_query_to_logical_ast_helper(
"title:www-form-encoded",
"\"[(0, Term(field=0,bytes=[119, 119, 119])), (1, Term(field=0,bytes=[102, 111, 114, 109])), (2, Term(field=0,bytes=[101, 110, 99, 111, 100, 101, 100]))]\"",
false
);
}
}

View File

@@ -0,0 +1,44 @@
use std::sync::Arc;
use stemmer;
pub struct StemmerTokenStream<TailTokenStream>
where TailTokenStream: TokenStream {
tail: TailTokenStream,
stemmer: Arc<stemmer::Stemmer>,
}
impl<TailTokenStream> TokenStream for StemmerTokenStream<TailTokenStream>
where TailTokenStream: TokenStream {
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
fn advance(&mut self) -> bool {
if self.tail.advance() {
// self.tail.token_mut().term.make_ascii_lowercase();
let new_str = self.stemmer.stem_str(&self.token().term);
true
}
else {
false
}
}
}
impl<TailTokenStream> StemmerTokenStream<TailTokenStream>
where TailTokenStream: TokenStream {
fn wrap(stemmer: Arc<stemmer::Stemmer>, tail: TailTokenStream) -> StemmerTokenStream<TailTokenStream> {
StemmerTokenStream {
tail,
stemmer,
}
}
}

View File

@@ -1,9 +1,8 @@
use std::fmt;
use std::fmt::{Debug, Formatter};
use crate::Occur;
use crate::query::Occur;
#[derive(PartialEq)]
pub enum UserInputLeaf {
Literal(UserInputLiteral),
All,
@@ -36,7 +35,6 @@ impl Debug for UserInputLeaf {
}
}
#[derive(PartialEq)]
pub struct UserInputLiteral {
pub field_name: Option<String>,
pub phrase: String,
@@ -51,11 +49,9 @@ impl fmt::Debug for UserInputLiteral {
}
}
#[derive(PartialEq)]
pub enum UserInputBound {
Inclusive(String),
Exclusive(String),
Unbounded,
}
impl UserInputBound {
@@ -63,7 +59,6 @@ impl UserInputBound {
match *self {
UserInputBound::Inclusive(ref word) => write!(formatter, "[\"{}\"", word),
UserInputBound::Exclusive(ref word) => write!(formatter, "{{\"{}\"", word),
UserInputBound::Unbounded => write!(formatter, "{{\"*\""),
}
}
@@ -71,7 +66,6 @@ impl UserInputBound {
match *self {
UserInputBound::Inclusive(ref word) => write!(formatter, "\"{}\"]", word),
UserInputBound::Exclusive(ref word) => write!(formatter, "\"{}\"}}", word),
UserInputBound::Unbounded => write!(formatter, "\"*\"}}"),
}
}
@@ -79,7 +73,6 @@ impl UserInputBound {
match *self {
UserInputBound::Inclusive(ref contents) => contents,
UserInputBound::Exclusive(ref contents) => contents,
UserInputBound::Unbounded => &"*",
}
}
}
@@ -87,6 +80,9 @@ impl UserInputBound {
pub enum UserInputAST {
Clause(Vec<UserInputAST>),
Unary(Occur, Box<UserInputAST>),
// Not(Box<UserInputAST>),
// Should(Box<UserInputAST>),
// Must(Box<UserInputAST>),
Leaf(Box<UserInputLeaf>),
}
@@ -96,7 +92,7 @@ impl UserInputAST {
}
fn compose(occur: Occur, asts: Vec<UserInputAST>) -> UserInputAST {
assert_ne!(occur, Occur::MustNot);
assert!(occur != Occur::MustNot);
assert!(!asts.is_empty());
if asts.len() == 1 {
asts.into_iter().next().unwrap() //< safe
@@ -109,10 +105,6 @@ impl UserInputAST {
}
}
pub fn empty_query() -> UserInputAST {
UserInputAST::Clause(Vec::default())
}
pub fn and(asts: Vec<UserInputAST>) -> UserInputAST {
UserInputAST::compose(Occur::Must, asts)
}
@@ -122,6 +114,42 @@ impl UserInputAST {
}
}
/*
impl UserInputAST {
fn compose_occur(self, occur: Occur) -> UserInputAST {
match self {
UserInputAST::Not(other) => {
let new_occur = compose_occur(Occur::MustNot, occur);
other.simplify()
}
_ => {
self
}
}
}
pub fn simplify(self) -> UserInputAST {
match self {
UserInputAST::Clause(els) => {
if els.len() == 1 {
return els.into_iter().next().unwrap();
} else {
return self;
}
}
UserInputAST::Not(els) => {
if els.len() == 1 {
return els.into_iter().next().unwrap();
} else {
return self;
}
}
}
}
}
*/
impl From<UserInputLiteral> for UserInputLeaf {
fn from(literal: UserInputLiteral) -> UserInputLeaf {
UserInputLeaf::Literal(literal)
@@ -151,7 +179,7 @@ impl fmt::Debug for UserInputAST {
Ok(())
}
UserInputAST::Unary(ref occur, ref subquery) => {
write!(formatter, "{}({:?})", occur, subquery)
write!(formatter, "{}({:?})", occur.to_char(), subquery)
}
UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
}

View File

@@ -38,10 +38,14 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// # Example
///
/// ```rust
/// # use tantivy::collector::Count;
/// # use tantivy::query::RangeQuery;
///
/// # #[macro_use]
/// # extern crate tantivy;
/// # use tantivy::Index;
/// # use tantivy::schema::{Schema, INDEXED};
/// # use tantivy::{doc, Index, Result};
/// # use tantivy::collector::Count;
/// # use tantivy::Result;
/// # use tantivy::query::RangeQuery;
/// #
/// # fn run() -> Result<()> {
/// # let mut schema_builder = Schema::builder();
@@ -334,33 +338,39 @@ mod tests {
use crate::collector::Count;
use crate::schema::{Document, Field, Schema, INDEXED};
use crate::Index;
use crate::Result;
use std::collections::Bound;
#[test]
fn test_range_query_simple() {
let mut schema_builder = Schema::builder();
let year_field = schema_builder.add_u64_field("year", INDEXED);
let schema = schema_builder.build();
fn run() -> Result<()> {
let mut schema_builder = Schema::builder();
let year_field = schema_builder.add_u64_field("year", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
for year in 1950u64..2017u64 {
let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
for _ in 0..num_docs_within_year {
index_writer.add_document(doc!(year_field => year));
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
for year in 1950u64..2017u64 {
let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
for _ in 0..num_docs_within_year {
index_writer.add_document(doc!(year_field => year));
}
}
index_writer.commit().unwrap();
}
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960u64..1970u64);
// ... or `1960..=1969` if inclusive range is enabled.
let count = searcher.search(&docs_in_the_sixties, &Count)?;
assert_eq!(count, 2285);
Ok(())
}
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960u64..1970u64);
// ... or `1960..=1969` if inclusive range is enabled.
let count = searcher.search(&docs_in_the_sixties, &Count).unwrap();
assert_eq!(count, 2285);
run().unwrap();
}
#[test]
@@ -450,10 +460,7 @@ mod tests {
let count_multiples =
|range_query: RangeQuery| searcher.search(&range_query, &Count).unwrap();
assert_eq!(
count_multiples(RangeQuery::new_f64(float_field, 10.0..11.0)),
9
);
assert_eq!(count_multiples(RangeQuery::new_f64(float_field, 10.0..11.0)), 9);
assert_eq!(
count_multiples(RangeQuery::new_f64_bounds(
float_field,
@@ -479,4 +486,5 @@ mod tests {
91
);
}
}

View File

@@ -4,18 +4,22 @@ use crate::schema::Field;
use crate::Result;
use crate::Searcher;
use std::clone::Clone;
use std::sync::Arc;
use tantivy_fst::Regex;
/// A Regex Query matches all of the documents
// A Regex Query matches all of the documents
/// containing a specific term that matches
/// a regex pattern.
/// a regex pattern
/// A Fuzzy Query matches all of the documents
/// containing a specific term that is within
/// Levenshtein distance
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result, Term};
/// use tantivy::collector::Count;
/// use tantivy::query::RegexQuery;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result, Term};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
@@ -44,7 +48,7 @@ use tantivy_fst::Regex;
/// let searcher = reader.searcher();
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::from_pattern("d[ai]{2}ry", title)?;
/// let query = RegexQuery::new("d[ai]{2}ry".to_string(), title);
/// let count = searcher.search(&query, &Count)?;
/// assert_eq!(count, 3);
/// Ok(())
@@ -52,34 +56,30 @@ use tantivy_fst::Regex;
/// ```
#[derive(Debug, Clone)]
pub struct RegexQuery {
regex: Arc<Regex>,
regex_pattern: String,
field: Field,
}
impl RegexQuery {
/// Creates a new RegexQuery from a given pattern
pub fn from_pattern(regex_pattern: &str, field: Field) -> Result<Self> {
let regex = Regex::new(&regex_pattern)
.map_err(|_| TantivyError::InvalidArgument(regex_pattern.to_string()))?;
Ok(RegexQuery::from_regex(regex, field))
}
/// Creates a new RegexQuery from a fully built Regex
pub fn from_regex<T: Into<Arc<Regex>>>(regex: T, field: Field) -> Self {
/// Creates a new RegexQuery
pub fn new(regex_pattern: String, field: Field) -> RegexQuery {
RegexQuery {
regex: regex.into(),
regex_pattern,
field,
}
}
fn specialized_weight(&self) -> AutomatonWeight<Regex> {
AutomatonWeight::new(self.field, self.regex.clone())
fn specialized_weight(&self) -> Result<AutomatonWeight<Regex>> {
let automaton = Regex::new(&self.regex_pattern)
.map_err(|_| TantivyError::InvalidArgument(self.regex_pattern.clone()))?;
Ok(AutomatonWeight::new(self.field, automaton))
}
}
impl Query for RegexQuery {
fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<dyn Weight>> {
Ok(Box::new(self.specialized_weight()))
Ok(Box::new(self.specialized_weight()?))
}
}
@@ -87,14 +87,13 @@ impl Query for RegexQuery {
mod test {
use super::RegexQuery;
use crate::collector::TopDocs;
use crate::schema::Schema;
use crate::schema::TEXT;
use crate::schema::{Field, Schema};
use crate::tests::assert_nearly_equals;
use crate::{Index, IndexReader};
use std::sync::Arc;
use tantivy_fst::Regex;
use crate::Index;
fn build_test_index() -> (IndexReader, Field) {
#[test]
pub fn test_regex_query() {
let mut schema_builder = Schema::builder();
let country_field = schema_builder.add_text_field("country", TEXT);
let schema = schema_builder.build();
@@ -110,65 +109,20 @@ mod test {
index_writer.commit().unwrap();
}
let reader = index.reader().unwrap();
(reader, country_field)
}
fn verify_regex_query(
query_matching_one: RegexQuery,
query_matching_zero: RegexQuery,
reader: IndexReader,
) {
let searcher = reader.searcher();
{
let regex_query = RegexQuery::new("jap[ao]n".to_string(), country_field);
let scored_docs = searcher
.search(&query_matching_one, &TopDocs::with_limit(2))
.search(&regex_query, &TopDocs::with_limit(2))
.unwrap();
assert_eq!(scored_docs.len(), 1, "Expected only 1 document");
let (score, _) = scored_docs[0];
assert_nearly_equals(1f32, score);
}
let regex_query = RegexQuery::new("jap[A-Z]n".to_string(), country_field);
let top_docs = searcher
.search(&query_matching_zero, &TopDocs::with_limit(2))
.search(&regex_query, &TopDocs::with_limit(2))
.unwrap();
assert!(top_docs.is_empty(), "Expected ZERO document");
}
#[test]
pub fn test_regex_query() {
let (reader, field) = build_test_index();
let matching_one = RegexQuery::from_pattern("jap[ao]n", field).unwrap();
let matching_zero = RegexQuery::from_pattern("jap[A-Z]n", field).unwrap();
verify_regex_query(matching_one, matching_zero, reader);
}
#[test]
pub fn test_construct_from_regex() {
let (reader, field) = build_test_index();
let matching_one = RegexQuery::from_regex(Regex::new("jap[ao]n").unwrap(), field);
let matching_zero = RegexQuery::from_regex(Regex::new("jap[A-Z]n").unwrap(), field);
verify_regex_query(matching_one, matching_zero, reader);
}
#[test]
pub fn test_construct_from_reused_regex() {
let r1 = Arc::new(Regex::new("jap[ao]n").unwrap());
let r2 = Arc::new(Regex::new("jap[A-Z]n").unwrap());
let (reader, field) = build_test_index();
let matching_one = RegexQuery::from_regex(r1.clone(), field);
let matching_zero = RegexQuery::from_regex(r2.clone(), field);
verify_regex_query(matching_one, matching_zero, reader.clone());
let matching_one = RegexQuery::from_regex(r1.clone(), field);
let matching_zero = RegexQuery::from_regex(r2.clone(), field);
verify_regex_query(matching_one, matching_zero, reader.clone());
}
}

View File

@@ -190,4 +190,5 @@ mod tests {
skip_docs,
);
}
}

View File

@@ -12,7 +12,7 @@ mod tests {
use crate::collector::TopDocs;
use crate::docset::DocSet;
use crate::query::{Query, QueryParser, Scorer, TermQuery};
use crate::schema::{Field, IndexRecordOption, Schema, STRING, TEXT};
use crate::schema::{IndexRecordOption, Schema, STRING, TEXT};
use crate::tests::assert_nearly_equals;
use crate::Index;
use crate::Term;
@@ -114,16 +114,4 @@ mod tests {
let reader = index.reader().unwrap();
assert_eq!(term_query.count(&*reader.searcher()).unwrap(), 1);
}
#[test]
fn test_term_query_debug() {
let term_query = TermQuery::new(
Term::from_field_text(Field(1), "hello"),
IndexRecordOption::WithFreqs,
);
assert_eq!(
format!("{:?}", term_query),
"TermQuery(Term(field=1,bytes=[104, 101, 108, 108, 111]))"
);
}
}

View File

@@ -7,7 +7,6 @@ use crate::Result;
use crate::Searcher;
use crate::Term;
use std::collections::BTreeSet;
use std::fmt;
/// A Term query matches all of the documents
/// containing a specific term.
@@ -20,10 +19,12 @@ use std::fmt;
/// * `field norm` - number of tokens in the field.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT, IndexRecordOption};
/// use tantivy::{Index, Result, Term};
/// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::TermQuery;
/// use tantivy::schema::{Schema, TEXT, IndexRecordOption};
/// use tantivy::{doc, Index, Result, Term};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
@@ -60,18 +61,12 @@ use std::fmt;
/// Ok(())
/// }
/// ```
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct TermQuery {
term: Term,
index_record_option: IndexRecordOption,
}
impl fmt::Debug for TermQuery {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "TermQuery({:?})", self.term)
}
}
impl TermQuery {
/// Creates a new term query.
pub fn new(term: Term, segment_postings_options: IndexRecordOption) -> TermQuery {

View File

@@ -28,7 +28,7 @@ where
}
}
/// Creates a `DocSet` that iterates through the union of two or more `DocSet`s.
/// Creates a `DocSet` that iterates through the union of two `DocSet`s.
pub struct Union<TScorer, TScoreCombiner = DoNothingCombiner> {
docsets: Vec<TScorer>,
bitsets: Box<[TinySet; HORIZON_NUM_TINYBITSETS]>,
@@ -409,17 +409,20 @@ mod tests {
vec![1, 2, 3, 7, 8, 9, 99, 100, 101, 500, 20000],
);
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use crate::query::score_combiner::DoNothingCombiner;
use crate::query::{ConstScorer, Union, VecDocSet};
use crate::tests;
use crate::DocId;
use crate::DocSet;
use query::score_combiner::DoNothingCombiner;
use query::ConstScorer;
use query::Union;
use query::VecDocSet;
use test::Bencher;
use tests;
use DocId;
use DocSet;
#[bench]
fn bench_union_3_high(bench: &mut Bencher) {

View File

@@ -82,4 +82,5 @@ pub mod tests {
}
assert_eq!(postings.fill_buffer(&mut buffer[..]), 9);
}
}

View File

@@ -1,7 +1,6 @@
mod pool;
pub use self::pool::LeasedItem;
use self::pool::Pool;
use self::pool::{LeasedItem, Pool};
use crate::core::Segment;
use crate::directory::Directory;
use crate::directory::WatchHandle;

View File

@@ -123,10 +123,6 @@ impl<T> Pool<T> {
}
}
/// A LeasedItem holds an object borrowed from a Pool.
///
/// Upon drop, the object is automatically returned
/// into the pool.
pub struct LeasedItem<T> {
gen_item: Option<GenerationItem<T>>,
recycle_queue: Arc<Queue<GenerationItem<T>>>,

View File

@@ -178,4 +178,5 @@ mod tests {
doc.add_text(text_field, "My title");
assert_eq!(doc.field_values().len(), 1);
}
}

View File

@@ -120,7 +120,9 @@ impl Facet {
/// Extract path from the `Facet`.
pub fn to_path(&self) -> Vec<&str> {
self.encoded_str().split(|c| c == FACET_SEP_CHAR).collect()
self.encoded_str()
.split(|c| c == FACET_SEP_CHAR)
.collect()
}
}
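A quick sketch of what `to_path` gives back for an ordinary facet (the path is illustrative):

use tantivy::schema::Facet;

// A facet is an encoded path; `to_path` splits it back into its segments.
let facet = Facet::from("/category/fiction/novel");
assert_eq!(facet.to_path(), vec!["category", "fiction", "novel"]);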

View File

@@ -108,9 +108,7 @@ impl FieldEntry {
/// Returns true iff the field is an int (signed or unsigned) fast field
pub fn is_int_fast(&self) -> bool {
match self.field_type {
FieldType::U64(ref options)
| FieldType::I64(ref options)
| FieldType::F64(ref options) => options.is_fast(),
FieldType::U64(ref options) | FieldType::I64(ref options) | FieldType::F64(ref options) => options.is_fast(),
_ => false,
}
}

View File

@@ -10,7 +10,7 @@ use serde_json::Value as JsonValue;
/// Possible error that may occur while parsing a field value.
/// At this point the JSON is known to be valid.
#[derive(Debug, PartialEq)]
#[derive(Debug)]
pub enum ValueParsingError {
/// Encountered a numerical value that overflows or underflows its integer type.
OverflowError(String),
@@ -83,9 +83,9 @@ impl FieldType {
pub fn is_indexed(&self) -> bool {
match *self {
FieldType::Str(ref text_options) => text_options.get_indexing_options().is_some(),
FieldType::U64(ref int_options)
| FieldType::I64(ref int_options)
| FieldType::F64(ref int_options) => int_options.is_indexed(),
FieldType::U64(ref int_options) | FieldType::I64(ref int_options) | FieldType::F64(ref int_options) => {
int_options.is_indexed()
}
FieldType::Date(ref date_options) => date_options.is_indexed(),
FieldType::HierarchicalFacet => true,
FieldType::Bytes => false,
@@ -125,12 +125,9 @@ impl FieldType {
match *json {
JsonValue::String(ref field_text) => match *self {
FieldType::Str(_) => Ok(Value::Str(field_text.clone())),
FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => {
Err(ValueParsingError::TypeError(format!(
"Expected an integer, got {:?}",
json
)))
}
FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => Err(
ValueParsingError::TypeError(format!("Expected an integer, got {:?}", json)),
),
FieldType::HierarchicalFacet => Ok(Value::Facet(Facet::from(field_text))),
FieldType::Bytes => decode(field_text).map(Value::Bytes).map_err(|_| {
ValueParsingError::InvalidBase64(format!(
@@ -155,7 +152,7 @@ impl FieldType {
let msg = format!("Expected a u64 int, got {:?}", json);
Err(ValueParsingError::OverflowError(msg))
}
}
},
FieldType::F64(_) => {
if let Some(field_val_f64) = field_val_num.as_f64() {
Ok(Value::F64(field_val_f64))

View File

@@ -29,6 +29,22 @@ pub enum IndexRecordOption {
}
impl IndexRecordOption {
/// Returns true iff the term frequency will be encoded.
pub fn is_termfreq_enabled(self) -> bool {
match self {
IndexRecordOption::WithFreqsAndPositions | IndexRecordOption::WithFreqs => true,
_ => false,
}
}
/// Returns true iff the term positions within the document are stored as well.
pub fn is_position_enabled(self) -> bool {
match self {
IndexRecordOption::WithFreqsAndPositions => true,
_ => false,
}
}
/// Returns true iff this option includes encoding
/// term frequencies.
pub fn has_freq(self) -> bool {
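The `is_termfreq_enabled` / `is_position_enabled` helpers above mirror what `has_freq` and `has_positions` already express. A quick sanity check of that ladder of options:

use tantivy::schema::IndexRecordOption;

// The three options form a ladder: Basic < WithFreqs < WithFreqsAndPositions.
assert!(!IndexRecordOption::Basic.has_freq());
assert!(IndexRecordOption::WithFreqs.has_freq());
assert!(!IndexRecordOption::WithFreqs.has_positions());
assert!(IndexRecordOption::WithFreqsAndPositions.has_positions());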

View File

@@ -174,4 +174,5 @@ mod tests {
assert!(!is_valid_field_name("シャボン玉"));
assert!(is_valid_field_name("my_text_field"));
}
}

View File

@@ -246,25 +246,6 @@ impl Schema {
self.0.fields_map.get(field_name).cloned()
}
/// Create a document from a named document.
pub fn convert_named_doc(
&self,
named_doc: NamedFieldDocument,
) -> Result<Document, DocParsingError> {
let mut document = Document::new();
for (field_name, values) in named_doc.0 {
if let Some(field) = self.get_field(&field_name) {
for value in values {
let field_value = FieldValue::new(field, value);
document.add(field_value);
}
} else {
return Err(DocParsingError::NoSuchFieldInSchema(field_name));
}
}
Ok(document)
}
/// Create a named document off the doc.
pub fn to_named_doc(&self, doc: &Document) -> NamedFieldDocument {
let mut field_map = BTreeMap::new();
@@ -301,26 +282,28 @@ impl Schema {
let mut doc = Document::default();
for (field_name, json_value) in json_obj.iter() {
let field = self
.get_field(field_name)
.ok_or_else(|| DocParsingError::NoSuchFieldInSchema(field_name.clone()))?;
let field_entry = self.get_field_entry(field);
let field_type = field_entry.field_type();
match *json_value {
JsonValue::Array(ref json_items) => {
for json_item in json_items {
let value = field_type
.value_from_json(json_item)
.map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
doc.add(FieldValue::new(field, value));
match self.get_field(field_name) {
Some(field) => {
let field_entry = self.get_field_entry(field);
let field_type = field_entry.field_type();
match *json_value {
JsonValue::Array(ref json_items) => {
for json_item in json_items {
let value = field_type.value_from_json(json_item).map_err(|e| {
DocParsingError::ValueError(field_name.clone(), e)
})?;
doc.add(FieldValue::new(field, value));
}
}
_ => {
let value = field_type
.value_from_json(json_value)
.map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
doc.add(FieldValue::new(field, value));
}
}
}
_ => {
let value = field_type
.value_from_json(json_value)
.map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
doc.add(FieldValue::new(field, value));
}
None => return Err(DocParsingError::NoSuchFieldInSchema(field_name.clone())),
}
}
Ok(doc)
@@ -377,19 +360,13 @@ impl<'de> Deserialize<'de> for Schema {
/// Error that may happen when deserializing
/// a document from JSON.
#[derive(Debug, Fail, PartialEq)]
#[derive(Debug)]
pub enum DocParsingError {
/// The payload given is not valid JSON.
#[fail(display = "The provided string is not valid JSON")]
NotJSON(String),
/// One of the value node could not be parsed.
#[fail(display = "The field '{:?}' could not be parsed: {:?}", _0, _1)]
ValueError(String, ValueParsingError),
/// The json-document contains a field that is not declared in the schema.
#[fail(
display = "The document contains a field that is not declared in the schema: {:?}",
_0
)]
NoSuchFieldInSchema(String),
}
@@ -401,7 +378,6 @@ mod tests {
use crate::schema::*;
use matches::{assert_matches, matches};
use serde_json;
use std::collections::BTreeMap;
#[test]
pub fn is_indexed_test() {
@@ -516,54 +492,6 @@ mod tests {
assert_eq!(doc, doc_serdeser);
}
#[test]
pub fn test_document_from_nameddoc() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT);
let val = schema_builder.add_i64_field("val", INDEXED);
let schema = schema_builder.build();
let mut named_doc_map = BTreeMap::default();
named_doc_map.insert(
"title".to_string(),
vec![Value::from("title1"), Value::from("title2")],
);
named_doc_map.insert(
"val".to_string(),
vec![Value::from(14u64), Value::from(-1i64)],
);
let doc = schema
.convert_named_doc(NamedFieldDocument(named_doc_map))
.unwrap();
assert_eq!(
doc.get_all(title),
vec![
&Value::from("title1".to_string()),
&Value::from("title2".to_string())
]
);
assert_eq!(
doc.get_all(val),
vec![&Value::from(14u64), &Value::from(-1i64)]
);
}
#[test]
pub fn test_document_from_nameddoc_error() {
let schema = Schema::builder().build();
let mut named_doc_map = BTreeMap::default();
named_doc_map.insert(
"title".to_string(),
vec![Value::from("title1"), Value::from("title2")],
);
let err = schema
.convert_named_doc(NamedFieldDocument(named_doc_map))
.unwrap_err();
assert_eq!(
err,
DocParsingError::NoSuchFieldInSchema("title".to_string())
);
}
#[test]
pub fn test_parse_document() {
let mut schema_builder = Schema::builder();

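Both sides of the hunk above implement the same JSON-to-`Document` conversion, so a short usage sketch may help. It goes through the public `Schema::parse_document`, and the failing case is the `NoSuchFieldInSchema` path visible in the hunk:

```rust
use tantivy::schema::{Schema, TEXT};

fn main() {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("title", TEXT);
    let schema = schema_builder.build();

    // A declared field parses into a Document...
    let doc = schema.parse_document(r#"{"title": "hello"}"#).unwrap();
    assert_eq!(doc.field_values().len(), 1);

    // ...while an undeclared field is rejected (NoSuchFieldInSchema).
    assert!(schema.parse_document(r#"{"body": "oops"}"#).is_err());
}
```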
View File

@@ -22,10 +22,10 @@ impl Term {
/// Builds a term given a field, and a i64-value
///
/// Assuming the term has a field id of 1, and a i64 value of 3234,
/// the Term will have 12 bytes.
/// the Term will have 8 bytes.
///
/// The first four bytes are dedicated to storing the field id as a u64.
/// The 8 following bytes are encoding the u64 value.
/// The 4 following bytes are encoding the u64 value.
pub fn from_field_i64(field: Field, val: i64) -> Term {
let val_u64: u64 = common::i64_to_u64(val);
Term::from_field_u64(field, val_u64)
@@ -33,11 +33,11 @@ impl Term {
/// Builds a term given a field, and a f64-value
///
/// Assuming the term has a field id of 1, and a f64 value of 1.5,
/// the Term will have 12 bytes.
/// Assuming the term has a field id of 1, and a u64 value of 3234,
/// the Term will have 8 bytes. <= this is wrong
///
/// The first four bytes are dedicated to storing the field id as a u64.
/// The 8 following bytes are encoding the f64 as a u64 value.
/// The 4 following bytes are encoding the u64 value.
pub fn from_field_f64(field: Field, val: f64) -> Term {
let val_u64: u64 = common::f64_to_u64(val);
Term::from_field_u64(field, val_u64)
@@ -46,10 +46,10 @@ impl Term {
/// Builds a term given a field, and a DateTime value
///
/// Assuming the term has a field id of 1, and a timestamp i64 value of 3234,
/// the Term will have 12 bytes.
/// the Term will have 8 bytes.
///
/// The first four bytes are dedicated to storing the field id as a u64.
/// The 8 following bytes are encoding the DateTime as i64 timestamp value.
/// The 4 following bytes are encoding the DateTime as i64 timestamp value.
pub fn from_field_date(field: Field, val: &DateTime) -> Term {
let val_timestamp = val.timestamp();
Term::from_field_i64(field, val_timestamp)
@@ -82,10 +82,10 @@ impl Term {
/// Builds a term given a field, and a u64-value
///
/// Assuming the term has a field id of 1, and a u64 value of 3234,
/// the Term will have 12 bytes.
/// the Term will have 8 bytes.
///
/// The first four bytes are dedicated to storing the field id as a u64.
/// The 8 following bytes are encoding the u64 value.
/// The 4 following bytes are encoding the u64 value.
pub fn from_field_u64(field: Field, val: u64) -> Term {
let mut term = Term(vec![0u8; INT_TERM_LEN]);
term.set_field(field);
@@ -182,7 +182,7 @@ where
///
/// # Panics
/// ... or returns an invalid value
/// if the term is not a `f64` field.
/// if the term is not a `i64` field.
pub fn get_f64(&self) -> f64 {
common::u64_to_f64(BigEndian::read_u64(&self.0.as_ref()[4..]))
}
@@ -224,12 +224,7 @@ where
impl fmt::Debug for Term {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"Term(field={},bytes={:?})",
self.field().0,
self.value_bytes()
)
write!(f, "Term({:?})", &self.0[..])
}
}
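For context on the 12-byte figure discussed in the doc comments above (4 bytes of field id followed by 8 bytes of big-endian value, as the `BigEndian::read_u64(&self.0.as_ref()[4..])` accessor in this hunk implies), here is a rough, hypothetical sketch of that layout; the big-endian field id is an assumption:

```rust
use std::convert::TryInto;

// Hypothetical sketch of the 4 + 8 byte layout described above (the value is
// read big-endian from offset 4, as in the get_f64 accessor shown in the hunk).
fn encode_int_term(field_id: u32, val: u64) -> Vec<u8> {
    let mut bytes = Vec::with_capacity(12);
    bytes.extend_from_slice(&field_id.to_be_bytes()); // bytes 0..4: field id
    bytes.extend_from_slice(&val.to_be_bytes());      // bytes 4..12: value
    bytes
}

fn main() {
    let term = encode_int_term(1, 3234);
    assert_eq!(term.len(), 12);
    let val = u64::from_be_bytes(term[4..12].try_into().unwrap());
    assert_eq!(val, 3234);
}
```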

View File

@@ -2,7 +2,7 @@ use crate::schema::Facet;
use crate::DateTime;
use serde::de::Visitor;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::{cmp::Ordering, fmt};
use std::{fmt, cmp::Ordering};
/// Value represents the value of any field.
/// It is an enum over all of the possible field types.
@@ -27,7 +27,7 @@ pub enum Value {
impl Eq for Value {}
impl Ord for Value {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
match (self,other) {
(Value::Str(l), Value::Str(r)) => l.cmp(r),
(Value::U64(l), Value::U64(r)) => l.cmp(r),
(Value::I64(l), Value::I64(r)) => l.cmp(r),
@@ -35,7 +35,7 @@ impl Ord for Value {
(Value::Facet(l), Value::Facet(r)) => l.cmp(r),
(Value::Bytes(l), Value::Bytes(r)) => l.cmp(r),
(Value::F64(l), Value::F64(r)) => {
match (l.is_nan(), r.is_nan()) {
match (l.is_nan(),r.is_nan()) {
(false, false) => l.partial_cmp(r).unwrap(), // only fail on NaN
(true, true) => Ordering::Equal,
(true, false) => Ordering::Less, // we define NaN as less than -∞
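The diff context truncates this match; a self-contained sketch of the NaN-aware total order it describes (an editorial approximation, not the crate's code) would read:

```rust
use std::cmp::Ordering;

// NaN sorts below everything else (including -inf), and two NaNs compare equal,
// which is what turns f64's partial order into a total order here.
fn cmp_f64(l: f64, r: f64) -> Ordering {
    match (l.is_nan(), r.is_nan()) {
        (false, false) => l.partial_cmp(&r).unwrap(), // neither is NaN, so this cannot fail
        (true, true) => Ordering::Equal,
        (true, false) => Ordering::Less,
        (false, true) => Ordering::Greater,
    }
}

fn main() {
    assert_eq!(cmp_f64(f64::NAN, f64::NEG_INFINITY), Ordering::Less);
    assert_eq!(cmp_f64(2.0, 1.0), Ordering::Greater);
}
```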
@@ -155,7 +155,7 @@ impl Value {
Value::F64(ref value) => *value,
_ => panic!("This is not a f64 field."),
}
}
}
/// Returns the Date-value, provided the value is of the `Date` type.
///
@@ -219,7 +219,7 @@ impl From<Vec<u8>> for Value {
mod binary_serialize {
use super::Value;
use crate::common::{f64_to_u64, u64_to_f64, BinarySerializable};
use crate::common::{BinarySerializable, f64_to_u64, u64_to_f64};
use crate::schema::Facet;
use chrono::{TimeZone, Utc};
use std::io::{self, Read, Write};

View File

@@ -63,7 +63,7 @@ impl FragmentCandidate {
fn try_add_token(&mut self, token: &Token, terms: &BTreeMap<String, f32>) {
self.stop_offset = token.offset_to;
if let Some(&score) = terms.get(&token.text.to_lowercase()) {
if let Some(score) = terms.get(&token.text.to_lowercase()) {
self.score += score;
self.highlighted
.push(HighlightSection::new(token.offset_from, token.offset_to));
@@ -142,7 +142,7 @@ impl Snippet {
/// Fragments must be valid in the sense that `&text[fragment.start..fragment.stop]`\
/// has to be a valid string.
fn search_fragments<'a>(
tokenizer: &BoxedTokenizer,
tokenizer: &dyn BoxedTokenizer,
text: &'a str,
terms: &BTreeMap<String, f32>,
max_num_chars: usize,
@@ -150,6 +150,7 @@ fn search_fragments<'a>(
let mut token_stream = tokenizer.token_stream(text);
let mut fragment = FragmentCandidate::new(0);
let mut fragments: Vec<FragmentCandidate> = vec![];
while let Some(next) = token_stream.next() {
if (next.offset_to - fragment.start_offset) > max_num_chars {
if fragment.score > 0.0 {
@@ -213,9 +214,11 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
/// # Example
///
/// ```rust
/// # use tantivy::query::QueryParser;
/// # #[macro_use]
/// # extern crate tantivy;
/// # use tantivy::Index;
/// # use tantivy::schema::{Schema, TEXT};
/// # use tantivy::{doc, Index};
/// # use tantivy::query::QueryParser;
/// use tantivy::SnippetGenerator;
///
/// # fn main() -> tantivy::Result<()> {
@@ -251,7 +254,7 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
/// ```
pub struct SnippetGenerator {
terms_text: BTreeMap<String, f32>,
tokenizer: BoxedTokenizer,
tokenizer: Box<dyn BoxedTokenizer>,
field: Field,
max_num_chars: usize,
}
@@ -313,8 +316,12 @@ impl SnippetGenerator {
/// Generates a snippet for the given text.
pub fn snippet(&self, text: &str) -> Snippet {
let fragment_candidates =
search_fragments(&self.tokenizer, &text, &self.terms_text, self.max_num_chars);
let fragment_candidates = search_fragments(
&*self.tokenizer,
&text,
&self.terms_text,
self.max_num_chars,
);
select_best_fragment_combination(&fragment_candidates[..], &text)
}
}
@@ -324,7 +331,7 @@ mod tests {
use super::{search_fragments, select_best_fragment_combination};
use crate::query::QueryParser;
use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions, TEXT};
use crate::tokenizer::SimpleTokenizer;
use crate::tokenizer::{box_tokenizer, SimpleTokenizer};
use crate::Index;
use crate::SnippetGenerator;
use maplit::btreemap;
@@ -348,12 +355,12 @@ Survey in 2016, 2017, and 2018."#;
#[test]
fn test_snippet() {
let boxed_tokenizer = SimpleTokenizer.into();
let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
let terms = btreemap! {
String::from("rust") => 1.0,
String::from("language") => 0.9
};
let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 100);
let fragments = search_fragments(&*boxed_tokenizer, TEST_TEXT, &terms, 100);
assert_eq!(fragments.len(), 7);
{
let first = &fragments[0];
@@ -375,13 +382,13 @@ Survey in 2016, 2017, and 2018."#;
#[test]
fn test_snippet_scored_fragment() {
let boxed_tokenizer = SimpleTokenizer.into();
let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
{
let terms = btreemap! {
String::from("rust") =>1.0f32,
String::from("language") => 0.9f32
};
let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 20);
let fragments = search_fragments(&*boxed_tokenizer, TEST_TEXT, &terms, 20);
{
let first = &fragments[0];
assert_eq!(first.score, 1.0);
@@ -390,13 +397,13 @@ Survey in 2016, 2017, and 2018."#;
let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
assert_eq!(snippet.to_html(), "<b>Rust</b> is a systems")
}
let boxed_tokenizer = SimpleTokenizer.into();
let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
{
let terms = btreemap! {
String::from("rust") =>0.9f32,
String::from("language") => 1.0f32
};
let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 20);
let fragments = search_fragments(&*boxed_tokenizer, TEST_TEXT, &terms, 20);
//assert_eq!(fragments.len(), 7);
{
let first = &fragments[0];
@@ -410,14 +417,14 @@ Survey in 2016, 2017, and 2018."#;
#[test]
fn test_snippet_in_second_fragment() {
let boxed_tokenizer = SimpleTokenizer.into();
let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
let text = "a b c d e f g";
let mut terms = BTreeMap::new();
terms.insert(String::from("c"), 1.0);
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
let fragments = search_fragments(&*boxed_tokenizer, &text, &terms, 3);
assert_eq!(fragments.len(), 1);
{
@@ -434,14 +441,14 @@ Survey in 2016, 2017, and 2018."#;
#[test]
fn test_snippet_with_term_at_the_end_of_fragment() {
let boxed_tokenizer = SimpleTokenizer.into();
let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
let text = "a b c d e f f g";
let mut terms = BTreeMap::new();
terms.insert(String::from("f"), 1.0);
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
let fragments = search_fragments(&*boxed_tokenizer, &text, &terms, 3);
assert_eq!(fragments.len(), 2);
{
@@ -458,7 +465,7 @@ Survey in 2016, 2017, and 2018."#;
#[test]
fn test_snippet_with_second_fragment_has_the_highest_score() {
let boxed_tokenizer = SimpleTokenizer.into();
let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
let text = "a b c d e f g";
@@ -466,7 +473,7 @@ Survey in 2016, 2017, and 2018."#;
terms.insert(String::from("f"), 1.0);
terms.insert(String::from("a"), 0.9);
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 7);
let fragments = search_fragments(&*boxed_tokenizer, &text, &terms, 7);
assert_eq!(fragments.len(), 2);
{
@@ -483,14 +490,14 @@ Survey in 2016, 2017, and 2018."#;
#[test]
fn test_snippet_with_term_not_in_text() {
let boxed_tokenizer = SimpleTokenizer.into();
let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
let text = "a b c d";
let mut terms = BTreeMap::new();
terms.insert(String::from("z"), 1.0);
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
let fragments = search_fragments(&*boxed_tokenizer, &text, &terms, 3);
assert_eq!(fragments.len(), 0);
@@ -501,12 +508,12 @@ Survey in 2016, 2017, and 2018."#;
#[test]
fn test_snippet_with_no_terms() {
let boxed_tokenizer = SimpleTokenizer.into();
let boxed_tokenizer = box_tokenizer(SimpleTokenizer);
let text = "a b c d";
let terms = BTreeMap::new();
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
let fragments = search_fragments(&*boxed_tokenizer, &text, &terms, 3);
assert_eq!(fragments.len(), 0);
let snippet = select_best_fragment_combination(&fragments[..], &text);

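Setting aside the call-site churn in this file, the fragment search visible in `try_add_token` and `search_fragments` above boils down to a greedy windowing pass. A self-contained approximation (not tantivy's implementation) of that idea:

```rust
use std::collections::BTreeMap;

// Greedy fragment search: fold tokens into windows of at most `max_num_chars`,
// score each window by the terms it contains, and keep the best-scoring window.
struct Tok<'a> {
    from: usize,
    to: usize,
    text: &'a str,
}

fn best_fragment(
    tokens: &[Tok<'_>],
    terms: &BTreeMap<String, f32>,
    max_num_chars: usize,
) -> Option<(usize, usize)> {
    let mut fragments: Vec<(f32, usize, usize)> = Vec::new();
    let (mut start, mut stop, mut score) = (0usize, 0usize, 0.0f32);
    for tok in tokens {
        if tok.to - start > max_num_chars {
            if score > 0.0 {
                fragments.push((score, start, stop));
            }
            start = tok.from; // open a new fragment at the current token
            score = 0.0;
        }
        if let Some(&s) = terms.get(&tok.text.to_lowercase()) {
            score += s;
        }
        stop = tok.to;
    }
    if score > 0.0 {
        fragments.push((score, start, stop));
    }
    fragments
        .into_iter()
        .max_by(|a, b| a.0.partial_cmp(&b.0).unwrap()) // scores are finite here
        .map(|(_, s, e)| (s, e))
}

fn main() {
    let mut terms = BTreeMap::new();
    terms.insert("c".to_string(), 1.0f32);
    let tokens = [
        Tok { from: 0, to: 1, text: "a" },
        Tok { from: 2, to: 3, text: "b" },
        Tok { from: 4, to: 5, text: "c" },
    ];
    // With a 3-character budget, "c" lands in a second fragment.
    assert_eq!(best_fragment(&tokens, &terms, 3), Some((4, 5)));
}
```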
View File

@@ -120,16 +120,17 @@ pub mod tests {
);
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::tests::write_lorem_ipsum_store;
use crate::directory::Directory;
use crate::directory::RAMDirectory;
use crate::store::StoreReader;
use directory::Directory;
use directory::RAMDirectory;
use std::path::Path;
use store::StoreReader;
use test::Bencher;
#[bench]

View File

@@ -165,4 +165,5 @@ mod tests {
assert_eq!(output.len(), 65);
assert_eq!(output[0], 128u8 + 3u8);
}
}

View File

@@ -3,7 +3,6 @@ use super::skiplist::SkipListBuilder;
use super::StoreReader;
use crate::common::CountingWriter;
use crate::common::{BinarySerializable, VInt};
use crate::directory::TerminatingWrite;
use crate::directory::WritePtr;
use crate::schema::Document;
use crate::DocId;
@@ -110,6 +109,6 @@ impl StoreWriter {
self.offset_index_writer.write(&mut self.writer)?;
header_offset.serialize(&mut self.writer)?;
self.doc.serialize(&mut self.writer)?;
self.writer.terminate()
self.writer.flush()
}
}

View File

@@ -328,4 +328,5 @@ mod tests {
assert_eq!(term_info_store.get(i as u64), term_infos[i]);
}
}
}

View File

@@ -1,5 +1,6 @@
//! # Example
//! ```rust
//! ```
//! extern crate tantivy;
//! use tantivy::tokenizer::*;
//!
//! # fn main() {

View File

@@ -98,6 +98,10 @@ mod tests {
#[test]
fn test_lowercaser() {
assert_eq!(lowercase_helper("Tree"), vec!["tree".to_string()]);
assert_eq!(lowercase_helper("Русский"), vec!["русский".to_string()]);
assert_eq!(
lowercase_helper("Русский"),
vec!["русский".to_string()]
);
}
}

View File

@@ -4,7 +4,8 @@
//! You must define in your schema which tokenizer should be used for
//! each of your fields :
//!
//! ```rust
//! ```
//! extern crate tantivy;
//! use tantivy::schema::*;
//!
//! # fn main() {
@@ -64,6 +65,8 @@
//! For instance, the `en_stem` is defined as follows.
//!
//! ```rust
//! # extern crate tantivy;
//!
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
@@ -77,7 +80,8 @@
//! Once your tokenizer is defined, you need to
//! register it with a name in your index's [`TokenizerManager`](./struct.TokenizerManager.html).
//!
//! ```rust
//! ```
//! # extern crate tantivy;
//! # use tantivy::schema::Schema;
//! # use tantivy::tokenizer::*;
//! # use tantivy::Index;
@@ -97,7 +101,8 @@
//!
//! # Example
//!
//! ```rust
//! ```
//! extern crate tantivy;
//! use tantivy::schema::{Schema, IndexRecordOption, TextOptions, TextFieldIndexing};
//! use tantivy::tokenizer::*;
//! use tantivy::Index;
@@ -150,6 +155,7 @@ pub use self::simple_tokenizer::SimpleTokenizer;
pub use self::stemmer::{Language, Stemmer};
pub use self::stop_word_filter::StopWordFilter;
pub(crate) use self::token_stream_chain::TokenStreamChain;
pub(crate) use self::tokenizer::box_tokenizer;
pub use self::tokenizer::BoxedTokenizer;
pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer};
@@ -281,4 +287,5 @@ pub mod tests {
assert!(tokens.is_empty());
}
}
}

View File

@@ -29,7 +29,8 @@ use super::{Token, TokenStream, Tokenizer};
///
/// # Example
///
/// ```rust
/// ```
/// # extern crate tantivy;
/// use tantivy::tokenizer::*;
/// # fn main() {
/// let tokenizer = NgramTokenizer::new(2, 3, false);
@@ -460,4 +461,5 @@ mod tests {
assert_eq!(it.next(), Some((8, 9)));
assert_eq!(it.next(), None);
}
}

View File

@@ -1,5 +1,6 @@
//! # Example
//! ```rust
//! ```
//! extern crate tantivy;
//! use tantivy::tokenizer::*;
//!
//! # fn main() {

View File

@@ -1,5 +1,6 @@
//! # Example
//! ```rust
//! ```
//! extern crate tantivy;
//! use tantivy::tokenizer::*;
//!
//! # fn main() {

View File

@@ -97,4 +97,5 @@ mod tests {
assert!(!token_chain.advance());
}
}

View File

@@ -56,6 +56,8 @@ pub trait Tokenizer<'a>: Sized + Clone {
/// # Example
///
/// ```rust
/// # extern crate tantivy;
///
/// use tantivy::tokenizer::*;
///
/// # fn main() {
@@ -78,7 +80,7 @@ pub trait Tokenizer<'a>: Sized + Clone {
}
/// A boxed tokenizer
trait BoxedTokenizerTrait: Send + Sync {
pub trait BoxedTokenizer: Send + Sync {
/// Tokenize a `&str`
fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a>;
@@ -90,41 +92,7 @@ trait BoxedTokenizerTrait: Send + Sync {
fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b>;
/// Return a boxed clone of the tokenizer
fn boxed_clone(&self) -> BoxedTokenizer;
}
/// A boxed tokenizer
pub struct BoxedTokenizer(Box<dyn BoxedTokenizerTrait>);
impl<T> From<T> for BoxedTokenizer
where
T: 'static + Send + Sync + for<'a> Tokenizer<'a>,
{
fn from(tokenizer: T) -> BoxedTokenizer {
BoxedTokenizer(Box::new(BoxableTokenizer(tokenizer)))
}
}
impl BoxedTokenizer {
/// Tokenize a `&str`
pub fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a> {
self.0.token_stream(text)
}
/// Tokenize an array of `&str`
///
/// The resulting `TokenStream` is equivalent to what would be obtained if the &str were
/// one concatenated `&str`, with an artificial position gap of `2` between the different fields
/// to prevent an accidental `PhraseQuery` from matching across two terms.
pub fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b> {
self.0.token_stream_texts(texts)
}
}
impl Clone for BoxedTokenizer {
fn clone(&self) -> BoxedTokenizer {
self.0.boxed_clone()
}
fn boxed_clone(&self) -> Box<dyn BoxedTokenizer>;
}
#[derive(Clone)]
@@ -132,7 +100,7 @@ struct BoxableTokenizer<A>(A)
where
A: for<'a> Tokenizer<'a> + Send + Sync;
impl<A> BoxedTokenizerTrait for BoxableTokenizer<A>
impl<A> BoxedTokenizer for BoxableTokenizer<A>
where
A: 'static + Send + Sync + for<'a> Tokenizer<'a>,
{
@@ -157,11 +125,18 @@ where
}
}
fn boxed_clone(&self) -> BoxedTokenizer {
self.0.clone().into()
fn boxed_clone(&self) -> Box<dyn BoxedTokenizer> {
Box::new(self.clone())
}
}
pub(crate) fn box_tokenizer<A>(a: A) -> Box<dyn BoxedTokenizer>
where
A: 'static + Send + Sync + for<'a> Tokenizer<'a>,
{
Box::new(BoxableTokenizer(a))
}
impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
fn advance(&mut self) -> bool {
let token_stream: &mut dyn TokenStream = self.borrow_mut();
@@ -186,6 +161,7 @@ impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
/// # Example
///
/// ```
/// extern crate tantivy;
/// use tantivy::tokenizer::*;
///
/// # fn main() {
@@ -227,6 +203,7 @@ pub trait TokenStream {
/// and `.token()`.
///
/// ```
/// # extern crate tantivy;
/// # use tantivy::tokenizer::*;
/// #
/// # fn main() {

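To close out the tokenizer hunks above: the struct-based side of the change lets callers write `SimpleTokenizer.into()`, as the test hunks earlier in this diff show. A hedged sketch of that call shape, assuming the `From` impl and the provided `TokenStream::next` shown in these hunks:

```rust
use tantivy::tokenizer::{BoxedTokenizer, SimpleTokenizer, TokenStream};

fn main() {
    // Erase the concrete tokenizer type behind the BoxedTokenizer struct.
    let tokenizer: BoxedTokenizer = SimpleTokenizer.into();
    let mut stream = tokenizer.token_stream("Boxed tokenizers erase the concrete type");
    while let Some(token) = stream.next() {
        println!("{:?} ({}..{})", token.text, token.offset_from, token.offset_to);
    }
}
```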
Some files were not shown because too many files have changed in this diff.