mirror of https://github.com/quickwit-oss/tantivy.git
synced 2025-12-31 14:32:54 +00:00

Compare commits: fix-bench...streamer-w (1 commit, 790baa7adf)

CHANGELOG.md (14 changed lines)
@@ -7,20 +7,10 @@ Tantivy 0.11.0

- Better handling of whitespaces.
- Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
- API change around `Box<BoxableTokenizer>`. See detail in #629
- Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
- Add footer with some metadata to index files. #605 (@fdb-hiroshima)

## How to update?

- `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
- Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
  an error and handling the `Result` is required.
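A minimal sketch of the second migration point above: because the regex is now compiled when the query is built, construction is fallible. The constructor name `RegexQuery::from_pattern` is an assumption on my part; check the 0.11 docs for the exact API.

```rust
// Sketch only: the regex compiles at construction time, so an invalid
// pattern surfaces as an `Err` here instead of failing at search time.
use tantivy::query::RegexQuery;
use tantivy::schema::{Schema, TEXT};

fn build_query() -> tantivy::Result<RegexQuery> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    // Handling the `Result` is now required.
    RegexQuery::from_pattern("sea.*", title)
}
```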
Tantivy 0.10.2
=====================

- Closes #656. Solving memory leak.
  `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.

Tantivy 0.10.1
=====================

Cargo.toml (13 changed lines)
@@ -15,16 +15,16 @@ edition = "2018"

[dependencies]
base64 = "0.10.0"
byteorder = "1.0"
crc32fast = "1.2.0"
once_cell = "1.0"
regex = {version = "1.3.0", default-features = false, features = ["std"]}
tantivy-fst = "0.1"
once_cell = "0.2"
regex = "1.0"
tantivy-fst = {git="https://github.com/tantivy-search/fst"}
memmap = {version = "0.7", optional=true}
lz4 = {version="1.20", optional=true}
snap = {version="0.2"}
atomicwrites = {version="0.2.2", optional=true}
tempfile = "3.0"
log = "0.4"
combine = ">=3.6.0,<4.0.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"

@@ -42,7 +42,6 @@ owning_ref = "0.4"

stable_deref_trait = "1.0.0"
rust-stemmers = "1.1"
downcast-rs = { version="1.0" }
tantivy-query-grammar = { path="./query-grammar" }
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.2"
fnv = "1.0.6"

@@ -81,14 +80,10 @@ failpoints = ["fail/failpoints"]

unstable = [] # useful for benches.
wasm-bindgen = ["uuid/wasm-bindgen"]

[workspace]
members = ["query-grammar"]

[badges]
travis-ci = { repository = "tantivy-search/tantivy" }

[dev-dependencies.fail]
version = "0.3"
features = ["failpoints"]

# Following the "fail" crate best practises, we isolate

Makefile (3 changed lines)
@@ -1,3 +0,0 @@
test:
	echo "Run test only... No examples."
	cargo test --tests --lib

@@ -7,7 +7,7 @@ set -ex
main() {
    if [ ! -z $CODECOV ]; then
        echo "Codecov"
        cargo build --verbose && cargo coverage --verbose --all && bash <(curl -s https://codecov.io/bash) -s target/kcov
        cargo build --verbose && cargo coverage --verbose && bash <(curl -s https://codecov.io/bash) -s target/kcov
    else
        echo "Build"
        cross build --target $TARGET

@@ -15,8 +15,7 @@ main() {
        return
    fi
    echo "Test"
    cross test --target $TARGET --no-default-features --features mmap
    cross test --target $TARGET --no-default-features --features mmap query-grammar
    cross test --target $TARGET --no-default-features --features mmap -- --test-threads 1
    fi
    for example in $(ls examples/*.rs)
    do
@@ -5,17 +5,20 @@
//
// We will :
// - define our schema
// - create an index in a directory
// - index a few documents into our index
// - search for the best document matching a basic query
// - retrieve the best document's original content.
// = create an index in a directory
// - index few documents in our index
// - search for the best document matchings "sea whale"
// - retrieve the best document original content.

// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::{doc, Index, ReloadPolicy};
use tantivy::Index;
use tantivy::ReloadPolicy;
use tempfile::TempDir;

fn main() -> tantivy::Result<()> {

@@ -30,7 +33,7 @@ fn main() -> tantivy::Result<()> {
    // and for each field, its type and "the way it should
    // be indexed".

    // First we need to define a schema ...
    // first we need to define a schema ...
    let mut schema_builder = Schema::builder();

    // Our first field is title.

@@ -45,7 +48,7 @@ fn main() -> tantivy::Result<()> {
    //
    // `STORED` means that the field will also be saved
    // in a compressed, row-oriented key-value store.
    // This store is useful for reconstructing the
    // This store is useful to reconstruct the
    // documents that were selected during the search phase.
    schema_builder.add_text_field("title", TEXT | STORED);

@@ -54,7 +57,8 @@ fn main() -> tantivy::Result<()> {
    // need to be able to be able to retrieve it
    // for our application.
    //
    // We can make our index lighter by omitting the `STORED` flag.
    // We can make our index lighter and
    // by omitting `STORED` flag.
    schema_builder.add_text_field("body", TEXT);

    let schema = schema_builder.build();

@@ -67,7 +71,7 @@ fn main() -> tantivy::Result<()> {
    // with our schema in the directory.
    let index = Index::create_in_dir(&index_path, schema.clone())?;

    // To insert a document we will need an index writer.
    // To insert document we need an index writer.
    // There must be only one writer at a time.
    // This single `IndexWriter` is already
    // multithreaded.

@@ -145,8 +149,8 @@ fn main() -> tantivy::Result<()> {
    // At this point our documents are not searchable.
    //
    //
    // We need to call `.commit()` explicitly to force the
    // `index_writer` to finish processing the documents in the queue,
    // We need to call .commit() explicitly to force the
    // index_writer to finish processing the documents in the queue,
    // flush the current index to the disk, and advertise
    // the existence of new documents.
    //

@@ -158,14 +162,14 @@ fn main() -> tantivy::Result<()> {
    // persistently indexed.
    //
    // In the scenario of a crash or a power failure,
    // tantivy behaves as if it has rolled back to its last
    // tantivy behaves as if has rolled back to its last
    // commit.

    // # Searching
    //
    // ### Searcher
    //
    // A reader is required first in order to search an index.
    // A reader is required to get search the index.
    // It acts as a `Searcher` pool that reloads itself,
    // depending on a `ReloadPolicy`.
    //

@@ -181,7 +185,7 @@ fn main() -> tantivy::Result<()> {

    // We now need to acquire a searcher.
    //
    // A searcher points to a snapshotted, immutable version of the index.
    // A searcher points to snapshotted, immutable version of the index.
    //
    // Some search experience might require more than
    // one query. Using the same searcher ensures that all of these queries will run on the

@@ -201,7 +205,7 @@ fn main() -> tantivy::Result<()> {
    // in both title and body.
    let query_parser = QueryParser::for_index(&index, vec![title, body]);

    // `QueryParser` may fail if the query is not in the right
    // QueryParser may fail if the query is not in the right
    // format. For user facing applications, this can be a problem.
    // A ticket has been opened regarding this problem.
    let query = query_parser.parse_query("sea whale")?;

@@ -217,7 +221,7 @@ fn main() -> tantivy::Result<()> {
    //
    // We are not interested in all of the documents but
    // only in the top 10. Keeping track of our top 10 best documents
    // is the role of the `TopDocs` collector.
    // is the role of the TopDocs.

    // We can now perform our query.
    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

@@ -9,12 +9,15 @@

// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, SegmentReader, TantivyError};
use tantivy::SegmentReader;
use tantivy::{Index, TantivyError};

#[derive(Default)]
struct Stats {

@@ -2,11 +2,14 @@
//
// In this example, we'll see how to define a tokenizer pipeline
// by aligning a bunch of `TokenFilter`.

#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::NgramTokenizer;
use tantivy::{doc, Index};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    // # Defining the schema

@@ -8,10 +8,13 @@
//
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::{doc, Index, IndexReader};
use tantivy::Index;
use tantivy::IndexReader;

// A simple helper function to fetch a single document
// given its id from our index.

@@ -12,10 +12,12 @@

// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::FacetCollector;
use tantivy::query::AllQuery;
use tantivy::schema::*;
use tantivy::{doc, Index};
use tantivy::Index;
use tempfile::TempDir;

fn main() -> tantivy::Result<()> {

@@ -2,10 +2,14 @@
//
// Below is an example of creating an indexed integer field in your schema
// You can use RangeQuery to get a Count of all occurrences in a given range.

#[macro_use]
extern crate tantivy;
use tantivy::collector::Count;
use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INDEXED};
use tantivy::{doc, Index, Result};
use tantivy::Index;
use tantivy::Result;

fn run() -> Result<()> {
    // For the sake of simplicity, this schema will only have 1 field

@@ -9,8 +9,11 @@

// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::schema::*;
use tantivy::{doc, DocId, DocSet, Index, Postings};
use tantivy::Index;
use tantivy::{DocId, DocSet, Postings};

fn main() -> tantivy::Result<()> {
    // We first create a schema for the sake of the

@@ -25,11 +25,14 @@

// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index, IndexWriter, Opstamp};
use tantivy::Opstamp;
use tantivy::{Index, IndexWriter};

fn main() -> tantivy::Result<()> {
    // # Defining the schema

@@ -46,9 +49,10 @@ fn main() -> tantivy::Result<()> {
    thread::spawn(move || {
        // we index 100 times the document... for the sake of the example.
        for i in 0..100 {
            let opstamp = index_writer_clone_1
                .read().unwrap() //< A read lock is sufficient here.
                .add_document(
            let opstamp = {
                // A read lock is sufficient here.
                let index_writer_rlock = index_writer_clone_1.read().unwrap();
                index_writer_rlock.add_document(
                    doc!(
                        title => "Of Mice and Men",
                        body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \

@@ -59,7 +63,8 @@ fn main() -> tantivy::Result<()> {
                        fresh and green with every spring, carrying in their lower leaf junctures the \
                        debris of the winter's flooding; and sycamores with mottled, white, recumbent \
                        limbs and branches that arch over the pool"
            ));
                ))
            };
            println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
            thread::sleep(Duration::from_millis(20));
        }
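For context, the pattern being refactored in the hunk above relies on `add_document` taking `&self`, so a read lock is enough to index from several threads, while `commit` needs the write lock. A condensed, self-contained sketch of that design (heap size and field name are arbitrary; the `use tantivy::doc` import style matches one side of this diff):

```rust
// Sketch: sharing one IndexWriter across threads behind an RwLock.
use std::sync::{Arc, RwLock};
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index, IndexWriter};

fn spawn_indexers() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let writer: Arc<RwLock<IndexWriter>> = Arc::new(RwLock::new(index.writer(50_000_000)?));
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let writer = Arc::clone(&writer);
            std::thread::spawn(move || {
                // Read lock: many threads may add documents concurrently.
                writer.read().unwrap().add_document(doc!(title => "hello"));
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    // Write lock: commit needs exclusive access.
    writer.write().unwrap().commit()?;
    Ok(())
}
```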
@@ -7,10 +7,13 @@

// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::{doc, Index, Snippet, SnippetGenerator};
use tantivy::Index;
use tantivy::{Snippet, SnippetGenerator};
use tempfile::TempDir;

fn main() -> tantivy::Result<()> {

@@ -11,11 +11,13 @@

// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::*;
use tantivy::{doc, Index};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    // this example assumes you understand the content in `basic_search`

@@ -1,16 +0,0 @@
[package]
name = "tantivy-query-grammar"
version = "0.11.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
description = """Search engine library"""
documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"

[dependencies]
combine = ">=3.6.0,<4.0.0"

@@ -1,17 +0,0 @@
#![recursion_limit = "100"]

mod occur;
mod query_grammar;
mod user_input_ast;
use combine::parser::Parser;

pub use crate::occur::Occur;
use crate::query_grammar::parse_to_ast;
pub use crate::user_input_ast::{UserInputAST, UserInputBound, UserInputLeaf, UserInputLiteral};

pub struct Error;

pub fn parse_query(query: &str) -> Result<UserInputAST, Error> {
    let (user_input_ast, _remaining) = parse_to_ast().parse(query).map_err(|_| Error)?;
    Ok(user_input_ast)
}
@@ -10,10 +10,12 @@ use crate::SegmentReader;
/// documents match the query.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::Count;
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {

@@ -123,4 +125,5 @@ mod tests {
        assert_eq!(count_collector.harvest(), 2);
    }
}

}

@@ -81,10 +81,12 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
///
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::FacetCollector;
/// use tantivy::query::AllQuery;
/// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{doc, Index, Result};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {

@@ -599,18 +601,19 @@ mod tests {
        );
    }
}

}

#[cfg(all(test, feature = "unstable"))]
mod bench {

    use crate::collector::FacetCollector;
    use crate::query::AllQuery;
    use crate::schema::{Facet, Schema};
    use crate::Index;
    use rand::seq::SliceRandom;
    use rand::thread_rng;
    use collector::FacetCollector;
    use query::AllQuery;
    use rand::{thread_rng, Rng};
    use schema::Facet;
    use schema::Schema;
    use test::Bencher;
    use Index;

    #[bench]
    fn bench_facet_collector(b: &mut Bencher) {

@@ -627,7 +630,7 @@ mod bench {
            }
        }
        // 40425 docs
        docs[..].shuffle(&mut thread_rng());
        thread_rng().shuffle(&mut docs[..]);

        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        for doc in docs {

@@ -636,7 +639,7 @@ mod bench {
        index_writer.commit().unwrap();
        let reader = index.reader().unwrap();
        b.iter(|| {
            let searcher = reader.searcher();
            let searcher = index.searcher();
            let facet_collector = FacetCollector::for_field(facet_field);
            searcher.search(&AllQuery, &facet_collector).unwrap();
        });

@@ -35,6 +35,7 @@ The resulting `Fruit` will then be a typed tuple with each collector's original
in their respective position.

```rust
# extern crate tantivy;
# use tantivy::schema::*;
# use tantivy::*;
# use tantivy::query::*;

@@ -105,10 +105,12 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
/// [Combining several collectors section of the collector documentation](./index.html#combining-several-collectors).
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::{Count, TopDocs, MultiCollector};
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {

@@ -23,10 +23,13 @@ use std::fmt;
/// is `O(n log K)`.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::DocAddress;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::TopDocs;
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, DocAddress, Index, Result};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {

@@ -84,8 +87,10 @@ impl TopDocs {
/// Set top-K to rank documents by a given fast field.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, Result, DocAddress};
/// # use tantivy::{Index, Result, DocAddress};
/// # use tantivy::query::{Query, QueryParser};
/// use tantivy::Searcher;
/// use tantivy::collector::TopDocs;

@@ -123,7 +128,7 @@ impl TopDocs {
/// ///
/// /// `field` is required to be a FAST field.
/// fn docs_sorted_by_rating(searcher: &Searcher,
///                          query: &dyn Query,
///                          query: &Query,
///                          sort_by_field: Field)
///     -> Result<Vec<(u64, DocAddress)>> {
///

@@ -192,8 +197,10 @@ impl TopDocs {
/// learning-to-rank model over various features
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress, DocId, Score};
/// # use tantivy::{Index, DocAddress, DocId, Score};
/// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;

@@ -295,8 +302,10 @@ impl TopDocs {
/// # Example
///
/// ```rust
/// # #[macro_use]
/// # extern crate tantivy;
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress, DocId};
/// # use tantivy::{Index, DocAddress, DocId};
/// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;

@@ -592,4 +601,5 @@ mod tests {
        let query = query_parser.parse_query(query).unwrap();
        (index, query)
    }

}
@@ -2,7 +2,7 @@ use crate::common::BinarySerializable;
use crate::common::CountingWriter;
use crate::common::VInt;
use crate::directory::ReadOnlySource;
use crate::directory::{TerminatingWrite, WritePtr};
use crate::directory::WritePtr;
use crate::schema::Field;
use crate::space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage;

@@ -42,7 +42,7 @@ pub struct CompositeWrite<W = WritePtr> {
    offsets: HashMap<FileAddr, u64>,
}

impl<W: TerminatingWrite + Write> CompositeWrite<W> {
impl<W: Write> CompositeWrite<W> {
    /// Crate a new API writer that writes a composite file
    /// in a given write.
    pub fn wrap(w: W) -> CompositeWrite<W> {

@@ -91,7 +91,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {

        let footer_len = (self.write.written_bytes() - footer_offset) as u32;
        footer_len.serialize(&mut self.write)?;
        self.write.terminate()
        self.write.flush()?;
        Ok(())
    }
}

@@ -230,4 +231,5 @@ mod test {
        }
    }
}

}

@@ -1,5 +1,3 @@
use crate::directory::AntiCallToken;
use crate::directory::TerminatingWrite;
use std::io;
use std::io::Write;

@@ -44,13 +42,6 @@ impl<W: Write> Write for CountingWriter<W> {
    }
}

impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
    fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
        self.flush()?;
        self.underlying.terminate_ref(token)
    }
}

#[cfg(test)]
mod test {

@@ -199,7 +199,10 @@ pub mod test {
    fn test_serialize_string() {
        assert_eq!(serialize_test(String::from("")), 1);
        assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4);
        assert_eq!(serialize_test(String::from("富士さん見える。")), 1 + 3 * 8);
        assert_eq!(
            serialize_test(String::from("富士さん見える。")),
            1 + 3 * 8
        );
    }

    #[test]
@@ -26,10 +26,9 @@ use crate::IndexWriter;
use crate::Result;
use num_cpus;
use std::borrow::BorrowMut;
use std::collections::HashSet;
use std::fmt;
#[cfg(feature = "mmap")]
use std::path::{Path, PathBuf};
use std::path::Path;
use std::sync::Arc;

fn load_metas(directory: &dyn Directory, inventory: &SegmentMetaInventory) -> Result<IndexMeta> {

@@ -217,22 +216,8 @@ impl Index {
        Index::open(mmap_directory)
    }

    /// Returns the list of the segment metas tracked by the index.
    ///
    /// Such segments can of course be part of the index,
    /// but also they could be segments being currently built or in the middle of a merge
    /// operation.
    pub fn list_all_segment_metas(&self) -> Vec<SegmentMeta> {
        self.inventory.all()
    }

    /// Creates a new segment_meta (Advanced user only).
    ///
    /// As long as the `SegmentMeta` lives, the files associated with the
    /// `SegmentMeta` are guaranteed to not be garbage collected, regardless of
    /// whether the segment is recorded as part of the index or not.
    pub fn new_segment_meta(&self, segment_id: SegmentId, max_doc: u32) -> SegmentMeta {
        self.inventory.new_segment_meta(segment_id, max_doc)
    pub(crate) fn inventory(&self) -> &SegmentMetaInventory {
        &self.inventory
    }

    /// Open the index using the provided directory

@@ -369,11 +354,6 @@ impl Index {
            .map(SegmentMeta::id)
            .collect())
    }

    /// Returns the set of corrupted files
    pub fn validate_checksum(&self) -> Result<HashSet<PathBuf>> {
        self.directory.list_damaged().map_err(Into::into)
    }
}

impl fmt::Debug for Index {

@@ -601,4 +581,5 @@ mod tests {
        assert_eq!(searcher.num_docs(), 8_000);
        assert!(mem_right_after_merge_finished < mem_right_after_commit);
    }

}
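The `Index::validate_checksum` method appearing in the hunk above is the user-facing entry point for the footer CRC machinery added in this change. A minimal usage sketch, assuming an index already exists at `index_path` (the path and error handling here are illustrative, not from the diff):

```rust
// Sketch: detecting corrupted index files via the new footer checksums.
use std::path::Path;
use tantivy::Index;

fn check(index_path: &Path) -> tantivy::Result<()> {
    let index = Index::open_in_dir(index_path)?;
    // Re-hashes every managed file and returns the set of paths whose
    // content no longer matches the CRC32 stored in its footer.
    let damaged = index.validate_checksum()?;
    if !damaged.is_empty() {
        eprintln!("corrupted files: {:?}", damaged);
    }
    Ok(())
}
```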
@@ -30,6 +30,7 @@ impl SegmentMetaInventory {
            .collect::<Vec<_>>()
    }

    #[doc(hidden)]
    pub fn new_segment_meta(&self, segment_id: SegmentId, max_doc: u32) -> SegmentMeta {
        let inner = InnerSegmentMeta {
            segment_id,

@@ -4,8 +4,6 @@ use uuid::Uuid;

#[cfg(test)]
use once_cell::sync::Lazy;
use std::error::Error;
use std::str::FromStr;
#[cfg(test)]
use std::sync::atomic;

@@ -54,51 +52,15 @@ impl SegmentId {
    /// and the rest is random.
    ///
    /// Picking the first 8 chars is ok to identify
    /// segments in a display message (e.g. a5c4dfcb).
    /// segments in a display message.
    pub fn short_uuid_string(&self) -> String {
        (&self.0.to_simple_ref().to_string()[..8]).to_string()
    }

    /// Returns a segment uuid string.
    ///
    /// It consists in 32 lowercase hexadecimal chars
    /// (e.g. a5c4dfcbdfe645089129e308e26d5523)
    pub fn uuid_string(&self) -> String {
        self.0.to_simple_ref().to_string()
    }

    /// Build a `SegmentId` string from the full uuid string.
    ///
    /// E.g. "a5c4dfcbdfe645089129e308e26d5523"
    pub fn from_uuid_string(uuid_string: &str) -> Result<SegmentId, SegmentIdParseError> {
        FromStr::from_str(uuid_string)
    }
}

/// Error type used when parsing a `SegmentId` from a string fails.
pub struct SegmentIdParseError(uuid::parser::ParseError);

impl Error for SegmentIdParseError {}

impl fmt::Debug for SegmentIdParseError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl fmt::Display for SegmentIdParseError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl FromStr for SegmentId {
    type Err = SegmentIdParseError;

    fn from_str(uuid_string: &str) -> Result<Self, SegmentIdParseError> {
        let uuid = Uuid::parse_str(uuid_string).map_err(SegmentIdParseError)?;
        Ok(SegmentId(uuid))
    }
}

impl fmt::Debug for SegmentId {

@@ -118,18 +80,3 @@ impl Ord for SegmentId {
        self.0.as_bytes().cmp(other.0.as_bytes())
    }
}

#[cfg(test)]
mod tests {
    use super::SegmentId;

    #[test]
    fn test_to_uuid_string() {
        let full_uuid = "a5c4dfcbdfe645089129e308e26d5523";
        let segment_id = SegmentId::from_uuid_string(full_uuid).unwrap();
        assert_eq!(segment_id.uuid_string(), full_uuid);
        assert_eq!(segment_id.short_uuid_string(), "a5c4dfcb");
        // one extra char
        assert!(SegmentId::from_uuid_string("a5c4dfcbdfe645089129e308e26d5523b").is_err());
    }
}
@@ -118,8 +118,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
    ///
    /// Specifically, subsequent writes or flushes should
    /// have no effect on the returned `ReadOnlySource` object.
    ///
    /// You should only use this to read files create with [`open_write`]
    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;

    /// Removes a file

@@ -159,8 +157,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
    /// atomic_write.
    ///
    /// This should only be used for small files.
    ///
    /// You should only use this to read files create with [`atomic_write`]
    fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;

    /// Atomically replace the content of a file with data.
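The doc comments above pair `atomic_read` with `atomic_write`. A minimal sketch of that pairing on an in-memory directory (the file name and payload are arbitrary; `RAMDirectory::create` matches the API of this era):

```rust
// Sketch: atomic_write / atomic_read round-trip on a RAMDirectory.
use std::path::Path;
use tantivy::directory::{Directory, RAMDirectory};

fn roundtrip() -> tantivy::Result<()> {
    let mut dir = RAMDirectory::create();
    let path = Path::new("meta.json");
    // Replaces the whole file content in one atomic step.
    dir.atomic_write(path, b"{\"segments\": []}")?;
    // Only valid for files written via `atomic_write`.
    let bytes = dir.atomic_read(path)?;
    assert_eq!(&bytes[..], b"{\"segments\": []}");
    Ok(())
}
```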
@@ -1,213 +0,0 @@
use crate::directory::read_only_source::ReadOnlySource;
use crate::directory::{AntiCallToken, TerminatingWrite};
use byteorder::{ByteOrder, LittleEndian};
use crc32fast::Hasher;
use std::io;
use std::io::Write;

const COMMON_FOOTER_SIZE: usize = 4 * 5;

#[derive(Debug, Clone, PartialEq)]
pub struct Footer {
    pub tantivy_version: (u32, u32, u32),
    pub meta: String,
    pub versioned_footer: VersionedFooter,
}

impl Footer {
    pub fn new(versioned_footer: VersionedFooter) -> Self {
        let tantivy_version = (
            env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
            env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
            env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
        );
        Footer {
            tantivy_version,
            meta: format!(
                "tantivy {}.{}.{}, index v{}",
                tantivy_version.0,
                tantivy_version.1,
                tantivy_version.2,
                versioned_footer.version()
            ),
            versioned_footer,
        }
    }

    pub fn to_bytes(&self) -> Vec<u8> {
        let mut res = self.versioned_footer.to_bytes();
        res.extend_from_slice(self.meta.as_bytes());
        let len = res.len();
        res.resize(len + COMMON_FOOTER_SIZE, 0);
        let mut common_footer = &mut res[len..];
        LittleEndian::write_u32(&mut common_footer, self.meta.len() as u32);
        LittleEndian::write_u32(&mut common_footer[4..], self.tantivy_version.0);
        LittleEndian::write_u32(&mut common_footer[8..], self.tantivy_version.1);
        LittleEndian::write_u32(&mut common_footer[12..], self.tantivy_version.2);
        LittleEndian::write_u32(&mut common_footer[16..], (len + COMMON_FOOTER_SIZE) as u32);
        res
    }

    pub fn from_bytes(data: &[u8]) -> Result<Self, io::Error> {
        let len = data.len();
        if len < COMMON_FOOTER_SIZE + 4 {
            // 4 bytes for index version, stored in versioned footer
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                format!("File corrupted. The footer len must be over 24, while the entire file len is {}", len)
            ));
        }

        let size = LittleEndian::read_u32(&data[len - 4..]) as usize;
        if len < size as usize {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                format!(
                    "File corrupted. The footer len is {}, while the entire file len is {}",
                    size, len
                ),
            ));
        }
        let footer = &data[len - size as usize..];
        let meta_len = LittleEndian::read_u32(&footer[size - 20..]) as usize;
        let tantivy_major = LittleEndian::read_u32(&footer[size - 16..]);
        let tantivy_minor = LittleEndian::read_u32(&footer[size - 12..]);
        let tantivy_patch = LittleEndian::read_u32(&footer[size - 8..]);
        Ok(Footer {
            tantivy_version: (tantivy_major, tantivy_minor, tantivy_patch),
            meta: String::from_utf8_lossy(&footer[size - meta_len - 20..size - 20]).into_owned(),
            versioned_footer: VersionedFooter::from_bytes(&footer[..size - meta_len - 20])?,
        })
    }

    pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
        let footer = Footer::from_bytes(source.as_slice())?;
        let reader = source.slice_to(source.as_slice().len() - footer.size());
        Ok((footer, reader))
    }

    pub fn size(&self) -> usize {
        self.versioned_footer.size() as usize + self.meta.len() + 20
    }
}

#[derive(Debug, Clone, PartialEq)]
pub enum VersionedFooter {
    UnknownVersion { version: u32, size: u32 },
    V0(u32), // crc
}

impl VersionedFooter {
    pub fn to_bytes(&self) -> Vec<u8> {
        match self {
            VersionedFooter::V0(crc) => {
                let mut res = vec![0; 8];
                LittleEndian::write_u32(&mut res, 0);
                LittleEndian::write_u32(&mut res[4..], *crc);
                res
            }
            VersionedFooter::UnknownVersion { .. } => {
                panic!("Unsupported index should never get serialized");
            }
        }
    }

    pub fn from_bytes(footer: &[u8]) -> Result<Self, io::Error> {
        assert!(footer.len() >= 4);
        let version = LittleEndian::read_u32(footer);
        match version {
            0 => {
                if footer.len() == 8 {
                    Ok(VersionedFooter::V0(LittleEndian::read_u32(&footer[4..])))
                } else {
                    Err(io::Error::new(
                        io::ErrorKind::UnexpectedEof,
                        format!(
                            "File corrupted. The versioned footer len is {}, while it should be 8",
                            footer.len()
                        ),
                    ))
                }
            }
            version => Ok(VersionedFooter::UnknownVersion {
                version,
                size: footer.len() as u32,
            }),
        }
    }

    pub fn size(&self) -> u32 {
        match self {
            VersionedFooter::V0(_) => 8,
            VersionedFooter::UnknownVersion { size, .. } => *size,
        }
    }

    pub fn version(&self) -> u32 {
        match self {
            VersionedFooter::V0(_) => 0,
            VersionedFooter::UnknownVersion { version, .. } => *version,
        }
    }

    pub fn crc(&self) -> Option<u32> {
        match self {
            VersionedFooter::V0(crc) => Some(*crc),
            VersionedFooter::UnknownVersion { .. } => None,
        }
    }
}

pub(crate) struct FooterProxy<W: TerminatingWrite> {
    /// always Some except after terminate call
    hasher: Option<Hasher>,
    /// always Some except after terminate call
    writer: Option<W>,
}

impl<W: TerminatingWrite> FooterProxy<W> {
    pub fn new(writer: W) -> Self {
        FooterProxy {
            hasher: Some(Hasher::new()),
            writer: Some(writer),
        }
    }
}

impl<W: TerminatingWrite> Write for FooterProxy<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let count = self.writer.as_mut().unwrap().write(buf)?;
        self.hasher.as_mut().unwrap().update(&buf[..count]);
        Ok(count)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.writer.as_mut().unwrap().flush()
    }
}

impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
        let crc = self.hasher.take().unwrap().finalize();

        let footer = Footer::new(VersionedFooter::V0(crc)).to_bytes();
        let mut writer = self.writer.take().unwrap();
        writer.write_all(&footer)?;
        writer.terminate()
    }
}

#[cfg(test)]
mod tests {
    use crate::directory::footer::{Footer, VersionedFooter};

    #[test]
    fn test_serialize_deserialize_footer() {
        let crc = 123456;
        let footer = Footer::new(VersionedFooter::V0(crc));
        let footer_bytes = footer.to_bytes();

        assert_eq!(Footer::from_bytes(&footer_bytes).unwrap(), footer);
    }
}
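For orientation, the byte layout that `to_bytes` and `from_bytes` above agree on, written out as a small check. The sizes follow directly from the code: a V0 versioned footer is 8 bytes (version + crc) and the common footer is `4 * 5 = 20` bytes, all u32s little-endian.

```rust
// Layout sketch of a serialized footer:
//   [versioned footer][meta bytes][meta_len][major][minor][patch][total_len]
//        8 (for V0)     meta.len()    4        4      4      4        4
fn footer_len_for_v0(meta_len: usize) -> usize {
    8 + meta_len + 20 // matches Footer::size() for VersionedFooter::V0
}
```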
@@ -1,6 +1,5 @@
use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::DirectoryLock;
use crate::directory::Lock;
use crate::directory::META_LOCK;

@@ -9,7 +8,6 @@ use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption;
use crate::Directory;
use crate::Result;
use crc32fast::Hasher;
use serde_json;
use std::collections::HashSet;
use std::io;

@@ -209,59 +207,17 @@ impl ManagedDirectory {
        }
        Ok(())
    }

    /// Verify checksum of a managed file
    pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
        let reader = self.directory.open_read(path)?;
        let (footer, data) = Footer::extract_footer(reader)
            .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
        let mut hasher = Hasher::new();
        hasher.update(data.as_slice());
        let crc = hasher.finalize();
        Ok(footer
            .versioned_footer
            .crc()
            .map(|v| v == crc)
            .unwrap_or(false))
    }

    /// List files for which checksum does not match content
    pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
        let mut hashset = HashSet::new();
        let managed_paths = self
            .meta_informations
            .read()
            .expect("Managed directory rlock poisoned in list damaged.")
            .managed_paths
            .clone();

        for path in managed_paths.into_iter() {
            if !self.validate_checksum(&path)? {
                hashset.insert(path);
            }
        }
        Ok(hashset)
    }
}

impl Directory for ManagedDirectory {
    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
        let read_only_source = self.directory.open_read(path)?;
        let (_footer, reader) = Footer::extract_footer(read_only_source)
            .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
        Ok(reader)
        self.directory.open_read(path)
    }

    fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
        self.register_file_as_managed(path)
            .map_err(|e| IOError::with_path(path.to_owned(), e))?;
        Ok(io::BufWriter::new(Box::new(FooterProxy::new(
            self.directory
                .open_write(path)?
                .into_inner()
                .map_err(|_| ())
                .expect("buffer should be empty"),
        ))))
        self.directory.open_write(path)
    }

    fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {

@@ -303,9 +259,8 @@ impl Clone for ManagedDirectory {
#[cfg(test)]
mod tests_mmap_specific {

    use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite};
    use crate::directory::{Directory, ManagedDirectory, MmapDirectory};
    use std::collections::HashSet;
    use std::fs::OpenOptions;
    use std::io::Write;
    use std::path::{Path, PathBuf};
    use tempfile::TempDir;

@@ -320,8 +275,8 @@ mod tests_mmap_specific {
        {
            let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
            let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
            let write_file = managed_directory.open_write(test_path1).unwrap();
            write_file.terminate().unwrap();
            let mut write_file = managed_directory.open_write(test_path1).unwrap();
            write_file.flush().unwrap();
            managed_directory
                .atomic_write(test_path2, &[0u8, 1u8])
                .unwrap();

@@ -355,9 +310,9 @@ mod tests_mmap_specific {

        let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
        let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
        let mut write = managed_directory.open_write(test_path1).unwrap();
        write.write_all(&[0u8, 1u8]).unwrap();
        write.terminate().unwrap();
        managed_directory
            .atomic_write(test_path1, &vec![0u8, 1u8])
            .unwrap();
        assert!(managed_directory.exists(test_path1));

        let _mmap_read = managed_directory.open_read(test_path1).unwrap();

@@ -376,38 +331,4 @@ mod tests_mmap_specific {
        }
    }

    #[test]
    fn test_checksum() {
        let test_path1: &'static Path = Path::new("some_path_for_test");
        let test_path2: &'static Path = Path::new("other_test_path");

        let tempdir = TempDir::new().unwrap();
        let tempdir_path = PathBuf::from(tempdir.path());

        let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
        let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
        let mut write = managed_directory.open_write(test_path1).unwrap();
        write.write_all(&[0u8, 1u8]).unwrap();
        write.terminate().unwrap();

        let mut write = managed_directory.open_write(test_path2).unwrap();
        write.write_all(&[3u8, 4u8, 5u8]).unwrap();
        write.terminate().unwrap();

        assert!(managed_directory.list_damaged().unwrap().is_empty());

        let mut corrupted_path = tempdir_path.clone();
        corrupted_path.push(test_path2);
        let mut file = OpenOptions::new()
            .write(true)
            .open(&corrupted_path)
            .unwrap();
        file.write_all(&[255u8]).unwrap();
        file.flush().unwrap();
        drop(file);

        let damaged = managed_directory.list_damaged().unwrap();
        assert_eq!(damaged.len(), 1);
        assert!(damaged.contains(test_path2));
    }
}
@@ -11,7 +11,6 @@ use crate::directory::error::{
    DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::read_only_source::BoxedData;
use crate::directory::AntiCallToken;
use crate::directory::Directory;
use crate::directory::DirectoryLock;
use crate::directory::Lock;

@@ -19,7 +18,7 @@ use crate::directory::ReadOnlySource;
use crate::directory::WatchCallback;
use crate::directory::WatchCallbackList;
use crate::directory::WatchHandle;
use crate::directory::{TerminatingWrite, WritePtr};
use crate::directory::WritePtr;
use atomicwrites;
use memmap::Mmap;
use std::collections::HashMap;

@@ -142,28 +141,42 @@ impl MmapCache {
    }
}

struct WatcherWrapper {
struct InnerWatcherWrapper {
    _watcher: Mutex<notify::RecommendedWatcher>,
    watcher_router: Arc<WatchCallbackList>,
    watcher_router: WatchCallbackList,
}

impl InnerWatcherWrapper {
    pub fn new(path: &Path) -> Result<(Self, Receiver<notify::RawEvent>), notify::Error> {
        let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
        // We need to initialize the
        let mut watcher = notify::raw_watcher(tx)?;
        watcher.watch(path, RecursiveMode::Recursive)?;
        let inner = InnerWatcherWrapper {
            _watcher: Mutex::new(watcher),
            watcher_router: Default::default(),
        };
        Ok((inner, watcher_recv))
    }
}

#[derive(Clone)]
struct WatcherWrapper {
    inner: Arc<InnerWatcherWrapper>,
}

impl WatcherWrapper {
    pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
        let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
        // We need to initialize the
        let watcher = notify::raw_watcher(tx)
            .and_then(|mut watcher| {
                watcher.watch(path, RecursiveMode::Recursive)?;
                Ok(watcher)
            })
            .map_err(|err| match err {
                notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
                _ => {
                    panic!("Unknown error while starting watching directory {:?}", path);
                }
            })?;
        let watcher_router: Arc<WatchCallbackList> = Default::default();
        let watcher_router_clone = watcher_router.clone();
        let (inner, watcher_recv) = InnerWatcherWrapper::new(path).map_err(|err| match err {
            notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
            _ => {
                panic!("Unknown error while starting watching directory {:?}", path);
            }
        })?;
        let watcher_wrapper = WatcherWrapper {
            inner: Arc::new(inner),
        };
        let watcher_wrapper_clone = watcher_wrapper.clone();
        thread::Builder::new()
            .name("meta-file-watch-thread".to_string())
            .spawn(move || {

@@ -174,7 +187,7 @@ impl WatcherWrapper {
                // We might want to be more accurate than this at one point.
                if let Some(filename) = changed_path.file_name() {
                    if filename == *META_FILEPATH {
                        watcher_router_clone.broadcast();
                        watcher_wrapper_clone.inner.watcher_router.broadcast();
                    }
                }
            }

@@ -187,15 +200,13 @@ impl WatcherWrapper {
                    }
                }
            }
        })?;
        Ok(WatcherWrapper {
            _watcher: Mutex::new(watcher),
            watcher_router,
        })
            })
            .expect("Failed to spawn thread to watch meta.json");
        Ok(watcher_wrapper)
    }

    pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle {
        self.watcher_router.subscribe(watch_callback)
        self.inner.watcher_router.subscribe(watch_callback)
    }
}

@@ -254,7 +265,7 @@ impl MmapDirectoryInner {
            }
        }
        if let Some(watch_wrapper) = self.watcher.write().unwrap().as_mut() {
            Ok(watch_wrapper.watch(watch_callback))
            return Ok(watch_wrapper.watch(watch_callback));
        } else {
            unreachable!("At this point, watch wrapper is supposed to be initialized");
        }

@@ -401,12 +412,6 @@ impl Seek for SafeFileWriter {
    }
}

impl TerminatingWrite for SafeFileWriter {
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
        self.flush()
    }
}

impl Directory for MmapDirectory {
    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
        debug!("Open Read {:?}", path);
@@ -9,7 +9,6 @@ mod mmap_directory;

mod directory;
mod directory_lock;
mod footer;
mod managed_directory;
mod ram_directory;
mod read_only_source;

@@ -25,49 +24,18 @@ pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource;
pub(crate) use self::watch_event_router::WatchCallbackList;
pub use self::watch_event_router::{WatchCallback, WatchHandle};
use std::io::{self, BufWriter, Write};
use std::io::{BufWriter, Write};

#[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory;

pub use self::managed_directory::ManagedDirectory;

/// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
pub struct AntiCallToken(());

/// Trait used to indicate when no more write need to be done on a writer
pub trait TerminatingWrite: Write {
    /// Indicate that the writer will no longer be used. Internally call terminate_ref.
    fn terminate(mut self) -> io::Result<()>
    where
        Self: Sized,
    {
        self.terminate_ref(AntiCallToken(()))
    }

    /// You should implement this function to define custom behavior.
    /// This function should flush any buffer it may hold.
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()>;
}

impl<W: TerminatingWrite + ?Sized> TerminatingWrite for Box<W> {
    fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
        self.as_mut().terminate_ref(token)
    }
}

impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
    fn terminate_ref(&mut self, a: AntiCallToken) -> io::Result<()> {
        self.flush()?;
        self.get_mut().terminate_ref(a)
    }
}

/// Write object for Directory.
///
/// `WritePtr` are required to implement both Write
/// and Seek.
pub type WritePtr = BufWriter<Box<dyn TerminatingWrite>>;
pub type WritePtr = BufWriter<Box<dyn Write>>;

#[cfg(test)]
mod tests;
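The `TerminatingWrite` trait above is the hook the footer machinery relies on: `terminate` consumes the writer, and the actual work happens in `terminate_ref`, whose `AntiCallToken` argument can only be built inside the directory module, discouraging direct calls. A minimal sketch of a custom implementation, mirroring the `VecWriter` and `SafeFileWriter` impls in this diff (it assumes `TerminatingWrite` and `AntiCallToken` are re-exported from `tantivy::directory` as declared here):

```rust
// Sketch: implementing TerminatingWrite for a toy writer.
use std::io::{self, Write};
use tantivy::directory::{AntiCallToken, TerminatingWrite};

struct NullWriter;

impl Write for NullWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        Ok(buf.len()) // discard everything
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

impl TerminatingWrite for NullWriter {
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
        // Last chance to flush any buffered bytes before the writer is dropped.
        self.flush()
    }
}
```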
@@ -1,9 +1,8 @@
use crate::core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList;
use crate::directory::WritePtr;
use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use crate::directory::{TerminatingWrite, WritePtr};
use fail::fail_point;
use std::collections::HashMap;
use std::fmt;

@@ -72,12 +71,6 @@ impl Write for VecWriter {
    }
}

impl TerminatingWrite for VecWriter {
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
        self.flush()
    }
}

#[derive(Default)]
struct InnerDirectory {
    fs: HashMap<PathBuf, ReadOnlySource>,

@@ -127,7 +127,7 @@ fn test_watch(directory: &mut dyn Directory) {
    assert!(directory
        .atomic_write(Path::new("meta.json"), b"random_test_data_2")
        .is_ok());
    for _ in 0..1_000 {
    for _ in 0..100 {
        if counter.load(Ordering::SeqCst) > i {
            break;
        }

@@ -152,4 +152,5 @@ mod tests {
        thread::sleep(Duration::from_millis(WAIT_TIME));
        assert_eq!(2, counter.load(Ordering::SeqCst));
    }

}
@@ -429,6 +429,7 @@ mod tests {
            }
        }
    }

}

#[cfg(all(test, feature = "unstable"))]
@@ -436,9 +437,9 @@ mod bench {
    use super::tests::FIELD;
    use super::tests::{generate_permutation, SCHEMA};
    use super::*;
    use crate::common::CompositeFile;
    use crate::directory::{Directory, RAMDirectory, WritePtr};
    use crate::fastfield::FastFieldReader;
    use common::CompositeFile;
    use directory::{Directory, RAMDirectory, WritePtr};
    use fastfield::FastFieldReader;
    use std::collections::HashMap;
    use std::path::Path;
    use test::{self, Bencher};

@@ -536,4 +537,5 @@ mod bench {
        });
    }
}

}

@@ -5,8 +5,8 @@ use crate::postings::UnorderedTermId;
use crate::schema::{Document, Field};
use crate::termdict::TermOrdinal;
use crate::DocId;
use fnv::FnvHashMap;
use itertools::Itertools;
use std::collections::HashMap;
use std::io;

/// Writer for multi-valued (as in, more than one value per document)

@@ -102,7 +102,7 @@ impl MultiValueIntFastFieldWriter {
    pub fn serialize(
        &self,
        serializer: &mut FastFieldSerializer,
        mapping_opt: Option<&FnvHashMap<UnorderedTermId, TermOrdinal>>,
        mapping_opt: Option<&HashMap<UnorderedTermId, TermOrdinal>>,
    ) -> io::Result<()> {
        {
            // writing the offset index

@@ -6,7 +6,6 @@ use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use crate::postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldType, Schema};
use crate::termdict::TermOrdinal;
use fnv::FnvHashMap;
use std::collections::HashMap;
use std::io;

@@ -117,7 +116,7 @@ impl FastFieldsWriter {
    pub fn serialize(
        &self,
        serializer: &mut FastFieldSerializer,
        mapping: &HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>,
        mapping: &HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>,
    ) -> io::Result<()> {
        for field_writer in &self.single_value_writers {
            field_writer.serialize(serializer)?;
@@ -8,7 +8,6 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::SegmentReader;
use crate::directory::DirectoryLock;
use crate::directory::TerminatingWrite;
use crate::docset::DocSet;
use crate::error::TantivyError;
use crate::fastfield::write_delete_bitset;

@@ -169,7 +168,6 @@ pub(crate) fn advance_deletes(
            segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
            let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
            write_delete_bitset(&delete_bitset, &mut delete_file)?;
            delete_file.terminate()?;
        }
    }
    segment_entry.set_meta(segment.meta().clone());

@@ -211,7 +209,10 @@ fn index_documents(
    assert!(num_docs > 0);

    let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?;
    let segment_meta = segment.index().new_segment_meta(segment_id, num_docs);
    let segment_meta = segment
        .index()
        .inventory()
        .new_segment_meta(segment_id, num_docs);

    let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());

@@ -449,10 +450,12 @@ impl IndexWriter {
    /// by clearing and resubmitting necessary documents
    ///
    /// ```rust
    /// use tantivy::collector::TopDocs;
    /// #[macro_use]
    /// extern crate tantivy;
    /// use tantivy::query::QueryParser;
    /// use tantivy::collector::TopDocs;
    /// use tantivy::schema::*;
    /// use tantivy::{doc, Index};
    /// use tantivy::Index;
    ///
    /// fn main() -> tantivy::Result<()> {
    ///     let mut schema_builder = Schema::builder();

@@ -1179,4 +1182,5 @@ mod tests {
        assert!(clear_again.is_ok());
        assert!(commit_again.is_ok());
    }

}

@@ -134,4 +134,5 @@ mod tests {
        }
        assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);
    }

}

@@ -126,7 +126,9 @@ fn perform_merge(

    let num_docs = merger.write(segment_serializer)?;

    let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
    let segment_meta = index
        .inventory()
        .new_segment_meta(merged_segment.id(), num_docs);

    let after_merge_segment_entry = SegmentEntry::new(segment_meta.clone(), delete_cursor, None);
    Ok(after_merge_segment_entry)

@@ -280,7 +282,7 @@ impl SegmentUpdater {
    fn list_files(&self) -> HashSet<PathBuf> {
        let mut files = HashSet::new();
        files.insert(META_FILEPATH.to_path_buf());
        for segment_meta in self.0.index.list_all_segment_metas() {
        for segment_meta in self.0.index.inventory().all() {
            files.extend(segment_meta.list_files());
        }
        files

@@ -296,4 +296,5 @@ mod tests {
        assert_eq!(initial_table_size(10_000_000).unwrap(), 17);
        assert_eq!(initial_table_size(1_000_000_000).unwrap(), 19);
    }

}

src/lib.rs (14 changed lines)
@@ -1,4 +1,5 @@
|
||||
#![doc(html_logo_url = "http://fulmicoton.com/tantivy-logo/tantivy-logo.png")]
|
||||
#![recursion_limit = "100"]
|
||||
#![cfg_attr(all(feature = "unstable", test), feature(test))]
|
||||
#![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))]
|
||||
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
|
||||
@@ -10,12 +11,21 @@
|
||||
//! Think `Lucene`, but in Rust.
|
||||
//!
|
||||
//! ```rust
|
||||
|
||||
//! # extern crate tempfile;
|
||||
//! #
|
||||
//! #[macro_use]
|
||||
//! extern crate tantivy;
|
||||
//!
|
||||
//! // ...
|
||||
//!
|
||||
//! # use std::path::Path;
|
||||
//! # use tempfile::TempDir;
|
||||
//! # use tantivy::Index;
|
||||
//! # use tantivy::schema::*;
|
||||
//! # use tantivy::{Score, DocAddress};
|
||||
//! # use tantivy::collector::TopDocs;
|
||||
//! # use tantivy::query::QueryParser;
|
||||
//! # use tantivy::schema::*;
|
||||
//! # use tantivy::{doc, DocAddress, Index, Score};
|
||||
//! #
|
||||
//! # fn main() {
|
||||
//! # // Let's create a temporary directory for the
|
||||
|
||||
@@ -22,9 +22,11 @@
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// ```
|
||||
/// #[macro_use]
|
||||
/// extern crate tantivy;
|
||||
///
|
||||
/// use tantivy::schema::{Schema, TEXT, FAST};
|
||||
/// use tantivy::doc;
|
||||
///
|
||||
/// //...
|
||||
///
|
||||
|
||||
@@ -274,15 +274,13 @@ pub mod tests {
|
||||
mod bench {
|
||||
|
||||
use super::*;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::Rng;
|
||||
use rand::SeedableRng;
|
||||
use rand::{Rng, XorShiftRng};
|
||||
use test::Bencher;
|
||||
|
||||
fn generate_array_with_seed(n: usize, ratio: f64, seed_val: u8) -> Vec<u32> {
|
||||
let mut seed: [u8; 32] = [0; 32];
|
||||
seed[31] = seed_val;
|
||||
let mut rng = StdRng::from_seed(seed);
|
||||
let seed: &[u8; 16] = &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, seed_val];
|
||||
let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed);
|
||||
(0u32..).filter(|_| rng.gen_bool(ratio)).take(n).collect()
|
||||
}
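The seeding hunk above swaps RNGs because the two branches pin different `rand` releases: `StdRng` takes a 32-byte seed, while the older `XorShiftRng` takes a 16-byte one. A minimal sketch of the `StdRng` side (the `XorShiftRng` calls mirror it with the shorter seed); `seed_val` is the only varying byte, so each benchmark run stays reproducible:

```rust
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};

fn generate(n: usize, ratio: f64, seed_val: u8) -> Vec<u32> {
    // Deterministic seed: all zeroes except the last byte.
    let mut seed = [0u8; 32];
    seed[31] = seed_val;
    let mut rng = StdRng::from_seed(seed);
    // Keep roughly `ratio` of the candidate doc ids, stopping after `n`.
    (0u32..).filter(|_| rng.gen_bool(ratio)).take(n).collect()
}
```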
@@ -622,23 +622,23 @@ pub mod tests {
assert!(!postings_unopt.advance());
}
}

}

#[cfg(all(test, feature = "unstable"))]
mod bench {

use super::tests::*;
use crate::docset::SkipResult;
use crate::query::Intersection;
use crate::schema::IndexRecordOption;
use crate::tests;
use crate::DocSet;
use docset::SkipResult;
use query::Intersection;
use schema::IndexRecordOption;
use test::{self, Bencher};
use tests;
use DocSet;

#[bench]
fn bench_segment_postings(b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);

b.iter(|| {

@@ -652,8 +652,7 @@ mod bench {

#[bench]
fn bench_segment_intersection(b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
let segment_postings_a = segment_reader
@@ -683,8 +682,7 @@ mod bench {
}

fn bench_skip_next(p: f64, b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
let docs = tests::sample(segment_reader.num_docs(), p);

@@ -739,8 +737,7 @@ mod bench {

#[bench]
fn bench_iterate_segment_postings(b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
let n: u32 = test::black_box(17);

@@ -12,7 +12,6 @@ use crate::tokenizer::TokenStream;
use crate::tokenizer::{Token, MAX_TOKEN_LEN};
use crate::DocId;
use crate::Result;
use fnv::FnvHashMap;
use std::collections::HashMap;
use std::io;
use std::marker::PhantomData;
@@ -128,12 +127,12 @@ impl MultiFieldPostingsWriter {
pub fn serialize(
&self,
serializer: &mut InvertedIndexSerializer,
) -> Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
) -> Result<HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>> {
let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
self.term_index.iter().collect();
term_offsets.sort_unstable_by_key(|&(k, _, _)| k);

let mut unordered_term_mappings: HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>> =
let mut unordered_term_mappings: HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>> =
HashMap::new();

let field_offsets = make_field_partition(&term_offsets);
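The only difference between the two `serialize` signatures above is the hasher behind the inner map. A minimal sketch of the idea, assuming the `fnv` crate pulled in by the `use fnv::FnvHashMap;` line: FNV trades DoS resistance for speed on small integer keys such as term ids, while the API stays identical.

```rust
use fnv::FnvHashMap;
use std::collections::HashMap;

fn main() {
    // Same map API, different default hasher.
    let mut std_map: HashMap<u64, u64> = HashMap::new();
    let mut fnv_map: FnvHashMap<u64, u64> = FnvHashMap::default();
    std_map.insert(42, 0);
    fnv_map.insert(42, 0);
    assert_eq!(std_map[&42], fnv_map[&42]);
}
```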
@@ -148,7 +147,7 @@ impl MultiFieldPostingsWriter {
let unordered_term_ids = term_offsets[start..stop]
.iter()
.map(|&(_, _, bucket)| bucket);
let mapping: FnvHashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
let mapping: HashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
.enumerate()
.map(|(term_ord, unord_term_id)| {
(unord_term_id as UnorderedTermId, term_ord as TermOrdinal)

@@ -141,7 +141,10 @@ impl<'a> FieldSerializer<'a> {
FieldType::Str(ref text_options) => {
if let Some(text_indexing_options) = text_options.get_indexing_options() {
let index_option = text_indexing_options.index_option();
(index_option.has_freq(), index_option.has_positions())
(
index_option.is_termfreq_enabled(),
index_option.is_position_enabled(),
)
} else {
(false, false)
}

@@ -310,7 +310,6 @@ mod bench {
use super::super::MemoryArena;
use super::ExpUnrolledLinkedList;
use byteorder::{NativeEndian, WriteBytesExt};
use std::iter;
use test::Bencher;

const NUM_STACK: usize = 10_000;
@@ -336,10 +335,11 @@ mod bench {
fn bench_push_stack(bench: &mut Bencher) {
bench.iter(|| {
let mut heap = MemoryArena::new();
let mut stacks: Vec<ExpUnrolledLinkedList> =
iter::repeat_with(ExpUnrolledLinkedList::new)
.take(NUM_STACK)
.collect();
let mut stacks = Vec::with_capacity(100);
for _ in 0..NUM_STACK {
let mut stack = ExpUnrolledLinkedList::new();
stacks.push(stack);
}
for s in 0..NUM_STACK {
for i in 0u32..STACK_SIZE {
let t = s * 392017 % NUM_STACK;

@@ -130,4 +130,5 @@ mod tests {
assert!(!scorer.advance());
}
}

}

@@ -8,30 +8,30 @@ use crate::termdict::{TermDictionary, TermStreamer};
use crate::DocId;
use crate::TantivyError;
use crate::{Result, SkipResult};
use std::sync::Arc;
use tantivy_fst::Automaton;

/// A weight struct for Fuzzy Term and Regex Queries
pub struct AutomatonWeight<A> {
pub struct AutomatonWeight<A>
where
A: Automaton + Send + Sync + 'static,
A::State: Clone + Default + Sized,
{
field: Field,
automaton: Arc<A>,
automaton: A,
}

impl<A> AutomatonWeight<A>
where
A: Automaton + Send + Sync + 'static,
A::State: Clone + Default + Sized,
{
/// Creates a new AutomatonWeight
pub fn new<IntoArcA: Into<Arc<A>>>(field: Field, automaton: IntoArcA) -> AutomatonWeight<A> {
AutomatonWeight {
field,
automaton: automaton.into(),
}
pub fn new(field: Field, automaton: A) -> AutomatonWeight<A> {
AutomatonWeight { field, automaton }
}

fn automaton_stream<'a>(&'a self, term_dict: &'a TermDictionary) -> TermStreamer<'a, &'a A> {
let automaton: &A = &*self.automaton;
let term_stream_builder = term_dict.search(automaton);
let term_stream_builder = term_dict.search(&self.automaton);
term_stream_builder.into_stream()
}
}
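The `Arc<A>` side of this hunk exists so that one compiled automaton can back many weights without being rebuilt; `new` accepting `Into<Arc<A>>` keeps plain-automaton call sites working unchanged. A minimal in-crate sketch under those assumptions (`field` is illustrative):

```rust
use std::sync::Arc;
use tantivy_fst::Regex;

fn build_weights(field: Field) {
    // Compile the regex once...
    let regex = Arc::new(Regex::new("jap[ao]n").unwrap());
    // ...then share it: cloning the Arc is cheap and no recompilation happens.
    let _w1 = AutomatonWeight::new(field, regex.clone());
    let _w2 = AutomatonWeight::new(field, regex);
}
```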
@@ -39,6 +39,7 @@ where
impl<A> Weight for AutomatonWeight<A>
where
A: Automaton + Send + Sync + 'static,
A::State: Clone + Default + Sized,
{
fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();

@@ -216,6 +216,7 @@ mod tests {
assert!(!docset.advance());
}
}

}

#[cfg(all(test, feature = "unstable"))]
@@ -223,12 +224,13 @@ mod bench {

use super::BitSet;
use super::BitSetDocSet;
use crate::test;
use crate::tests;
use crate::DocSet;
use test;
use tests;
use DocSet;

#[bench]
fn bench_bitset_1pct_insert(b: &mut test::Bencher) {
use tests;
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
b.iter(|| {
let mut bitset = BitSet::with_max_value(1_000_000);
@@ -240,6 +242,7 @@ mod bench {

#[bench]
fn bench_bitset_1pct_clone(b: &mut test::Bencher) {
use tests;
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
let mut bitset = BitSet::with_max_value(1_000_000);
for el in els {

@@ -137,4 +137,5 @@ mod tests {
fn test_idf() {
assert_nearly_equals(idf(1, 2), 0.6931472);
}

}

@@ -247,7 +247,9 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![title, text]);
let query = query_parser.parse_query("Оксана Лифенко").unwrap();
let query = query_parser
.parse_query("Оксана Лифенко")
.unwrap();
let weight = query.weight(&searcher, true).unwrap();
let mut scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
scorer.advance();

@@ -175,4 +175,5 @@ mod tests {
sample_skip,
);
}

}

@@ -28,10 +28,12 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// containing a specific term that is within
/// Levenshtein distance
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result, Term};
/// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::FuzzyTermQuery;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result, Term};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {

@@ -45,7 +45,7 @@ pub fn intersect_scorers(mut scorers: Vec<Box<dyn Scorer>>) -> Box<dyn Scorer> {
})
}

/// Creates a `DocSet` that iterate through the intersection of two or more `DocSet`s.
/// Creates a `DocSet` that iterator through the intersection of two `DocSet`s.
pub struct Intersection<TDocSet: DocSet, TOtherDocSet: DocSet = Box<dyn Scorer>> {
left: TDocSet,
right: TDocSet,

@@ -5,7 +5,7 @@ use Score;
use SkipResult;

/// Creates a `DocSet` that iterate through the intersection of two `DocSet`s.
/// Creates a `DocSet` that iterator through the intersection of two `DocSet`s.
pub struct IntersectionTwoTerms<TDocSet> {
left: TDocSet,
right: TDocSet

@@ -12,6 +12,7 @@ mod exclude;
mod explanation;
mod fuzzy_query;
mod intersection;
mod occur;
mod phrase_query;
mod query;
mod query_parser;
@@ -42,6 +43,7 @@ pub use self::exclude::Exclude;
pub use self::explanation::Explanation;
pub use self::fuzzy_query::FuzzyTermQuery;
pub use self::intersection::intersect_scorers;
pub use self::occur::Occur;
pub use self::phrase_query::PhraseQuery;
pub use self::query::Query;
pub use self::query_parser::QueryParser;
@@ -53,7 +55,6 @@ pub use self::scorer::ConstScorer;
pub use self::scorer::Scorer;
pub use self::term_query::TermQuery;
pub use self::weight::Weight;
pub use tantivy_query_grammar::Occur;

#[cfg(test)]
mod tests {

@@ -1,6 +1,3 @@
use std::fmt;
use std::fmt::Write;

/// Defines whether a term in a query must be present,
/// should be present or must not be present.
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
@@ -21,38 +18,32 @@ impl Occur {
/// - `Should` => '?',
/// - `Must` => '+'
/// - `Not` => '-'
fn to_char(self) -> char {
pub fn to_char(self) -> char {
match self {
Occur::Should => '?',
Occur::Must => '+',
Occur::MustNot => '-',
}
}
}

/// Compose two occur values.
pub fn compose(left: Occur, right: Occur) -> Occur {
match left {
Occur::Should => right,
Occur::Must => {
if right == Occur::MustNot {
Occur::MustNot
} else {
Occur::Must
}
/// Compose two occur values.
pub fn compose_occur(left: Occur, right: Occur) -> Occur {
match left {
Occur::Should => right,
Occur::Must => {
if right == Occur::MustNot {
Occur::MustNot
} else {
Occur::Must
}
Occur::MustNot => {
if right == Occur::MustNot {
Occur::Must
} else {
Occur::MustNot
}
}
Occur::MustNot => {
if right == Occur::MustNot {
Occur::Must
} else {
Occur::MustNot
}
}
}
}

impl fmt::Display for Occur {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_char(self.to_char())
}
}
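Both spellings of the composition function (`Occur::compose` as a method, `compose_occur` as a free function) implement the same truth table: `Should` is the neutral element, and a double negation flips back to `Must`. A quick in-crate sketch of the rules, using the free-function spelling shown above:

```rust
fn main() {
    use Occur::*;
    assert_eq!(compose_occur(Should, MustNot), MustNot); // `Should` is neutral
    assert_eq!(compose_occur(Must, Should), Must);       // `Must` sticks...
    assert_eq!(compose_occur(Must, MustNot), MustNot);   // ...unless negated
    assert_eq!(compose_occur(MustNot, MustNot), Must);   // double negation cancels
}
```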
@@ -1,4 +1,6 @@
mod query_grammar;
mod query_parser;
mod user_input_ast;

pub mod logical_ast;
pub use self::query_parser::QueryParser;

@@ -1,5 +1,6 @@
use super::user_input_ast::*;
use crate::Occur;
use crate::query::occur::Occur;
use crate::query::query_parser::user_input_ast::UserInputBound;
use combine::char::*;
use combine::error::StreamError;
use combine::stream::StreamErrorFor;
@@ -1,5 +1,9 @@
use super::logical_ast::*;
use super::query_grammar::parse_to_ast;
use super::user_input_ast::*;
use crate::core::Index;
use crate::query::occur::compose_occur;
use crate::query::query_parser::logical_ast::LogicalAST;
use crate::query::AllQuery;
use crate::query::BooleanQuery;
use crate::query::EmptyQuery;
@@ -12,11 +16,11 @@ use crate::schema::IndexRecordOption;
use crate::schema::{Field, Schema};
use crate::schema::{FieldType, Term};
use crate::tokenizer::TokenizerManager;
use combine::Parser;
use std::borrow::Cow;
use std::num::{ParseFloatError, ParseIntError};
use std::ops::Bound;
use std::str::FromStr;
use tantivy_query_grammar::{UserInputAST, UserInputBound, UserInputLeaf};

/// Possible error that may happen when parsing a query.
#[derive(Debug, PartialEq, Eq, Fail)]
@@ -218,8 +222,9 @@ impl QueryParser {

/// Parse the user query into an AST.
fn parse_query_to_logical_ast(&self, query: &str) -> Result<LogicalAST, QueryParserError> {
let user_input_ast =
tantivy_query_grammar::parse_query(query).map_err(|_| QueryParserError::SyntaxError)?;
let (user_input_ast, _remaining) = parse_to_ast()
.parse(query)
.map_err(|_| QueryParserError::SyntaxError)?;
self.compute_logical_ast(user_input_ast)
}
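Either way, parsing runs the same two-stage pipeline: a schema-agnostic grammar pass yields a `UserInputAST`, which `compute_logical_ast` then lowers against the index schema. A sketch of a pure syntax check built on the crate-based entry point shown above (only the first stage, no schema involved):

```rust
fn is_well_formed(query: &str) -> bool {
    // Stage one only: does the grammar accept the raw string at all?
    tantivy_query_grammar::parse_query(query).is_ok()
}
```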
@@ -394,7 +399,7 @@ impl QueryParser {
let mut logical_sub_queries: Vec<(Occur, LogicalAST)> = Vec::new();
for sub_query in sub_queries {
let (occur, sub_ast) = self.compute_logical_ast_with_occur(sub_query)?;
let new_occur = Occur::compose(default_occur, occur);
let new_occur = compose_occur(default_occur, occur);
logical_sub_queries.push((new_occur, sub_ast));
}
Ok((Occur::Should, LogicalAST::Clause(logical_sub_queries)))
@@ -402,7 +407,7 @@ impl QueryParser {
UserInputAST::Unary(left_occur, subquery) => {
let (right_occur, logical_sub_queries) =
self.compute_logical_ast_with_occur(*subquery)?;
Ok((Occur::compose(left_occur, right_occur), logical_sub_queries))
Ok((compose_occur(left_occur, right_occur), logical_sub_queries))
}
UserInputAST::Leaf(leaf) => {
let result_ast = self.compute_logical_ast_from_leaf(*leaf)?;

@@ -1,7 +1,7 @@
use std::fmt;
use std::fmt::{Debug, Formatter};

use crate::Occur;
use crate::query::Occur;

#[derive(PartialEq)]
pub enum UserInputLeaf {
@@ -151,7 +151,7 @@ impl fmt::Debug for UserInputAST {
Ok(())
}
UserInputAST::Unary(ref occur, ref subquery) => {
write!(formatter, "{}({:?})", occur, subquery)
write!(formatter, "{}({:?})", occur.to_char(), subquery)
}
UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
}
@@ -38,10 +38,14 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// # Example
///
/// ```rust
/// # use tantivy::collector::Count;
/// # use tantivy::query::RangeQuery;
///
/// # #[macro_use]
/// # extern crate tantivy;
/// # use tantivy::Index;
/// # use tantivy::schema::{Schema, INDEXED};
/// # use tantivy::{doc, Index, Result};
/// # use tantivy::collector::Count;
/// # use tantivy::Result;
/// # use tantivy::query::RangeQuery;
/// #
/// # fn run() -> Result<()> {
/// # let mut schema_builder = Schema::builder();
@@ -479,4 +483,5 @@ mod tests {
91
);
}

}

@@ -4,18 +4,22 @@ use crate::schema::Field;
use crate::Result;
use crate::Searcher;
use std::clone::Clone;
use std::sync::Arc;
use tantivy_fst::Regex;

/// A Regex Query matches all of the documents
// A Regex Query matches all of the documents
/// containing a specific term that matches
/// a regex pattern.
/// a regex pattern
/// A Fuzzy Query matches all of the documents
/// containing a specific term that is within
/// Levenshtein distance
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result, Term};
/// use tantivy::collector::Count;
/// use tantivy::query::RegexQuery;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result, Term};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {

@@ -44,7 +48,7 @@ use tantivy_fst::Regex;
/// let searcher = reader.searcher();
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::from_pattern("d[ai]{2}ry", title)?;
/// let query = RegexQuery::new("d[ai]{2}ry".to_string(), title);
/// let count = searcher.search(&query, &Count)?;
/// assert_eq!(count, 3);
/// Ok(())
@@ -52,34 +56,30 @@ use tantivy_fst::Regex;
/// ```
#[derive(Debug, Clone)]
pub struct RegexQuery {
regex: Arc<Regex>,
regex_pattern: String,
field: Field,
}

impl RegexQuery {
/// Creates a new RegexQuery from a given pattern
pub fn from_pattern(regex_pattern: &str, field: Field) -> Result<Self> {
let regex = Regex::new(&regex_pattern)
.map_err(|_| TantivyError::InvalidArgument(regex_pattern.to_string()))?;
Ok(RegexQuery::from_regex(regex, field))
}

/// Creates a new RegexQuery from a fully built Regex
pub fn from_regex<T: Into<Arc<Regex>>>(regex: T, field: Field) -> Self {
/// Creates a new Fuzzy Query
pub fn new(regex_pattern: String, field: Field) -> RegexQuery {
RegexQuery {
regex: regex.into(),
regex_pattern,
field,
}
}

fn specialized_weight(&self) -> AutomatonWeight<Regex> {
AutomatonWeight::new(self.field, self.regex.clone())
fn specialized_weight(&self) -> Result<AutomatonWeight<Regex>> {
let automaton = Regex::new(&self.regex_pattern)
.map_err(|_| TantivyError::InvalidArgument(self.regex_pattern.clone()))?;

Ok(AutomatonWeight::new(self.field, automaton))
}
}

impl Query for RegexQuery {
fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<dyn Weight>> {
Ok(Box::new(self.specialized_weight()))
Ok(Box::new(self.specialized_weight()?))
}
}
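The hunks above toggle `RegexQuery` between two construction styles, so the snippets below come from different branches and are alternatives rather than a single program (`country_field` is illustrative):

```rust
// Eager branch: compile at construction, so building the query can fail.
let q1 = RegexQuery::from_pattern("jap[ao]n", country_field)?;
// ...or hand over an already compiled Regex without recompiling it:
let q2 = RegexQuery::from_regex(Regex::new("jap[ao]n").unwrap(), country_field);

// Lazy branch: store the pattern as a String; an invalid pattern only
// surfaces as an error once `weight()` compiles it at search time.
let q3 = RegexQuery::new("jap[ao]n".to_string(), country_field);
```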
@@ -87,14 +87,13 @@ impl Query for RegexQuery {
mod test {
use super::RegexQuery;
use crate::collector::TopDocs;
use crate::schema::Schema;
use crate::schema::TEXT;
use crate::schema::{Field, Schema};
use crate::tests::assert_nearly_equals;
use crate::{Index, IndexReader};
use std::sync::Arc;
use tantivy_fst::Regex;
use crate::Index;

fn build_test_index() -> (IndexReader, Field) {
#[test]
pub fn test_regex_query() {
let mut schema_builder = Schema::builder();
let country_field = schema_builder.add_text_field("country", TEXT);
let schema = schema_builder.build();
@@ -110,65 +109,20 @@ mod test {
index_writer.commit().unwrap();
}
let reader = index.reader().unwrap();

(reader, country_field)
}

fn verify_regex_query(
query_matching_one: RegexQuery,
query_matching_zero: RegexQuery,
reader: IndexReader,
) {
let searcher = reader.searcher();
{
let regex_query = RegexQuery::new("jap[ao]n".to_string(), country_field);
let scored_docs = searcher
.search(&query_matching_one, &TopDocs::with_limit(2))
.search(&regex_query, &TopDocs::with_limit(2))
.unwrap();
assert_eq!(scored_docs.len(), 1, "Expected only 1 document");
let (score, _) = scored_docs[0];
assert_nearly_equals(1f32, score);
}
let regex_query = RegexQuery::new("jap[A-Z]n".to_string(), country_field);
let top_docs = searcher
.search(&query_matching_zero, &TopDocs::with_limit(2))
.search(&regex_query, &TopDocs::with_limit(2))
.unwrap();
assert!(top_docs.is_empty(), "Expected ZERO document");
}

#[test]
pub fn test_regex_query() {
let (reader, field) = build_test_index();

let matching_one = RegexQuery::from_pattern("jap[ao]n", field).unwrap();
let matching_zero = RegexQuery::from_pattern("jap[A-Z]n", field).unwrap();

verify_regex_query(matching_one, matching_zero, reader);
}

#[test]
pub fn test_construct_from_regex() {
let (reader, field) = build_test_index();

let matching_one = RegexQuery::from_regex(Regex::new("jap[ao]n").unwrap(), field);
let matching_zero = RegexQuery::from_regex(Regex::new("jap[A-Z]n").unwrap(), field);

verify_regex_query(matching_one, matching_zero, reader);
}

#[test]
pub fn test_construct_from_reused_regex() {
let r1 = Arc::new(Regex::new("jap[ao]n").unwrap());
let r2 = Arc::new(Regex::new("jap[A-Z]n").unwrap());

let (reader, field) = build_test_index();

let matching_one = RegexQuery::from_regex(r1.clone(), field);
let matching_zero = RegexQuery::from_regex(r2.clone(), field);

verify_regex_query(matching_one, matching_zero, reader.clone());

let matching_one = RegexQuery::from_regex(r1.clone(), field);
let matching_zero = RegexQuery::from_regex(r2.clone(), field);

verify_regex_query(matching_one, matching_zero, reader.clone());
}
}

@@ -190,4 +190,5 @@ mod tests {
skip_docs,
);
}

}

@@ -20,10 +20,12 @@ use std::fmt;
/// * `field norm` - number of tokens in the field.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT, IndexRecordOption};
/// use tantivy::{Index, Result, Term};
/// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::TermQuery;
/// use tantivy::schema::{Schema, TEXT, IndexRecordOption};
/// use tantivy::{doc, Index, Result, Term};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {

@@ -28,7 +28,7 @@ where
}
}

/// Creates a `DocSet` that iterate through the union of two or more `DocSet`s.
/// Creates a `DocSet` that iterator through the intersection of two `DocSet`s.
pub struct Union<TScorer, TScoreCombiner = DoNothingCombiner> {
docsets: Vec<TScorer>,
bitsets: Box<[TinySet; HORIZON_NUM_TINYBITSETS]>,
@@ -409,17 +409,20 @@ mod tests {
vec![1, 2, 3, 7, 8, 9, 99, 100, 101, 500, 20000],
);
}

}

#[cfg(all(test, feature = "unstable"))]
mod bench {

use crate::query::score_combiner::DoNothingCombiner;
use crate::query::{ConstScorer, Union, VecDocSet};
use crate::tests;
use crate::DocId;
use crate::DocSet;
use query::score_combiner::DoNothingCombiner;
use query::ConstScorer;
use query::Union;
use query::VecDocSet;
use test::Bencher;
use tests;
use DocId;
use DocSet;

#[bench]
fn bench_union_3_high(bench: &mut Bencher) {

@@ -82,4 +82,5 @@ pub mod tests {
}
assert_eq!(postings.fill_buffer(&mut buffer[..]), 9);
}

}

@@ -178,4 +178,5 @@ mod tests {
doc.add_text(text_field, "My title");
assert_eq!(doc.field_values().len(), 1);
}

}

@@ -29,6 +29,22 @@ pub enum IndexRecordOption {
}

impl IndexRecordOption {
/// Returns true iff the term frequency will be encoded.
pub fn is_termfreq_enabled(self) -> bool {
match self {
IndexRecordOption::WithFreqsAndPositions | IndexRecordOption::WithFreqs => true,
_ => false,
}
}

/// Returns true iff the term positions within the document are stored as well.
pub fn is_position_enabled(self) -> bool {
match self {
IndexRecordOption::WithFreqsAndPositions => true,
_ => false,
}
}

/// Returns true iff this option includes encoding
/// term frequencies.
pub fn has_freq(self) -> bool {
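Both naming schemes (`is_termfreq_enabled`/`is_position_enabled` versus `has_freq`/`has_positions`) answer the same two questions about an indexing option; only the method names differ between branches. A sketch of the invariants using the `is_*` spelling from the hunk above, and assuming the enum's third variant is `Basic` as elsewhere in the schema module:

```rust
fn main() {
    use IndexRecordOption::*;
    // Frequencies are stored for the two richer options...
    assert!(WithFreqs.is_termfreq_enabled());
    assert!(WithFreqsAndPositions.is_termfreq_enabled());
    assert!(!Basic.is_termfreq_enabled());
    // ...while positions are only stored when frequencies are too.
    assert!(WithFreqsAndPositions.is_position_enabled());
    assert!(!WithFreqs.is_position_enabled());
}
```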
@@ -174,4 +174,5 @@ mod tests {
assert!(!is_valid_field_name("シャボン玉"));
assert!(is_valid_field_name("my_text_field"));
}

}

@@ -301,26 +301,28 @@ impl Schema {

let mut doc = Document::default();
for (field_name, json_value) in json_obj.iter() {
let field = self
.get_field(field_name)
.ok_or_else(|| DocParsingError::NoSuchFieldInSchema(field_name.clone()))?;
let field_entry = self.get_field_entry(field);
let field_type = field_entry.field_type();
match *json_value {
JsonValue::Array(ref json_items) => {
for json_item in json_items {
let value = field_type
.value_from_json(json_item)
.map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
doc.add(FieldValue::new(field, value));
match self.get_field(field_name) {
Some(field) => {
let field_entry = self.get_field_entry(field);
let field_type = field_entry.field_type();
match *json_value {
JsonValue::Array(ref json_items) => {
for json_item in json_items {
let value = field_type.value_from_json(json_item).map_err(|e| {
DocParsingError::ValueError(field_name.clone(), e)
})?;
doc.add(FieldValue::new(field, value));
}
}
_ => {
let value = field_type
.value_from_json(json_value)
.map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
doc.add(FieldValue::new(field, value));
}
}
}
_ => {
let value = field_type
.value_from_json(json_value)
.map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
doc.add(FieldValue::new(field, value));
}
None => return Err(DocParsingError::NoSuchFieldInSchema(field_name.clone())),
}
}
Ok(doc)
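One side flattens the field lookup with `ok_or_else` and `?`, the other matches on the `Option` explicitly; the observable behavior is identical. A sketch of that behavior, assuming this body backs `Schema::parse_document` (the method name is an assumption here):

```rust
fn parse_examples(schema: &Schema) -> Result<(), DocParsingError> {
    // Arrays fan out into multiple values on the same field.
    let doc = schema.parse_document(r#"{"title": ["Diary", "Journal"]}"#)?;
    assert_eq!(doc.field_values().len(), 2);

    // An unknown key fails with NoSuchFieldInSchema instead of being ignored.
    assert!(schema.parse_document(r#"{"unknown_field": 1}"#).is_err());
    Ok(())
}
```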
@@ -22,10 +22,10 @@ impl Term {
/// Builds a term given a field, and a i64-value
///
/// Assuming the term has a field id of 1, and a i64 value of 3234,
/// the Term will have 12 bytes.
/// the Term will have 8 bytes.
///
/// The first four byte are dedicated to storing the field id as a u64.
/// The 8 following bytes are encoding the u64 value.
/// The 4 following bytes are encoding the u64 value.
pub fn from_field_i64(field: Field, val: i64) -> Term {
let val_u64: u64 = common::i64_to_u64(val);
Term::from_field_u64(field, val_u64)
@@ -33,11 +33,11 @@ impl Term {

/// Builds a term given a field, and a f64-value
///
/// Assuming the term has a field id of 1, and a f64 value of 1.5,
/// the Term will have 12 bytes.
/// Assuming the term has a field id of 1, and a u64 value of 3234,
/// the Term will have 8 bytes. <= this is wrong
///
/// The first four byte are dedicated to storing the field id as a u64.
/// The 8 following bytes are encoding the f64 as a u64 value.
/// The 4 following bytes are encoding the u64 value.
pub fn from_field_f64(field: Field, val: f64) -> Term {
let val_u64: u64 = common::f64_to_u64(val);
Term::from_field_u64(field, val_u64)
@@ -46,10 +46,10 @@ impl Term {
/// Builds a term given a field, and a DateTime value
///
/// Assuming the term has a field id of 1, and a timestamp i64 value of 3234,
/// the Term will have 12 bytes.
/// the Term will have 8 bytes.
///
/// The first four byte are dedicated to storing the field id as a u64.
/// The 8 following bytes are encoding the DateTime as i64 timestamp value.
/// The 4 following bytes are encoding the DateTime as i64 timestamp value.
pub fn from_field_date(field: Field, val: &DateTime) -> Term {
let val_timestamp = val.timestamp();
Term::from_field_i64(field, val_timestamp)
@@ -82,10 +82,10 @@ impl Term {
/// Builds a term given a field, and a u64-value
///
/// Assuming the term has a field id of 1, and a u64 value of 3234,
/// the Term will have 12 bytes.
/// the Term will have 8 bytes.
///
/// The first four byte are dedicated to storing the field id as a u64.
/// The 8 following bytes are encoding the u64 value.
/// The 4 following bytes are encoding the u64 value.
pub fn from_field_u64(field: Field, val: u64) -> Term {
let mut term = Term(vec![0u8; INT_TERM_LEN]);
term.set_field(field);
@@ -182,7 +182,7 @@ where
///
/// # Panics
/// ... or returns an invalid value
/// if the term is not a `f64` field.
/// if the term is not a `i64` field.
pub fn get_f64(&self) -> f64 {
common::u64_to_f64(BigEndian::read_u64(&self.0.as_ref()[4..]))
}
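The corrected comments all describe the same layout: 4 bytes of field id followed by 8 bytes of big-endian value, i.e. `INT_TERM_LEN` is 4 + 8 = 12 bytes, which is also why `get_f64` above reads from offset 4. A self-contained sketch of the arithmetic (the constants mirror, but are not, the crate's own):

```rust
// 4-byte field id + 8-byte value = 12-byte term buffer.
const FIELD_ID_LEN: usize = 4;
const VALUE_LEN: usize = std::mem::size_of::<u64>(); // 8

fn main() {
    let int_term_len = FIELD_ID_LEN + VALUE_LEN;
    assert_eq!(int_term_len, 12);
    // The value starts right after the field id, hence the `[4..]` slice in get_f64.
    let buffer = vec![0u8; int_term_len];
    assert_eq!(buffer[FIELD_ID_LEN..].len(), 8);
}
```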
@@ -213,9 +213,11 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
/// # Example
///
/// ```rust
/// # use tantivy::query::QueryParser;
/// # #[macro_use]
/// # extern crate tantivy;
/// # use tantivy::Index;
/// # use tantivy::schema::{Schema, TEXT};
/// # use tantivy::{doc, Index};
/// # use tantivy::query::QueryParser;
/// use tantivy::SnippetGenerator;
///
/// # fn main() -> tantivy::Result<()> {

@@ -120,16 +120,17 @@ pub mod tests {
);
}
}

}

#[cfg(all(test, feature = "unstable"))]
mod bench {

use super::tests::write_lorem_ipsum_store;
use crate::directory::Directory;
use crate::directory::RAMDirectory;
use crate::store::StoreReader;
use directory::Directory;
use directory::RAMDirectory;
use std::path::Path;
use store::StoreReader;
use test::Bencher;

#[bench]

@@ -165,4 +165,5 @@ mod tests {
assert_eq!(output.len(), 65);
assert_eq!(output[0], 128u8 + 3u8);
}

}

@@ -3,7 +3,6 @@ use super::skiplist::SkipListBuilder;
use super::StoreReader;
use crate::common::CountingWriter;
use crate::common::{BinarySerializable, VInt};
use crate::directory::TerminatingWrite;
use crate::directory::WritePtr;
use crate::schema::Document;
use crate::DocId;
@@ -110,6 +109,6 @@ impl StoreWriter {
self.offset_index_writer.write(&mut self.writer)?;
header_offset.serialize(&mut self.writer)?;
self.doc.serialize(&mut self.writer)?;
self.writer.terminate()
self.writer.flush()
}
}
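The difference this hunk toggles is more than cosmetic: `flush` only pushes buffered bytes to the underlying writer, while `terminate` (from the `TerminatingWrite` trait imported above) also seals the file, which is what lets end-of-file work such as footer writing happen exactly once. A hedged sketch of that contract, not the crate's actual trait definition:

```rust
use std::io::{self, Write};

/// Sketch: a writer that must be explicitly closed so end-of-file
/// work (e.g. writing a footer) runs exactly once.
trait TerminatingWrite: Write {
    fn terminate(self) -> io::Result<()>;
}

struct FooterWriter<W: Write>(W);

impl<W: Write> Write for FooterWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.0.write(buf) }
    fn flush(&mut self) -> io::Result<()> { self.0.flush() }
}

impl<W: Write> TerminatingWrite for FooterWriter<W> {
    fn terminate(mut self) -> io::Result<()> {
        self.0.write_all(b"FOOTER")?; // placeholder footer bytes
        self.0.flush()
    }
}
```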
@@ -2,7 +2,7 @@ use super::TermDictionary;
use crate::postings::TermInfo;
use crate::termdict::TermOrdinal;
use tantivy_fst::automaton::AlwaysMatch;
use tantivy_fst::map::{Stream, StreamBuilder};
use tantivy_fst::map::{Stream, StreamBuilder, StreamWithState};
use tantivy_fst::Automaton;
use tantivy_fst::{IntoStreamer, Streamer};

@@ -11,6 +11,7 @@ use tantivy_fst::{IntoStreamer, Streamer};
pub struct TermStreamerBuilder<'a, A = AlwaysMatch>
where
A: Automaton,
A::State: Clone,
{
fst_map: &'a TermDictionary,
stream_builder: StreamBuilder<'a, A>,
@@ -19,6 +20,7 @@ where
impl<'a, A> TermStreamerBuilder<'a, A>
where
A: Automaton,
A::State: Clone + Default + Sized,
{
pub(crate) fn new(fst_map: &'a TermDictionary, stream_builder: StreamBuilder<'a, A>) -> Self {
TermStreamerBuilder {
@@ -56,10 +58,11 @@ where
pub fn into_stream(self) -> TermStreamer<'a, A> {
TermStreamer {
fst_map: self.fst_map,
stream: self.stream_builder.into_stream(),
stream: self.stream_builder.with_state().into_stream(),
term_ord: 0u64,
current_key: Vec::with_capacity(100),
current_value: TermInfo::default(),
state: Default::default(),
}
}
}
@@ -69,27 +72,31 @@ where
pub struct TermStreamer<'a, A = AlwaysMatch>
where
A: Automaton,
A::State: Clone + Default + Sized,
{
fst_map: &'a TermDictionary,
stream: Stream<'a, A>,
stream: StreamWithState<'a, A>,
term_ord: TermOrdinal,
current_key: Vec<u8>,
current_value: TermInfo,
state: A::State,
}

impl<'a, A> TermStreamer<'a, A>
where
A: Automaton,
A::State: Clone + Default + Sized,
{
/// Advances the stream to the next item.
/// Before the first call to `.advance()`, the stream
/// is in an uninitialized state.
pub fn advance(&mut self) -> bool {
if let Some((term, term_ord)) = self.stream.next() {
if let Some((term, term_ord, state)) = self.stream.next() {
self.current_key.clear();
self.current_key.extend_from_slice(term);
self.term_ord = term_ord;
self.current_value = self.fst_map.term_info_from_ord(term_ord);
self.state = state;
true
} else {
false
@@ -118,6 +125,10 @@ where
&self.current_key
}

pub fn state(&self) -> &A::State {
&self.state
}

/// Accesses the current value.
///
/// Calling `.value()` after the end of the stream will return the

@@ -328,4 +328,5 @@ mod tests {
assert_eq!(term_info_store.get(i as u64), term_infos[i]);
}
}

}

@@ -197,7 +197,11 @@ impl TermDictionary {

/// Returns a search builder, to stream all of the terms
/// within the Automaton
pub fn search<'a, A: Automaton + 'a>(&'a self, automaton: A) -> TermStreamerBuilder<'a, A> {
pub fn search<'a, A>(&'a self, automaton: A) -> TermStreamerBuilder<'a, A>
where
A: Automaton + 'a,
A::State: Clone + Default + Sized,
{
let stream_builder = self.fst_index.search(automaton);
TermStreamerBuilder::<A>::new(self, stream_builder)
}
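Taken together, these hunks thread the automaton's per-term state through the stream: `with_state()` makes the fst stream yield `(term, ordinal, state)` triples, and the new `TermStreamer::state()` exposes the state that accepted the current key. An in-crate sketch of how a caller might use it, with names as above:

```rust
// Stream every term matched by `automaton` and inspect the accepting state.
let mut stream = term_dict.search(automaton).into_stream();
while stream.advance() {
    let term_bytes: &[u8] = stream.key();
    // Per-term automaton state, e.g. a Levenshtein DFA state carrying
    // the edit distance at which this term matched.
    let _state = stream.state();
}
```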
@@ -1,5 +1,6 @@
//! # Example
//! ```rust
//! ```
//! extern crate tantivy;
//! use tantivy::tokenizer::*;
//!
//! # fn main() {

@@ -98,6 +98,10 @@ mod tests {
#[test]
fn test_lowercaser() {
assert_eq!(lowercase_helper("Tree"), vec!["tree".to_string()]);
assert_eq!(lowercase_helper("Русский"), vec!["русский".to_string()]);
assert_eq!(
lowercase_helper("Русский"),
vec!["русский".to_string()]
);
}

}

@@ -4,7 +4,8 @@
//! You must define in your schema which tokenizer should be used for
//! each of your fields :
//!
//! ```rust
//! ```
//! extern crate tantivy;
//! use tantivy::schema::*;
//!
//! # fn main() {
@@ -64,6 +65,8 @@
//! For instance, the `en_stem` is defined as follows.
//!
//! ```rust
//! # extern crate tantivy;
//!
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
@@ -77,7 +80,8 @@
//! Once your tokenizer is defined, you need to
//! register it with a name in your index's [`TokenizerManager`](./struct.TokenizerManager.html).
//!
//! ```rust
//! ```
//! # extern crate tantivy;
//! # use tantivy::schema::Schema;
//! # use tantivy::tokenizer::*;
//! # use tantivy::Index;
@@ -97,7 +101,8 @@
//!
//! # Example
//!
//! ```rust
//! ```
//! extern crate tantivy;
//! use tantivy::schema::{Schema, IndexRecordOption, TextOptions, TextFieldIndexing};
//! use tantivy::tokenizer::*;
//! use tantivy::Index;
@@ -281,4 +286,5 @@ pub mod tests {
assert!(tokens.is_empty());
}
}

}

@@ -29,7 +29,8 @@ use super::{Token, TokenStream, Tokenizer};
///
/// # Example
///
/// ```rust
/// ```
/// # extern crate tantivy;
/// use tantivy::tokenizer::*;
/// # fn main() {
/// let tokenizer = NgramTokenizer::new(2, 3, false);
@@ -460,4 +461,5 @@ mod tests {
assert_eq!(it.next(), Some((8, 9)));
assert_eq!(it.next(), None);
}

}

@@ -1,5 +1,6 @@
//! # Example
//! ```rust
//! ```
//! extern crate tantivy;
//! use tantivy::tokenizer::*;
//!
//! # fn main() {

@@ -1,5 +1,6 @@
//! # Example
//! ```rust
//! ```
//! extern crate tantivy;
//! use tantivy::tokenizer::*;
//!
//! # fn main() {

@@ -97,4 +97,5 @@ mod tests {

assert!(!token_chain.advance());
}

}

@@ -1,7 +1,7 @@
use fail;
use std::io::Write;
use std::path::Path;
use tantivy::directory::{Directory, ManagedDirectory, RAMDirectory, TerminatingWrite};
use tantivy::directory::{Directory, ManagedDirectory, RAMDirectory};
use tantivy::doc;
use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, Term};
@@ -17,7 +17,7 @@ fn test_failpoints_managed_directory_gc_if_delete_fails() {
managed_directory
.open_write(test_path)
.unwrap()
.terminate()
.flush()
.unwrap();
assert!(managed_directory.exists(test_path));
// triggering gc and setting the delete operation to fail.