Compare commits

2 Commits

Author        SHA1        Message          Date
Paul Masurel  6eb7c7f419  Added coveralls  2020-08-20 09:25:45 +09:00
Paul Masurel  e37ca8178a  githubactions    2020-08-19 22:52:20 +09:00

120 changed files with 2808 additions and 3959 deletions

.github/workflows/ci.yml (new file, 28 lines added)

@@ -0,0 +1,28 @@
name: Tantivy CI
on: [push]
jobs:
  test:
    name: Test Suite
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
      - uses: actions-rs/cargo@v1
        with:
          command: test
      - uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check
      - run: rustup component add clippy
      - uses: actions-rs/cargo@v1
        with:
          command: clippy
          args: -- -D warnings

.github/workflows/coveralls.yml (new file, 66 lines added)

@@ -0,0 +1,66 @@
on: [push]
name: Code coverage with grcov
jobs:
  grcov:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os:
          - ubuntu-latest
          #- macOS-latest
          #- windows-latest
    steps:
      - uses: actions/checkout@v2
      - name: Install toolchain
        uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly
          override: true
          profile: minimal
      - name: Execute tests
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --all --lib
        env:
          CARGO_INCREMENTAL: 0
          RUSTFLAGS: "-Zprofile -Ccodegen-units=1 -Cinline-threshold=0 -Clink-dead-code -Coverflow-checks=off -Cpanic=abort -Zpanic_abort_tests"
      # Note that `actions-rs/grcov` Action can install `grcov` too,
      # but can't use faster installation methods yet.
      # As a temporary experiment `actions-rs/install` Action plugged in here.
      # Consider **NOT** to copy that into your workflow,
      # but use `actions-rs/grcov` only
      - name: Pre-installing grcov
        uses: actions-rs/install@v0.1
        with:
          crate: grcov
          use-tool-cache: true
      - name: Gather coverage data
        id: coverage
        uses: actions-rs/grcov@v0.1
        with:
          coveralls-token: ${{ secrets.COVERALLS_TOKEN }}
      - name: Coveralls upload
        uses: coverallsapp/github-action@master
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          parallel: true
          path-to-lcov: ${{ steps.coverage.outputs.report }}
  grcov_finalize:
    runs-on: ubuntu-latest
    needs: grcov
    steps:
      - name: Coveralls finalization
        uses: coverallsapp/github-action@master
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          parallel-finished: true

.gitignore (1 line changed)

@@ -12,4 +12,3 @@ cpp/simdcomp/bitpackingbenchmark
 *.bk
 .idea
 trace.dat
-cargo-timing*

CHANGELOG.md

@@ -1,24 +1,3 @@
-Tantivy 0.14.0
-=========================
-- Remove dependency to atomicwrites #833 .Implemented by @pmasurel upon suggestion and research from @asafigan).
-- Migrated tantivy error from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
-- API Change. Accessing the typed value off a `Schema::Value` now returns an Option instead of panicking if the type does not match.
-- Large API Change in the Directory API. Tantivy used to assume that all files could be somehow memory mapped. After this change, Directory return a `FileSlice` that can be reduced and eventually read into an `OwnedBytes` object. Long and blocking io operation are still required by they do not span over the entire file.
-- Added support for Brotli compression in the DocStore. (@ppodolsky)
-- Added helper for building intersections and unions in BooleanQuery (@guilload)
-- Bugfix in `Query::explain`
-- Making it possible to opt out the generation of fieldnorms information for indexed fields. This change breaks compatibility as the meta.json file format is slightly changed. (#922, @pmasurel)
-Tantivy 0.13.2
-===================
-Bugfix. Acquiring a facet reader on a segment that does not contain any
-doc with this facet returns `None`. (#896)
-Tantivy 0.13.1
-===================
-Made `Query` and `Collector` `Send + Sync`.
-Updated misc dependency versions.
 Tantivy 0.13.0
 ======================
 Tantivy 0.13 introduce a change in the index format that will require
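The changelog entries removed in this hunk describe the 0.14 API changes only tersely. As an illustration of one of them (typed accessors on a value returning an `Option` instead of panicking), here is a minimal sketch of what calling code looks like after the change; the `text()` accessor, the `doc!` usage and the field name are assumptions for illustration and are not part of this diff:

```rust
use tantivy::doc;
use tantivy::schema::{Schema, TEXT};

fn main() {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let _schema = schema_builder.build();

    let document = doc!(title => "Of Mice and Men");
    // Before the change, a typed accessor on a value of the wrong type would panic.
    // After it, the accessor returns an Option that the caller handles explicitly
    // (accessor name assumed here for illustration).
    if let Some(first_value) = document.get_first(title) {
        match first_value.text() {
            Some(text) => println!("title: {}", text),
            None => println!("field value is not text"),
        }
    }
}
```

Returning an `Option` moves a type mismatch from a runtime panic to a value the caller can match on.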

Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.14.0-dev"
+version = "0.13.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -13,40 +13,42 @@ keywords = ["search", "information", "retrieval"]
 edition = "2018"
 [dependencies]
-base64 = "0.13"
+base64 = "0.12.0"
-byteorder = "1"
+byteorder = "1.0"
-crc32fast = "1"
+crc32fast = "1.2.0"
-once_cell = "1"
+once_cell = "1.0"
-regex ={version = "1", default-features = false, features = ["std"]}
+regex ={version = "1.3.0", default-features = false, features = ["std"]}
 tantivy-fst = "0.3"
 memmap = {version = "0.7", optional=true}
-lz4 = {version="1", optional=true}
+lz4 = {version="1.20", optional=true}
-brotli = {version="3.3.0", optional=true}
 snap = "1"
-tempfile = {version="3", optional=true}
+atomicwrites = {version="0.2.2", optional=true}
+tempfile = "3.0"
 log = "0.4"
-serde = {version="1", features=["derive"]}
+serde = {version="1.0", features=["derive"]}
-serde_json = "1"
+serde_json = "1.0"
-num_cpus = "1"
+num_cpus = "1.2"
 fs2={version="0.4", optional=true}
 levenshtein_automata = "0.2"
 notify = {version="4", optional=true}
 uuid = { version = "0.8", features = ["v4", "serde"] }
-crossbeam = "0.8"
+crossbeam = "0.7"
 futures = {version = "0.3", features=["thread-pool"] }
-tantivy-query-grammar = { version="0.14.0-dev", path="./query-grammar" }
+owning_ref = "0.4"
-stable_deref_trait = "1"
+stable_deref_trait = "1.0.0"
-rust-stemmers = "1"
+rust-stemmers = "1.2"
-downcast-rs = "1"
+downcast-rs = { version="1.0" }
+tantivy-query-grammar = { version="0.13", path="./query-grammar" }
 bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
 census = "0.4"
-fnv = "1"
+fnv = "1.0.6"
-thiserror = "1.0"
+owned-read = "0.4"
-htmlescape = "0.3"
+failure = "0.1"
+htmlescape = "0.3.1"
 fail = "0.4"
 murmurhash32 = "0.2"
 chrono = "0.4"
-smallvec = "1"
+smallvec = "1.0"
 rayon = "1"
 [target.'cfg(windows)'.dependencies]
@@ -73,12 +75,12 @@ overflow-checks = true
 [features]
 default = ["mmap"]
-mmap = ["fs2", "tempfile", "memmap", "notify"]
+mmap = ["atomicwrites", "fs2", "memmap", "notify"]
-brotli-compression = ["brotli"]
 lz4-compression = ["lz4"]
 failpoints = ["fail/failpoints"]
 unstable = [] # useful for benches.
 wasm-bindgen = ["uuid/wasm-bindgen"]
+scoref64 = [] # scores are f64 instead of f32. was introduced to debug blockwand.
 [workspace]
 members = ["query-grammar"]

README.md

@@ -5,6 +5,7 @@
 [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
 [![Build status](https://ci.appveyor.com/api/projects/status/r7nb13kj23u8m9pj/branch/master?svg=true)](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
 [![Crates.io](https://img.shields.io/crates/v/tantivy.svg)](https://crates.io/crates/tantivy)
+[![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton)
 ![Tantivy](https://tantivy-search.github.io/logo/tantivy-logo.png)
@@ -33,6 +34,11 @@ Tantivy is, in fact, strongly inspired by Lucene's design.
 The following [benchmark](https://tantivy-search.github.io/bench/) break downs
 performance for different type of queries / collection.
+In general, Tantivy tends to be
+- slower than Lucene on union with a Top-K due to Block-WAND optimization.
+- faster than Lucene on intersection and phrase queries.
 Your mileage WILL vary depending on the nature of queries and their load.
 # Features
@@ -84,7 +90,7 @@ There are many ways to support this project.
 - Help with documentation by asking questions or submitting PRs
 - Contribute code (you can join [our Gitter](https://gitter.im/tantivy-search/tantivy))
 - Talk about Tantivy around you
-- [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
+- Drop a word on on [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton) or even [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
 # Contributing code

[file path not shown]

@@ -112,6 +112,18 @@ fn main() -> tantivy::Result<()> {
 limbs and branches that arch over the pool"
 ));
+index_writer.add_document(doc!(
+title => "Of Mice and Men",
+body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
+bank and runs deep and green. The water is warm too, for it has slipped twinkling \
+over the yellow sands in the sunlight before reaching the narrow pool. On one \
+side of the river the golden foothill slopes curve up to the strong and rocky \
+Gabilan Mountains, but on the valley side the water is lined with trees—willows \
+fresh and green with every spring, carrying in their lower leaf junctures the \
+debris of the winters flooding; and sycamores with mottled, white, recumbent \
+limbs and branches that arch over the pool"
+));
 // Multivalued field just need to be repeated.
 index_writer.add_document(doc!(
 title => "Frankenstein",

[file path not shown]

@@ -56,7 +56,7 @@ fn main() -> tantivy::Result<()> {
 );
 let top_docs_by_custom_score =
 TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
-let ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
+let mut ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
 let facet_dict = ingredient_reader.facet_dict();
 let query_ords: HashSet<u64> = facets

[file path not shown]

@@ -45,7 +45,7 @@ fn main() -> tantivy::Result<()> {
 // Inverted index stands for the combination of
 // - the term dictionary
 // - the inverted lists associated to each terms and their positions
-let inverted_index = segment_reader.inverted_index(title)?;
+let inverted_index = segment_reader.inverted_index(title);
 // A `Term` is a text token associated with a field.
 // Let's go through all docs containing the term `title:the` and access their position
@@ -58,7 +58,7 @@ fn main() -> tantivy::Result<()> {
 // If you don't need all this information, you may get better performance by decompressing less
 // information.
 if let Some(mut segment_postings) =
-inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)?
+inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)
 {
 // this buffer will be used to request for positions
 let mut positions: Vec<u32> = Vec::with_capacity(100);
@@ -106,7 +106,7 @@ fn main() -> tantivy::Result<()> {
 // Inverted index stands for the combination of
 // - the term dictionary
 // - the inverted lists associated to each terms and their positions
-let inverted_index = segment_reader.inverted_index(title)?;
+let inverted_index = segment_reader.inverted_index(title);
 // This segment posting object is like a cursor over the documents matching the term.
 // The `IndexRecordOption` arguments tells tantivy we will be interested in both term frequencies
@@ -115,7 +115,7 @@ fn main() -> tantivy::Result<()> {
 // If you don't need all this information, you may get better performance by decompressing less
 // information.
 if let Some(mut block_segment_postings) =
-inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)?
+inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)
 {
 loop {
 let docs = block_segment_postings.docs();

query-grammar/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-query-grammar"
-version = "0.14.0-dev"
+version = "0.13.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]

[file path not shown]

@@ -52,7 +52,7 @@ mod test {
 use crate::Occur;
 #[test]
-fn test_occur_compose() {
+fn test_Occur_compose() {
 assert_eq!(Occur::compose(Occur::Should, Occur::Should), Occur::Should);
 assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
 assert_eq!(

[file path not shown]

@@ -9,10 +9,8 @@ use combine::{
 fn field<'a>() -> impl Parser<&'a str, Output = String> {
 (
-(letter().or(char('_'))),
-many(satisfy(|c: char| {
-c.is_alphanumeric() || c == '_' || c == '-'
-})),
+letter(),
+many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
 )
 .skip(char(':'))
 .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
@@ -281,8 +279,6 @@ pub fn parse_to_ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
 #[cfg(test)]
 mod test {
-type TestParseResult = Result<(), StringStreamError>;
 use super::*;
 use combine::parser::Parser;
@@ -300,10 +296,9 @@ mod test {
 }
 #[test]
-fn test_occur_symbol() -> TestParseResult {
-assert_eq!(super::occur_symbol().parse("-")?, (Occur::MustNot, ""));
-assert_eq!(super::occur_symbol().parse("+")?, (Occur::Must, ""));
-Ok(())
+fn test_occur_symbol() {
+assert_eq!(super::occur_symbol().parse("-"), Ok((Occur::MustNot, "")));
+assert_eq!(super::occur_symbol().parse("+"), Ok((Occur::Must, "")));
 }
 #[test]
@@ -415,25 +410,6 @@ mod test {
 assert_eq!(format!("{:?}", ast), "\"abc\"");
 }
-#[test]
-fn test_field_name() -> TestParseResult {
-assert_eq!(
-super::field().parse("my-field-name:a")?,
-("my-field-name".to_string(), "a")
-);
-assert_eq!(
-super::field().parse("my_field_name:a")?,
-("my_field_name".to_string(), "a")
-);
-assert!(super::field().parse(":a").is_err());
-assert!(super::field().parse("-my_field:a").is_err());
-assert_eq!(
-super::field().parse("_my_field:a")?,
-("_my_field".to_string(), "a")
-);
-Ok(())
-}
 #[test]
 fn test_range_parser() {
 // testing the range() parser separately

[file path not shown]

@@ -46,7 +46,7 @@ pub trait CustomScorer<TScore>: Sync {
 impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
 where
-TCustomScorer: CustomScorer<TScore> + Send + Sync,
+TCustomScorer: CustomScorer<TScore>,
 TScore: 'static + PartialOrd + Clone + Send + Sync,
 {
 type Fruit = Vec<(TScore, DocAddress)>;
@@ -58,10 +58,10 @@ where
 segment_local_id: u32,
 segment_reader: &SegmentReader,
 ) -> crate::Result<Self::Child> {
-let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
 let segment_collector = self
 .collector
 .for_segment(segment_local_id, segment_reader)?;
+let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
 Ok(CustomScoreTopSegmentCollector {
 segment_collector,
 segment_scorer,

[file path not shown]

@@ -1,61 +0,0 @@
use std::collections::HashSet;
use crate::{DocAddress, DocId, Score};
use super::{Collector, SegmentCollector};
/// Collectors that returns the set of DocAddress that matches the query.
///
/// This collector is mostly useful for tests.
pub struct DocSetCollector;
impl Collector for DocSetCollector {
type Fruit = HashSet<DocAddress>;
type Child = DocSetChildCollector;
fn for_segment(
&self,
segment_local_id: crate::SegmentLocalId,
_segment: &crate::SegmentReader,
) -> crate::Result<Self::Child> {
Ok(DocSetChildCollector {
segment_local_id,
docs: HashSet::new(),
})
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(
&self,
segment_fruits: Vec<(u32, HashSet<DocId>)>,
) -> crate::Result<Self::Fruit> {
let len: usize = segment_fruits.iter().map(|(_, docset)| docset.len()).sum();
let mut result = HashSet::with_capacity(len);
for (segment_local_id, docs) in segment_fruits {
for doc in docs {
result.insert(DocAddress(segment_local_id, doc));
}
}
Ok(result)
}
}
pub struct DocSetChildCollector {
segment_local_id: u32,
docs: HashSet<DocId>,
}
impl SegmentCollector for DocSetChildCollector {
type Fruit = (u32, HashSet<DocId>);
fn collect(&mut self, doc: crate::DocId, _score: Score) {
self.docs.insert(doc);
}
fn harvest(self) -> (u32, HashSet<DocId>) {
(self.segment_local_id, self.docs)
}
}

[file path not shown]

@@ -7,6 +7,7 @@ use crate::DocId;
 use crate::Score;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
+use crate::TantivyError;
 use std::cmp::Ordering;
 use std::collections::btree_map;
 use std::collections::BTreeMap;
@@ -265,7 +266,10 @@ impl Collector for FacetCollector {
 _: SegmentLocalId,
 reader: &SegmentReader,
 ) -> crate::Result<FacetSegmentCollector> {
-let facet_reader = reader.facet_reader(self.field)?;
+let field_name = reader.schema().get_field_name(self.field);
+let facet_reader = reader.facet_reader(self.field).ok_or_else(|| {
+TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name))
+})?;
 let mut collapse_mapping = Vec::new();
 let mut counts = Vec::new();
@@ -468,7 +472,7 @@ mod tests {
 let schema = schema_builder.build();
 let index = Index::create_in_ram(schema);
-let mut index_writer = index.writer_for_tests().unwrap();
+let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
 let num_facets: usize = 3 * 4 * 5;
 let facets: Vec<Facet> = (0..num_facets)
 .map(|mut n| {
@@ -527,7 +531,7 @@
 let facet_field = schema_builder.add_facet_field("facets");
 let schema = schema_builder.build();
 let index = Index::create_in_ram(schema);
-let mut index_writer = index.writer_for_tests().unwrap();
+let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
 index_writer.add_document(doc!(
 facet_field => Facet::from_text(&"/subjects/A/a"),
 facet_field => Facet::from_text(&"/subjects/B/a"),
@@ -546,12 +550,12 @@
 }
 #[test]
-fn test_doc_search_by_facet() -> crate::Result<()> {
+fn test_doc_search_by_facet() {
 let mut schema_builder = Schema::builder();
 let facet_field = schema_builder.add_facet_field("facet");
 let schema = schema_builder.build();
 let index = Index::create_in_ram(schema);
-let mut index_writer = index.writer_for_tests()?;
+let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
 index_writer.add_document(doc!(
 facet_field => Facet::from_text(&"/A/A"),
 ));
@@ -564,8 +568,8 @@
 index_writer.add_document(doc!(
 facet_field => Facet::from_text(&"/D/C/A"),
 ));
-index_writer.commit()?;
-let reader = index.reader()?;
+index_writer.commit().unwrap();
+let reader = index.reader().unwrap();
 let searcher = reader.searcher();
 assert_eq!(searcher.num_docs(), 4);
@@ -582,17 +586,17 @@
 assert_eq!(count_facet("/A/C"), 1);
 assert_eq!(count_facet("/A/C/A"), 1);
 assert_eq!(count_facet("/C/A"), 0);
-let query_parser = QueryParser::for_index(&index, vec![]);
 {
-let query = query_parser.parse_query("facet:/A/B")?;
-assert_eq!(1, searcher.search(&query, &Count).unwrap());
+let query_parser = QueryParser::for_index(&index, vec![]);
+{
+let query = query_parser.parse_query("facet:/A/B").unwrap();
+assert_eq!(1, searcher.search(&query, &Count).unwrap());
+}
+{
+let query = query_parser.parse_query("facet:/A").unwrap();
+assert_eq!(3, searcher.search(&query, &Count).unwrap());
+}
 }
-{
-let query = query_parser.parse_query("facet:/A")?;
-assert_eq!(3, searcher.search(&query, &Count)?);
-}
-Ok(())
 }
 #[test]
@@ -627,7 +631,7 @@ mod tests {
 .collect();
 docs[..].shuffle(&mut thread_rng());
-let mut index_writer = index.writer_for_tests().unwrap();
+let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
 for doc in docs {
 index_writer.add_document(doc);
 }
@@ -680,7 +684,7 @@ mod bench {
 // 40425 docs
 docs[..].shuffle(&mut thread_rng());
-let mut index_writer = index.writer_for_tests().unwrap();
+let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
 for doc in docs {
 index_writer.add_document(doc);
 }

[file path not shown]

@@ -0,0 +1,127 @@
use std::cmp::Eq;
use std::collections::HashMap;
use std::hash::Hash;
use collector::Collector;
use fastfield::FastFieldReader;
use schema::Field;
use DocId;
use Result;
use Score;
use SegmentReader;
use SegmentLocalId;
/// Facet collector for i64/u64 fast field
pub struct IntFacetCollector<T>
where
T: FastFieldReader,
T::ValueType: Eq + Hash,
{
counters: HashMap<T::ValueType, u64>,
field: Field,
ff_reader: Option<T>,
}
impl<T> IntFacetCollector<T>
where
T: FastFieldReader,
T::ValueType: Eq + Hash,
{
/// Creates a new facet collector for aggregating a given field.
pub fn new(field: Field) -> IntFacetCollector<T> {
IntFacetCollector {
counters: HashMap::new(),
field: field,
ff_reader: None,
}
}
}
impl<T> Collector for IntFacetCollector<T>
where
T: FastFieldReader,
T::ValueType: Eq + Hash,
{
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
Ok(())
}
fn collect(&mut self, doc: DocId, _: Score) {
let val = self.ff_reader
.as_ref()
.expect(
"collect() was called before set_segment. \
This should never happen.",
)
.get(doc);
*(self.counters.entry(val).or_insert(0)) += 1;
}
}
#[cfg(test)]
mod tests {
use collector::{chain, IntFacetCollector};
use query::QueryParser;
use fastfield::{I64FastFieldReader, U64FastFieldReader};
use schema::{self, FAST, STRING};
use Index;
#[test]
// create 10 documents, set num field value to 0 or 1 for even/odd ones
// make sure we have facet counters correctly filled
fn test_facet_collector_results() {
let mut schema_builder = schema::Schema::builder();
let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
let num_field_f64 = schema_builder.add_f64_field("num_f64", FAST);
let text_field = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
{
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
for i in 0u64..10u64 {
index_writer.add_document(doc!(
num_field_i64 => ((i as i64) % 3i64) as i64,
num_field_u64 => (i % 2u64) as u64,
num_field_f64 => (i % 4u64) as f64,
text_field => "text"
));
}
}
assert_eq!(index_writer.commit().unwrap(), 10u64);
}
let searcher = index.reader().searcher();
let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64);
let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64);
let mut ffvf_f64: IntFacetCollector<F64FastFieldReader> = IntFacetCollector::new(num_field_f64);
{
// perform the query
let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64).push(&mut ffvf_f64);
let mut query_parser = QueryParser::for_index(index, vec![text_field]);
let query = query_parser.parse_query("text:text").unwrap();
query.search(&searcher, &mut facet_collectors).unwrap();
}
assert_eq!(ffvf_u64.counters[&0], 5);
assert_eq!(ffvf_u64.counters[&1], 5);
assert_eq!(ffvf_i64.counters[&0], 4);
assert_eq!(ffvf_i64.counters[&1], 3);
assert_eq!(ffvf_f64.counters[&0.0], 3);
assert_eq!(ffvf_f64.counters[&2.0], 2);
}
}

[file path not shown]

@@ -111,9 +111,6 @@ mod facet_collector;
pub use self::facet_collector::FacetCollector; pub use self::facet_collector::FacetCollector;
use crate::query::Weight; use crate::query::Weight;
mod docset_collector;
pub use self::docset_collector::DocSetCollector;
/// `Fruit` is the type for the result of our collection. /// `Fruit` is the type for the result of our collection.
/// e.g. `usize` for the `Count` collector. /// e.g. `usize` for the `Count` collector.
pub trait Fruit: Send + downcast_rs::Downcast {} pub trait Fruit: Send + downcast_rs::Downcast {}
@@ -136,13 +133,13 @@ impl<T> Fruit for T where T: Send + downcast_rs::Downcast {}
/// The collection logic itself is in the `SegmentCollector`. /// The collection logic itself is in the `SegmentCollector`.
/// ///
/// Segments are not guaranteed to be visited in any specific order. /// Segments are not guaranteed to be visited in any specific order.
pub trait Collector: Sync + Send { pub trait Collector: Sync {
/// `Fruit` is the type for the result of our collection. /// `Fruit` is the type for the result of our collection.
/// e.g. `usize` for the `Count` collector. /// e.g. `usize` for the `Count` collector.
type Fruit: Fruit; type Fruit: Fruit;
/// Type of the `SegmentCollector` associated to this collector. /// Type of the `SegmentCollector` associated to this collector.
type Child: SegmentCollector; type Child: SegmentCollector<Fruit = Self::Fruit>;
/// `set_segment` is called before beginning to enumerate /// `set_segment` is called before beginning to enumerate
/// on this segment. /// on this segment.
@@ -157,10 +154,7 @@ pub trait Collector: Sync + Send {
/// Combines the fruit associated to the collection of each segments /// Combines the fruit associated to the collection of each segments
/// into one fruit. /// into one fruit.
fn merge_fruits( fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> crate::Result<Self::Fruit>;
&self,
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
) -> crate::Result<Self::Fruit>;
/// Created a segment collector and /// Created a segment collector and
fn collect_segment( fn collect_segment(
@@ -230,11 +224,11 @@ where
fn merge_fruits( fn merge_fruits(
&self, &self,
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>, children: Vec<(Left::Fruit, Right::Fruit)>,
) -> crate::Result<(Left::Fruit, Right::Fruit)> { ) -> crate::Result<(Left::Fruit, Right::Fruit)> {
let mut left_fruits = vec![]; let mut left_fruits = vec![];
let mut right_fruits = vec![]; let mut right_fruits = vec![];
for (left_fruit, right_fruit) in segment_fruits { for (left_fruit, right_fruit) in children {
left_fruits.push(left_fruit); left_fruits.push(left_fruit);
right_fruits.push(right_fruit); right_fruits.push(right_fruit);
} }
@@ -288,10 +282,7 @@ where
self.0.requires_scoring() || self.1.requires_scoring() || self.2.requires_scoring() self.0.requires_scoring() || self.1.requires_scoring() || self.2.requires_scoring()
} }
fn merge_fruits( fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
&self,
children: Vec<<Self::Child as SegmentCollector>::Fruit>,
) -> crate::Result<Self::Fruit> {
let mut one_fruits = vec![]; let mut one_fruits = vec![];
let mut two_fruits = vec![]; let mut two_fruits = vec![];
let mut three_fruits = vec![]; let mut three_fruits = vec![];
@@ -358,10 +349,7 @@ where
|| self.3.requires_scoring() || self.3.requires_scoring()
} }
fn merge_fruits( fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
&self,
children: Vec<<Self::Child as SegmentCollector>::Fruit>,
) -> crate::Result<Self::Fruit> {
let mut one_fruits = vec![]; let mut one_fruits = vec![];
let mut two_fruits = vec![]; let mut two_fruits = vec![];
let mut three_fruits = vec![]; let mut three_fruits = vec![];

[file path not shown]

@@ -34,13 +34,13 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
 fn merge_fruits(
 &self,
-children: Vec<<Self::Child as SegmentCollector>::Fruit>,
+children: Vec<<Self as Collector>::Fruit>,
 ) -> crate::Result<Box<dyn Fruit>> {
-let typed_fruit: Vec<<TCollector::Child as SegmentCollector>::Fruit> = children
+let typed_fruit: Vec<TCollector::Fruit> = children
 .into_iter()
 .map(|untyped_fruit| {
 untyped_fruit
-.downcast::<<TCollector::Child as SegmentCollector>::Fruit>()
+.downcast::<TCollector::Fruit>()
 .map(|boxed_but_typed| *boxed_but_typed)
 .map_err(|_| {
 TantivyError::InvalidArgument("Failed to cast child fruit.".to_string())
@@ -259,7 +259,7 @@ mod tests {
 let index = Index::create_in_ram(schema);
 {
-let mut index_writer = index.writer_for_tests().unwrap();
+let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
 index_writer.add_document(doc!(text=>"abc"));
 index_writer.add_document(doc!(text=>"abc abc abc"));
 index_writer.add_document(doc!(text=>"abc abc"));

[file path not shown]

@@ -185,15 +185,12 @@ impl Collector for BytesFastFieldTestCollector {
 _segment_local_id: u32,
 segment_reader: &SegmentReader,
 ) -> crate::Result<BytesFastFieldSegmentCollector> {
-let reader = segment_reader
-.fast_fields()
-.bytes(self.field)
-.ok_or_else(|| {
-crate::TantivyError::InvalidArgument("Field is not a bytes fast field.".to_string())
-})?;
 Ok(BytesFastFieldSegmentCollector {
 vals: Vec::new(),
-reader,
+reader: segment_reader
+.fast_fields()
+.bytes(self.field)
+.expect("Field is not a bytes fast field."),
 })
 }

[file path not shown]

@@ -1,4 +1,6 @@
use super::Collector; use super::Collector;
use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
use crate::collector::top_collector::TopSegmentCollector;
use crate::collector::top_collector::{ComparableDoc, TopCollector}; use crate::collector::top_collector::{ComparableDoc, TopCollector};
use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector; use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
use crate::collector::{ use crate::collector::{
@@ -12,71 +14,8 @@ use crate::DocId;
use crate::Score; use crate::Score;
use crate::SegmentLocalId; use crate::SegmentLocalId;
use crate::SegmentReader; use crate::SegmentReader;
use crate::{collector::custom_score_top_collector::CustomScoreTopCollector, fastfield::FastValue}; use std::collections::BinaryHeap;
use crate::{collector::top_collector::TopSegmentCollector, TantivyError};
use std::fmt; use std::fmt;
use std::{collections::BinaryHeap, marker::PhantomData};
struct FastFieldConvertCollector<
TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
TFastValue: FastValue,
> {
pub collector: TCollector,
pub field: Field,
pub fast_value: std::marker::PhantomData<TFastValue>,
}
impl<TCollector, TFastValue> Collector for FastFieldConvertCollector<TCollector, TFastValue>
where
TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
TFastValue: FastValue + 'static,
{
type Fruit = Vec<(TFastValue, DocAddress)>;
type Child = TCollector::Child;
fn for_segment(
&self,
segment_local_id: crate::SegmentLocalId,
segment: &SegmentReader,
) -> crate::Result<Self::Child> {
let schema = segment.schema();
let field_entry = schema.get_field_entry(self.field);
if !field_entry.is_fast() {
return Err(TantivyError::SchemaError(format!(
"Field {:?} is not a fast field.",
field_entry.name()
)));
}
let schema_type = TFastValue::to_type();
let requested_type = field_entry.field_type().value_type();
if schema_type != requested_type {
return Err(TantivyError::SchemaError(format!(
"Field {:?} is of type {:?}!={:?}",
field_entry.name(),
schema_type,
requested_type
)));
}
self.collector.for_segment(segment_local_id, segment)
}
fn requires_scoring(&self) -> bool {
self.collector.requires_scoring()
}
fn merge_fruits(
&self,
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
) -> crate::Result<Self::Fruit> {
let raw_result = self.collector.merge_fruits(segment_fruits)?;
let transformed_result = raw_result
.into_iter()
.map(|(score, doc_address)| (TFastValue::from_u64(score), doc_address))
.collect::<Vec<_>>();
Ok(transformed_result)
}
}
/// The `TopDocs` collector keeps track of the top `K` documents /// The `TopDocs` collector keeps track of the top `K` documents
/// sorted by their score. /// sorted by their score.
@@ -99,7 +38,7 @@ where
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema); /// let index = Index::create_in_ram(schema);
/// ///
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap(); /// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
/// index_writer.add_document(doc!(title => "The Name of the Wind")); /// index_writer.add_document(doc!(title => "The Name of the Wind"));
/// index_writer.add_document(doc!(title => "The Diary of Muadib")); /// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow")); /// index_writer.add_document(doc!(title => "A Dairy Cow"));
@@ -134,7 +73,7 @@ struct ScorerByFastFieldReader {
impl CustomSegmentScorer<u64> for ScorerByFastFieldReader { impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
fn score(&mut self, doc: DocId) -> u64 { fn score(&mut self, doc: DocId) -> u64 {
self.ff_reader.get(doc) self.ff_reader.get_u64(u64::from(doc))
} }
} }
@@ -148,10 +87,10 @@ impl CustomScorer<u64> for ScorerByField {
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> { fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
let ff_reader = segment_reader let ff_reader = segment_reader
.fast_fields() .fast_fields()
.u64_lenient(self.field) .u64(self.field)
.ok_or_else(|| { .ok_or_else(|| {
crate::TantivyError::SchemaError(format!( crate::TantivyError::SchemaError(format!(
"Field requested ({:?}) is not a fast field.", "Field requested ({:?}) is not a i64/u64 fast field.",
self.field self.field
)) ))
})?; })?;
@@ -173,8 +112,6 @@ impl TopDocs {
/// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in /// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
/// Lucene's TopDocsCollector. /// Lucene's TopDocsCollector.
/// ///
/// # Example
///
/// ```rust /// ```rust
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
@@ -186,7 +123,7 @@ impl TopDocs {
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema); /// let index = Index::create_in_ram(schema);
/// ///
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap(); /// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
/// index_writer.add_document(doc!(title => "The Name of the Wind")); /// index_writer.add_document(doc!(title => "The Name of the Wind"));
/// index_writer.add_document(doc!(title => "The Diary of Muadib")); /// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow")); /// index_writer.add_document(doc!(title => "A Dairy Cow"));
@@ -211,14 +148,6 @@ impl TopDocs {
/// Set top-K to rank documents by a given fast field. /// Set top-K to rank documents by a given fast field.
/// ///
/// If the field is not a fast or does not exist, this method returns successfully (it is not aware of any schema).
/// An error will be returned at the moment of search.
///
/// If the field is a FAST field but not a u64 field, search will return successfully but it will return
/// returns a monotonic u64-representation (ie. the order is still correct) of the requested field type.
///
/// # Example
///
/// ```rust /// ```rust
/// # use tantivy::schema::{Schema, FAST, TEXT}; /// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress}; /// # use tantivy::{doc, Index, DocAddress};
@@ -234,13 +163,13 @@ impl TopDocs {
/// # let schema = schema_builder.build(); /// # let schema = schema_builder.build();
/// # /// #
/// # let index = Index::create_in_ram(schema); /// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?; /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64)); /// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64));
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64)); /// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64)); /// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64)); /// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// # assert!(index_writer.commit().is_ok()); /// # assert!(index_writer.commit().is_ok());
/// # let reader = index.reader()?; /// # let reader = index.reader().unwrap();
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?; /// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?; /// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
/// # assert_eq!(top_docs, /// # assert_eq!(top_docs,
@@ -248,20 +177,25 @@ impl TopDocs {
/// # (80u64, DocAddress(0u32, 3))]); /// # (80u64, DocAddress(0u32, 3))]);
/// # Ok(()) /// # Ok(())
/// # } /// # }
///
///
/// /// Searches the document matching the given query, and /// /// Searches the document matching the given query, and
/// /// collects the top 10 documents, order by the u64-`field` /// /// collects the top 10 documents, order by the u64-`field`
/// /// given in argument. /// /// given in argument.
/// ///
/// /// `field` is required to be a FAST field.
/// fn docs_sorted_by_rating(searcher: &Searcher, /// fn docs_sorted_by_rating(searcher: &Searcher,
/// query: &dyn Query, /// query: &dyn Query,
/// rating_field: Field) /// sort_by_field: Field)
/// -> tantivy::Result<Vec<(u64, DocAddress)>> { /// -> tantivy::Result<Vec<(u64, DocAddress)>> {
/// ///
/// // This is where we build our topdocs collector /// // This is where we build our topdocs collector
/// // /// //
/// // Note the `rating_field` needs to be a FAST field here. /// // Note the generics parameter that needs to match the
/// let top_books_by_rating = TopDocs /// // type `sort_by_field`.
/// let top_docs_by_rating = TopDocs
/// ::with_limit(10) /// ::with_limit(10)
/// .order_by_u64_field(rating_field); /// .order_by_u64_field(sort_by_field);
/// ///
/// // ... and here are our documents. Note this is a simple vec. /// // ... and here are our documents. Note this is a simple vec.
/// // The `u64` in the pair is the value of our fast field for /// // The `u64` in the pair is the value of our fast field for
@@ -271,105 +205,21 @@ impl TopDocs {
/// // length of 10, or less if not enough documents matched the /// // length of 10, or less if not enough documents matched the
/// // query. /// // query.
/// let resulting_docs: Vec<(u64, DocAddress)> = /// let resulting_docs: Vec<(u64, DocAddress)> =
/// searcher.search(query, &top_books_by_rating)?; /// searcher.search(query, &top_docs_by_rating)?;
/// ///
/// Ok(resulting_docs) /// Ok(resulting_docs)
/// } /// }
/// ``` /// ```
/// ///
/// # See also /// # Panics
/// ///
/// To confortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to /// May panic if the field requested is not a fast field.
/// [.order_by_fast_field(...)](#method.order_by_fast_field) method. ///
pub fn order_by_u64_field( pub fn order_by_u64_field(
self, self,
field: Field, field: Field,
) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> { ) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
CustomScoreTopCollector::new(ScorerByField { field }, self.0.into_tscore()) self.custom_score(ScorerByField { field })
}
/// Set top-K to rank documents by a given fast field.
///
/// If the field is not a fast field, or its field type does not match the generic type, this method does not panic,
/// but an explicit error will be returned at the moment of collection.
///
/// Note that this method is a generic. The requested fast field type will be often
/// inferred in your code by the rust compiler.
///
/// Implementation-wise, for performance reason, tantivy will manipulate the u64 representation of your fast
/// field until the last moment.
///
/// # Example
///
/// ```rust
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress};
/// # use tantivy::query::{Query, AllQuery};
/// use tantivy::Searcher;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
///
/// # fn main() -> tantivy::Result<()> {
/// # let mut schema_builder = Schema::builder();
/// # let title = schema_builder.add_text_field("company", TEXT);
/// # let rating = schema_builder.add_i64_field("revenue", FAST);
/// # let schema = schema_builder.build();
/// #
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
/// # index_writer.add_document(doc!(title => "MadCow Inc.", rating => 92_000_000i64));
/// # index_writer.add_document(doc!(title => "Zozo Cow KKK", rating => 119_000_000i64));
/// # index_writer.add_document(doc!(title => "Declining Cow", rating => -63_000_000i64));
/// # assert!(index_writer.commit().is_ok());
/// # let reader = index.reader()?;
/// # let top_docs = docs_sorted_by_revenue(&reader.searcher(), &AllQuery, rating)?;
/// # assert_eq!(top_docs,
/// # vec![(119_000_000i64, DocAddress(0, 1)),
/// # (92_000_000i64, DocAddress(0, 0))]);
/// # Ok(())
/// # }
/// /// Searches the document matching the given query, and
/// /// collects the top 10 documents, order by the u64-`field`
/// /// given in argument.
/// fn docs_sorted_by_revenue(searcher: &Searcher,
/// query: &dyn Query,
/// revenue_field: Field)
/// -> tantivy::Result<Vec<(i64, DocAddress)>> {
///
/// // This is where we build our topdocs collector
/// //
/// // Note the generics parameter that needs to match the
/// // type `sort_by_field`. revenue_field here is a FAST i64 field.
/// let top_company_by_revenue = TopDocs
/// ::with_limit(2)
/// .order_by_fast_field(revenue_field);
///
/// // ... and here are our documents. Note this is a simple vec.
/// // The `i64` in the pair is the value of our fast field for
/// // each documents.
/// //
/// // The vec is sorted decreasingly by `sort_by_field`, and has a
/// // length of 10, or less if not enough documents matched the
/// // query.
/// let resulting_docs: Vec<(i64, DocAddress)> =
/// searcher.search(query, &top_company_by_revenue)?;
///
/// Ok(resulting_docs)
/// }
/// ```
pub fn order_by_fast_field<TFastValue>(
self,
fast_field: Field,
) -> impl Collector<Fruit = Vec<(TFastValue, DocAddress)>>
where
TFastValue: FastValue + 'static,
{
let u64_collector = self.order_by_u64_field(fast_field);
FastFieldConvertCollector {
collector: u64_collector,
field: fast_field,
fast_value: PhantomData,
}
} }
/// Ranks the documents using a custom score. /// Ranks the documents using a custom score.
@@ -414,7 +264,7 @@ impl TopDocs {
/// fn create_index() -> tantivy::Result<Index> { /// fn create_index() -> tantivy::Result<Index> {
/// let schema = create_schema(); /// let schema = create_schema();
/// let index = Index::create_in_ram(schema); /// let index = Index::create_in_ram(schema);
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?; /// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// let product_name = index.schema().get_field("product_name").unwrap(); /// let product_name = index.schema().get_field("product_name").unwrap();
/// let popularity: Field = index.schema().get_field("popularity").unwrap(); /// let popularity: Field = index.schema().get_field("popularity").unwrap();
/// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64)); /// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
@@ -474,7 +324,7 @@ impl TopDocs {
where where
TScore: 'static + Send + Sync + Clone + PartialOrd, TScore: 'static + Send + Sync + Clone + PartialOrd,
TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static, TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker> + Send + Sync, TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker>,
{ {
TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore()) TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore())
} }
@@ -521,7 +371,7 @@ impl TopDocs {
/// # fn main() -> tantivy::Result<()> { /// # fn main() -> tantivy::Result<()> {
/// # let schema = create_schema(); /// # let schema = create_schema();
/// # let index = Index::create_in_ram(schema); /// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?; /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # let product_name = index.schema().get_field("product_name").unwrap(); /// # let product_name = index.schema().get_field("product_name").unwrap();
/// # /// #
/// let popularity: Field = index.schema().get_field("popularity").unwrap(); /// let popularity: Field = index.schema().get_field("popularity").unwrap();
@@ -588,7 +438,7 @@ impl TopDocs {
where where
TScore: 'static + Send + Sync + Clone + PartialOrd, TScore: 'static + Send + Sync + Clone + PartialOrd,
TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static, TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer> + Send + Sync, TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer>,
{ {
CustomScoreTopCollector::new(custom_score, self.0.into_tscore()) CustomScoreTopCollector::new(custom_score, self.0.into_tscore())
} }
@@ -711,7 +561,7 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"Hello happy tax payer.")); index_writer.add_document(doc!(text_field=>"Hello happy tax payer."));
index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer")); index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer"));
index_writer.add_document(doc!(text_field=>"I like Droopy")); index_writer.add_document(doc!(text_field=>"I like Droopy"));
@@ -872,94 +722,6 @@ mod tests {
); );
} }
#[test]
fn test_top_field_collector_datetime() -> crate::Result<()> {
use std::str::FromStr;
let mut schema_builder = Schema::builder();
let name = schema_builder.add_text_field("name", TEXT);
let birthday = schema_builder.add_date_field("birthday", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
let pr_birthday = crate::DateTime::from_str("1898-04-09T00:00:00+00:00")?;
index_writer.add_document(doc!(
name => "Paul Robeson",
birthday => pr_birthday
));
let mr_birthday = crate::DateTime::from_str("1947-11-08T00:00:00+00:00")?;
index_writer.add_document(doc!(
name => "Minnie Riperton",
birthday => mr_birthday
));
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(3).order_by_fast_field(birthday);
let top_docs: Vec<(crate::DateTime, DocAddress)> =
searcher.search(&AllQuery, &top_collector)?;
assert_eq!(
&top_docs[..],
&[
(mr_birthday, DocAddress(0, 1)),
(pr_birthday, DocAddress(0, 0)),
]
);
Ok(())
}
#[test]
fn test_top_field_collector_i64() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let city = schema_builder.add_text_field("city", TEXT);
let altitude = schema_builder.add_i64_field("altitude", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(
city => "georgetown",
altitude => -1i64,
));
index_writer.add_document(doc!(
city => "tokyo",
altitude => 40i64,
));
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
let top_docs: Vec<(i64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
assert_eq!(
&top_docs[..],
&[(40i64, DocAddress(0, 1)), (-1i64, DocAddress(0, 0)),]
);
Ok(())
}
#[test]
fn test_top_field_collector_f64() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let city = schema_builder.add_text_field("city", TEXT);
let altitude = schema_builder.add_f64_field("altitude", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(
city => "georgetown",
altitude => -1.0f64,
));
index_writer.add_document(doc!(
city => "tokyo",
altitude => 40f64,
));
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
let top_docs: Vec<(f64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
assert_eq!(
&top_docs[..],
&[(40f64, DocAddress(0, 1)), (-1.0f64, DocAddress(0, 0)),]
);
Ok(())
}
#[test] #[test]
#[should_panic] #[should_panic]
fn test_field_does_not_exist() { fn test_field_does_not_exist() {
@@ -982,41 +744,29 @@ mod tests {
} }
#[test] #[test]
fn test_field_not_fast_field() -> crate::Result<()> { fn test_field_not_fast_field() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, STORED); let size = schema_builder.add_u64_field(SIZE, STORED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let (index, _) = index("beer", title, schema, |index_writer| {
let mut index_writer = index.writer_for_tests()?; index_writer.add_document(doc!(
index_writer.add_document(doc!(size=>1u64)); title => "bottle of beer",
index_writer.commit()?; size => 12u64,
let searcher = index.reader()?.searcher(); ));
});
let searcher = index.reader().unwrap().searcher();
let segment = searcher.segment_reader(0); let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size); let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
let err = top_collector.for_segment(0, segment).err().unwrap(); let err = top_collector.for_segment(0, segment);
assert!( if let Err(crate::TantivyError::SchemaError(msg)) = err {
matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field requested (Field(0)) is not a fast field.") assert_eq!(
); msg,
Ok(()) "Field requested (Field(1)) is not a i64/u64 fast field."
} );
} else {
#[test] assert!(false);
fn test_field_wrong_type() -> crate::Result<()> { }
let mut schema_builder = Schema::builder();
let size = schema_builder.add_u64_field(SIZE, STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(size=>1u64));
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_fast_field::<i64>(size);
let err = top_collector.for_segment(0, segment).err().unwrap();
assert!(
matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field \"size\" is not a fast field.")
);
Ok(())
} }
#[test] #[test]
@@ -1070,7 +820,8 @@ mod tests {
mut doc_adder: impl FnMut(&mut IndexWriter) -> (), mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
) -> (Index, Box<dyn Query>) { ) -> (Index, Box<dyn Query>) {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
doc_adder(&mut index_writer); doc_adder(&mut index_writer);
index_writer.commit().unwrap(); index_writer.commit().unwrap();
let query_parser = QueryParser::for_index(&index, vec![query_field]); let query_parser = QueryParser::for_index(&index, vec![query_field]);

@@ -49,7 +49,7 @@ pub trait ScoreTweaker<TScore>: Sync {
impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore> impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
where where
TScoreTweaker: ScoreTweaker<TScore> + Send + Sync, TScoreTweaker: ScoreTweaker<TScore>,
TScore: 'static + PartialOrd + Clone + Send + Sync, TScore: 'static + PartialOrd + Clone + Send + Sync,
{ {
type Fruit = Vec<(TScore, DocAddress)>; type Fruit = Vec<(TScore, DocAddress)>;
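For context, the `ScoreTweaker` bound touched above is reached in practice through `TopDocs::tweak_score`. The following is a minimal, hedged sketch of that documented entry point for this era of tantivy; the schema, field names and values are illustrative and are not taken from this diff.

    use tantivy::collector::TopDocs;
    use tantivy::query::AllQuery;
    use tantivy::schema::{Schema, FAST, TEXT};
    use tantivy::{doc, DocAddress, DocId, Index, Score, SegmentReader};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field("title", TEXT);
        let popularity = schema_builder.add_u64_field("popularity", FAST);
        let index = Index::create_in_ram(schema_builder.build());

        let mut writer = index.writer(50_000_000)?;
        writer.add_document(doc!(title => "quiet post", popularity => 2u64));
        writer.add_document(doc!(title => "viral post", popularity => 1_000u64));
        writer.commit()?;

        let searcher = index.reader()?.searcher();
        // The closure pair below is the blanket `ScoreTweaker` implementation:
        // the outer closure runs once per segment, the inner one per document.
        let collector = TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
            let popularity_reader = segment_reader.fast_fields().u64(popularity).unwrap();
            move |doc: DocId, original_score: Score| {
                original_score * (1u64 + popularity_reader.get(doc)) as f32
            }
        });
        let top_docs: Vec<(Score, DocAddress)> = searcher.search(&AllQuery, &collector)?;
        assert!(top_docs[0].0 > top_docs[1].0);
        Ok(())
    }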

@@ -1,7 +1,6 @@
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt}; use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use std::io; use std::io;
use std::ops::Deref;
use crate::directory::OwnedBytes;
pub(crate) struct BitPacker { pub(crate) struct BitPacker {
mini_buffer: u64, mini_buffer: u64,
@@ -61,14 +60,20 @@ impl BitPacker {
} }
#[derive(Clone)] #[derive(Clone)]
pub struct BitUnpacker { pub struct BitUnpacker<Data>
where
Data: Deref<Target = [u8]>,
{
num_bits: u64, num_bits: u64,
mask: u64, mask: u64,
data: OwnedBytes, data: Data,
} }
impl BitUnpacker { impl<Data> BitUnpacker<Data>
pub fn new(data: OwnedBytes, num_bits: u8) -> BitUnpacker { where
Data: Deref<Target = [u8]>,
{
pub fn new(data: Data, num_bits: u8) -> BitUnpacker<Data> {
let mask: u64 = if num_bits == 64 { let mask: u64 = if num_bits == 64 {
!0u64 !0u64
} else { } else {
@@ -85,7 +90,7 @@ impl BitUnpacker {
if self.num_bits == 0 { if self.num_bits == 0 {
return 0u64; return 0u64;
} }
let data: &[u8] = self.data.as_slice(); let data: &[u8] = &*self.data;
let num_bits = self.num_bits; let num_bits = self.num_bits;
let mask = self.mask; let mask = self.mask;
let addr_in_bits = idx * num_bits; let addr_in_bits = idx * num_bits;
@@ -104,9 +109,8 @@ impl BitUnpacker {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::{BitPacker, BitUnpacker}; use super::{BitPacker, BitUnpacker};
use crate::directory::OwnedBytes;
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>) { fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
let mut data = Vec::new(); let mut data = Vec::new();
let mut bitpacker = BitPacker::new(); let mut bitpacker = BitPacker::new();
let max_val: u64 = (1u64 << num_bits as u64) - 1u64; let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
@@ -118,7 +122,7 @@ mod test {
} }
bitpacker.close(&mut data).unwrap(); bitpacker.close(&mut data).unwrap();
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7); assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
let bitunpacker = BitUnpacker::new(OwnedBytes::new(data), num_bits); let bitunpacker = BitUnpacker::new(data, num_bits);
(bitunpacker, vals) (bitunpacker, vals)
} }

@@ -1,15 +1,14 @@
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::common::CountingWriter; use crate::common::CountingWriter;
use crate::common::VInt; use crate::common::VInt;
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::directory::{TerminatingWrite, WritePtr}; use crate::directory::{TerminatingWrite, WritePtr};
use crate::schema::Field; use crate::schema::Field;
use crate::space_usage::FieldUsage; use crate::space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage; use crate::space_usage::PerFieldSpaceUsage;
use std::collections::HashMap; use std::collections::HashMap;
use std::io::{self, Read, Write}; use std::io::Write;
use std::io::{self, Read};
use super::HasLen;
#[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)] #[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
pub struct FileAddr { pub struct FileAddr {
@@ -104,26 +103,25 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
/// for each field. /// for each field.
#[derive(Clone)] #[derive(Clone)]
pub struct CompositeFile { pub struct CompositeFile {
data: FileSlice, data: ReadOnlySource,
offsets_index: HashMap<FileAddr, (usize, usize)>, offsets_index: HashMap<FileAddr, (usize, usize)>,
} }
impl CompositeFile { impl CompositeFile {
/// Opens a composite file stored in a given /// Opens a composite file stored in a given
/// `FileSlice`. /// `ReadOnlySource`.
pub fn open(data: &FileSlice) -> io::Result<CompositeFile> { pub fn open(data: &ReadOnlySource) -> io::Result<CompositeFile> {
let end = data.len(); let end = data.len();
let footer_len_data = data.slice_from(end - 4).read_bytes()?; let footer_len_data = data.slice_from(end - 4);
let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize; let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize;
let footer_start = end - 4 - footer_len; let footer_start = end - 4 - footer_len;
let footer_data = data let footer_data = data.slice(footer_start, footer_start + footer_len);
.slice(footer_start, footer_start + footer_len)
.read_bytes()?;
let mut footer_buffer = footer_data.as_slice(); let mut footer_buffer = footer_data.as_slice();
let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize; let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize;
let mut file_addrs = vec![]; let mut file_addrs = vec![];
let mut offsets = vec![]; let mut offsets = vec![];
let mut field_index = HashMap::new(); let mut field_index = HashMap::new();
let mut offset = 0; let mut offset = 0;
@@ -152,19 +150,19 @@ impl CompositeFile {
pub fn empty() -> CompositeFile { pub fn empty() -> CompositeFile {
CompositeFile { CompositeFile {
offsets_index: HashMap::new(), offsets_index: HashMap::new(),
data: FileSlice::empty(), data: ReadOnlySource::empty(),
} }
} }
/// Returns the `FileSlice` associated /// Returns the `ReadOnlySource` associated
/// to a given `Field` and stored in a `CompositeFile`. /// to a given `Field` and stored in a `CompositeFile`.
pub fn open_read(&self, field: Field) -> Option<FileSlice> { pub fn open_read(&self, field: Field) -> Option<ReadOnlySource> {
self.open_read_with_idx(field, 0) self.open_read_with_idx(field, 0)
} }
/// Returns the `FileSlice` associated /// Returns the `ReadOnlySource` associated
/// to a given `Field` and stored in a `CompositeFile`. /// to a given `Field` and stored in a `CompositeFile`.
pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<FileSlice> { pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<ReadOnlySource> {
self.offsets_index self.offsets_index
.get(&FileAddr { field, idx }) .get(&FileAddr { field, idx })
.map(|&(from, to)| self.data.slice(from, to)) .map(|&(from, to)| self.data.slice(from, to))
@@ -194,44 +192,46 @@ mod test {
use std::path::Path; use std::path::Path;
#[test] #[test]
fn test_composite_file() -> crate::Result<()> { fn test_composite_file() {
let path = Path::new("test_path"); let path = Path::new("test_path");
let directory = RAMDirectory::create(); let mut directory = RAMDirectory::create();
{ {
let w = directory.open_write(path).unwrap(); let w = directory.open_write(path).unwrap();
let mut composite_write = CompositeWrite::wrap(w); let mut composite_write = CompositeWrite::wrap(w);
let mut write_0 = composite_write.for_field(Field::from_field_id(0u32)); {
VInt(32431123u64).serialize(&mut write_0)?; let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
write_0.flush()?; VInt(32431123u64).serialize(&mut write_0).unwrap();
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32)); write_0.flush().unwrap();
VInt(2).serialize(&mut write_4)?; }
write_4.flush()?;
composite_write.close()?; {
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
VInt(2).serialize(&mut write_4).unwrap();
write_4.flush().unwrap();
}
composite_write.close().unwrap();
} }
{ {
let r = directory.open_read(path)?; let r = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&r)?; let composite_file = CompositeFile::open(&r).unwrap();
{ {
let file0 = composite_file let file0 = composite_file
.open_read(Field::from_field_id(0u32)) .open_read(Field::from_field_id(0u32))
.unwrap() .unwrap();
.read_bytes()?;
let mut file0_buf = file0.as_slice(); let mut file0_buf = file0.as_slice();
let payload_0 = VInt::deserialize(&mut file0_buf)?.0; let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
assert_eq!(file0_buf.len(), 0); assert_eq!(file0_buf.len(), 0);
assert_eq!(payload_0, 32431123u64); assert_eq!(payload_0, 32431123u64);
} }
{ {
let file4 = composite_file let file4 = composite_file
.open_read(Field::from_field_id(4u32)) .open_read(Field::from_field_id(4u32))
.unwrap() .unwrap();
.read_bytes()?;
let mut file4_buf = file4.as_slice(); let mut file4_buf = file4.as_slice();
let payload_4 = VInt::deserialize(&mut file4_buf)?.0; let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
assert_eq!(file4_buf.len(), 0); assert_eq!(file4_buf.len(), 0);
assert_eq!(payload_4, 2u64); assert_eq!(payload_4, 2u64);
} }
} }
Ok(())
} }
} }

@@ -21,6 +21,7 @@ use crate::schema::FieldType;
use crate::schema::Schema; use crate::schema::Schema;
use crate::tokenizer::{TextAnalyzer, TokenizerManager}; use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::IndexWriter; use crate::IndexWriter;
use std::borrow::BorrowMut;
use std::collections::HashSet; use std::collections::HashSet;
use std::fmt; use std::fmt;
@@ -56,9 +57,7 @@ pub struct Index {
} }
impl Index { impl Index {
/// Examines the directory to see if it contains an index. /// Examines the director to see if it contains an index
///
/// Effectively, it only checks for the presence of the `meta.json` file.
pub fn exists<Dir: Directory>(dir: &Dir) -> bool { pub fn exists<Dir: Directory>(dir: &Dir) -> bool {
dir.exists(&META_FILEPATH) dir.exists(&META_FILEPATH)
} }
@@ -141,9 +140,7 @@ impl Index {
Index::create(mmap_directory, schema) Index::create(mmap_directory, schema)
} }
/// Creates a new index given an implementation of the trait `Directory`. /// Creates a new index given an implementation of the trait `Directory`
///
/// If a directory previously existed, it will be erased.
pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> { pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
let directory = ManagedDirectory::wrap(dir)?; let directory = ManagedDirectory::wrap(dir)?;
Index::from_directory(directory, schema) Index::from_directory(directory, schema)
@@ -152,8 +149,8 @@ impl Index {
/// Create a new index from a directory. /// Create a new index from a directory.
/// ///
/// This will overwrite existing meta.json /// This will overwrite existing meta.json
fn from_directory(directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> { fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
save_new_metas(schema.clone(), &directory)?; save_new_metas(schema.clone(), directory.borrow_mut())?;
let metas = IndexMeta::with_schema(schema); let metas = IndexMeta::with_schema(schema);
Index::create_from_metas(directory, &metas, SegmentMetaInventory::default()) Index::create_from_metas(directory, &metas, SegmentMetaInventory::default())
} }
@@ -303,15 +300,6 @@ impl Index {
) )
} }
/// Helper to create an index writer for tests.
///
/// That index writer only simply has a single thread and a heap of 5 MB.
/// Using a single thread gives us a deterministic allocation of DocId.
#[cfg(test)]
pub fn writer_for_tests(&self) -> crate::Result<IndexWriter> {
self.writer_with_num_threads(1, 10_000_000)
}
/// Creates a multithreaded writer /// Creates a multithreaded writer
/// ///
/// Tantivy will automatically define the number of threads to use. /// Tantivy will automatically define the number of threads to use.
@@ -514,7 +502,7 @@ mod tests {
let schema = throw_away_schema(); let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap(); let field = schema.get_field("num_likes").unwrap();
let mut index = Index::create_from_tempdir(schema).unwrap(); let mut index = Index::create_from_tempdir(schema).unwrap();
let mut writer = index.writer_for_tests().unwrap(); let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.commit().unwrap(); writer.commit().unwrap();
let reader = index let reader = index
.reader_builder() .reader_builder()
@@ -551,33 +539,23 @@ mod tests {
test_index_on_commit_reload_policy_aux(field, &write_index, &reader); test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
} }
} }
fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) { fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
let mut reader_index = reader.index(); let mut reader_index = reader.index();
let (sender, receiver) = crossbeam::channel::unbounded(); let (sender, receiver) = crossbeam::channel::unbounded();
let _watch_handle = reader_index.directory_mut().watch(Box::new(move || { let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
let _ = sender.send(()); let _ = sender.send(());
})); }));
let mut writer = index.writer_for_tests().unwrap(); let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64)); writer.add_document(doc!(field=>1u64));
writer.commit().unwrap(); writer.commit().unwrap();
// We need a loop here because it is possible for notify to send more than assert!(receiver.recv().is_ok());
// one modify event. It was observed on CI on MacOS. assert_eq!(reader.searcher().num_docs(), 1);
loop {
assert!(receiver.recv().is_ok());
if reader.searcher().num_docs() == 1 {
break;
}
}
writer.add_document(doc!(field=>2u64)); writer.add_document(doc!(field=>2u64));
writer.commit().unwrap(); writer.commit().unwrap();
// ... Same as above assert!(receiver.recv().is_ok());
loop { assert_eq!(reader.searcher().num_docs(), 2);
assert!(receiver.recv().is_ok());
if reader.searcher().num_docs() == 2 {
break;
}
}
} }
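The loop-versus-single-recv change above exists because one commit can trigger more than one filesystem notification. As a point of reference, here is a small, hedged sketch of the same reload-on-commit behaviour through the public API, using a RAM index, an illustrative heap size, and a bounded polling loop instead of a watch channel.

    use tantivy::schema::{Schema, FAST};
    use tantivy::{doc, Index, ReloadPolicy};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let num_likes = schema_builder.add_u64_field("num_likes", FAST);
        let index = Index::create_in_ram(schema_builder.build());

        // Reload searchers automatically whenever a commit is detected.
        let reader = index
            .reader_builder()
            .reload_policy(ReloadPolicy::OnCommit)
            .try_into()?;
        assert_eq!(reader.searcher().num_docs(), 0);

        let mut writer = index.writer(50_000_000)?;
        writer.add_document(doc!(num_likes => 1u64));
        writer.commit()?;

        // The reload is asynchronous, so poll instead of asserting immediately.
        for _ in 0..100 {
            if reader.searcher().num_docs() == 1 {
                break;
            }
            std::thread::sleep(std::time::Duration::from_millis(10));
        }
        assert_eq!(reader.searcher().num_docs(), 1);
        Ok(())
    }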
// This test will not pass on windows, because windows // This test will not pass on windows, because windows

@@ -301,7 +301,7 @@ mod tests {
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed"); let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
assert_eq!( assert_eq!(
json, json,
r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default","fieldnorms":true},"stored":false}}],"opstamp":0}"# r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#
); );
} }
} }

@@ -1,10 +1,9 @@
use std::io;
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::positions::PositionReader; use crate::positions::PositionReader;
use crate::postings::TermInfo; use crate::postings::TermInfo;
use crate::postings::{BlockSegmentPostings, SegmentPostings}; use crate::postings::{BlockSegmentPostings, SegmentPostings};
use crate::schema::FieldType;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::schema::Term; use crate::schema::Term;
use crate::termdict::TermDictionary; use crate::termdict::TermDictionary;
@@ -16,7 +15,7 @@ use crate::termdict::TermDictionary;
/// ///
/// It is safe to delete the segment associated to /// It is safe to delete the segment associated to
/// an `InvertedIndexReader`. As long as it is open, /// an `InvertedIndexReader`. As long as it is open,
/// the `FileSlice` it is relying on should /// the `ReadOnlySource` it is relying on should
/// stay available. /// stay available.
/// ///
/// ///
@@ -24,9 +23,9 @@ use crate::termdict::TermDictionary;
/// the `SegmentReader`'s [`.inverted_index(...)`] method /// the `SegmentReader`'s [`.inverted_index(...)`] method
pub struct InvertedIndexReader { pub struct InvertedIndexReader {
termdict: TermDictionary, termdict: TermDictionary,
postings_file_slice: FileSlice, postings_source: ReadOnlySource,
positions_file_slice: FileSlice, positions_source: ReadOnlySource,
positions_idx_file_slice: FileSlice, positions_idx_source: ReadOnlySource,
record_option: IndexRecordOption, record_option: IndexRecordOption,
total_num_tokens: u64, total_num_tokens: u64,
} }
@@ -35,31 +34,35 @@ impl InvertedIndexReader {
#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
pub(crate) fn new( pub(crate) fn new(
termdict: TermDictionary, termdict: TermDictionary,
postings_file_slice: FileSlice, postings_source: ReadOnlySource,
positions_file_slice: FileSlice, positions_source: ReadOnlySource,
positions_idx_file_slice: FileSlice, positions_idx_source: ReadOnlySource,
record_option: IndexRecordOption, record_option: IndexRecordOption,
) -> io::Result<InvertedIndexReader> { ) -> InvertedIndexReader {
let (total_num_tokens_slice, postings_body) = postings_file_slice.split(8); let total_num_tokens_data = postings_source.slice(0, 8);
let total_num_tokens = u64::deserialize(&mut total_num_tokens_slice.read_bytes()?)?; let mut total_num_tokens_cursor = total_num_tokens_data.as_slice();
Ok(InvertedIndexReader { let total_num_tokens = u64::deserialize(&mut total_num_tokens_cursor).unwrap_or(0u64);
InvertedIndexReader {
termdict, termdict,
postings_file_slice: postings_body, postings_source: postings_source.slice_from(8),
positions_file_slice, positions_source,
positions_idx_file_slice, positions_idx_source,
record_option, record_option,
total_num_tokens, total_num_tokens,
}) }
} }
/// Creates an empty `InvertedIndexReader` object, which /// Creates an empty `InvertedIndexReader` object, which
/// contains no terms at all. /// contains no terms at all.
pub fn empty(record_option: IndexRecordOption) -> InvertedIndexReader { pub fn empty(field_type: &FieldType) -> InvertedIndexReader {
let record_option = field_type
.get_index_record_option()
.unwrap_or(IndexRecordOption::Basic);
InvertedIndexReader { InvertedIndexReader {
termdict: TermDictionary::empty(), termdict: TermDictionary::empty(),
postings_file_slice: FileSlice::empty(), postings_source: ReadOnlySource::empty(),
positions_file_slice: FileSlice::empty(), positions_source: ReadOnlySource::empty(),
positions_idx_file_slice: FileSlice::empty(), positions_idx_source: ReadOnlySource::empty(),
record_option, record_option,
total_num_tokens: 0u64, total_num_tokens: 0u64,
} }
@@ -89,12 +92,11 @@ impl InvertedIndexReader {
&self, &self,
term_info: &TermInfo, term_info: &TermInfo,
block_postings: &mut BlockSegmentPostings, block_postings: &mut BlockSegmentPostings,
) -> io::Result<()> { ) {
let postings_slice = self let offset = term_info.postings_offset as usize;
.postings_file_slice let end_source = self.postings_source.len();
.slice_from(term_info.postings_offset as usize); let postings_slice = self.postings_source.slice(offset, end_source);
block_postings.reset(term_info.doc_freq, postings_slice.read_bytes()?); block_postings.reset(term_info.doc_freq, postings_slice);
Ok(())
} }
/// Returns a block postings given a `Term`. /// Returns a block postings given a `Term`.
@@ -105,11 +107,9 @@ impl InvertedIndexReader {
&self, &self,
term: &Term, term: &Term,
option: IndexRecordOption, option: IndexRecordOption,
) -> io::Result<Option<BlockSegmentPostings>> { ) -> Option<BlockSegmentPostings> {
Ok(self self.get_term_info(term)
.get_term_info(term)
.map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option)) .map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option))
.transpose()?)
} }
/// Returns a block postings given a `term_info`. /// Returns a block postings given a `term_info`.
@@ -120,10 +120,10 @@ impl InvertedIndexReader {
&self, &self,
term_info: &TermInfo, term_info: &TermInfo,
requested_option: IndexRecordOption, requested_option: IndexRecordOption,
) -> io::Result<BlockSegmentPostings> { ) -> BlockSegmentPostings {
let offset = term_info.postings_offset as usize; let offset = term_info.postings_offset as usize;
let postings_data = self.postings_file_slice.slice_from(offset); let postings_data = self.postings_source.slice_from(offset);
BlockSegmentPostings::open( BlockSegmentPostings::from_data(
term_info.doc_freq, term_info.doc_freq,
postings_data, postings_data,
self.record_option, self.record_option,
@@ -139,23 +139,20 @@ impl InvertedIndexReader {
&self, &self,
term_info: &TermInfo, term_info: &TermInfo,
option: IndexRecordOption, option: IndexRecordOption,
) -> io::Result<SegmentPostings> { ) -> SegmentPostings {
let block_postings = self.read_block_postings_from_terminfo(term_info, option)?; let block_postings = self.read_block_postings_from_terminfo(term_info, option);
let position_stream = { let position_stream = {
if option.has_positions() { if option.has_positions() {
let position_reader = self.positions_file_slice.clone(); let position_reader = self.positions_source.clone();
let skip_reader = self.positions_idx_file_slice.clone(); let skip_reader = self.positions_idx_source.clone();
let position_reader = let position_reader =
PositionReader::new(position_reader, skip_reader, term_info.positions_idx)?; PositionReader::new(position_reader, skip_reader, term_info.positions_idx);
Some(position_reader) Some(position_reader)
} else { } else {
None None
} }
}; };
Ok(SegmentPostings::from_block_postings( SegmentPostings::from_block_postings(block_postings, position_stream)
block_postings,
position_stream,
))
} }
/// Returns the total number of tokens recorded for all documents /// Returns the total number of tokens recorded for all documents
@@ -174,31 +171,24 @@ impl InvertedIndexReader {
/// For instance, requesting `IndexRecordOption::Freq` for a /// For instance, requesting `IndexRecordOption::Freq` for a
/// `TextIndexingOptions` that does not index position will return a `SegmentPostings` /// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
/// with `DocId`s and frequencies. /// with `DocId`s and frequencies.
pub fn read_postings( pub fn read_postings(&self, term: &Term, option: IndexRecordOption) -> Option<SegmentPostings> {
&self,
term: &Term,
option: IndexRecordOption,
) -> io::Result<Option<SegmentPostings>> {
self.get_term_info(term) self.get_term_info(term)
.map(move |term_info| self.read_postings_from_terminfo(&term_info, option)) .map(move |term_info| self.read_postings_from_terminfo(&term_info, option))
.transpose()
} }
pub(crate) fn read_postings_no_deletes( pub(crate) fn read_postings_no_deletes(
&self, &self,
term: &Term, term: &Term,
option: IndexRecordOption, option: IndexRecordOption,
) -> io::Result<Option<SegmentPostings>> { ) -> Option<SegmentPostings> {
self.get_term_info(term) self.get_term_info(term)
.map(|term_info| self.read_postings_from_terminfo(&term_info, option)) .map(|term_info| self.read_postings_from_terminfo(&term_info, option))
.transpose()
} }
/// Returns the number of documents containing the term. /// Returns the number of documents containing the term.
pub fn doc_freq(&self, term: &Term) -> io::Result<u32> { pub fn doc_freq(&self, term: &Term) -> u32 {
Ok(self self.get_term_info(term)
.get_term_info(term)
.map(|term_info| term_info.doc_freq) .map(|term_info| term_info.doc_freq)
.unwrap_or(0u32)) .unwrap_or(0u32)
} }
} }

@@ -11,8 +11,8 @@ use crate::store::StoreReader;
use crate::termdict::TermMerger; use crate::termdict::TermMerger;
use crate::DocAddress; use crate::DocAddress;
use crate::Index; use crate::Index;
use std::fmt;
use std::sync::Arc; use std::sync::Arc;
use std::{fmt, io};
/// Holds a list of `SegmentReader`s ready for search. /// Holds a list of `SegmentReader`s ready for search.
/// ///
@@ -32,17 +32,17 @@ impl Searcher {
schema: Schema, schema: Schema,
index: Index, index: Index,
segment_readers: Vec<SegmentReader>, segment_readers: Vec<SegmentReader>,
) -> io::Result<Searcher> { ) -> Searcher {
let store_readers: Vec<StoreReader> = segment_readers let store_readers = segment_readers
.iter() .iter()
.map(SegmentReader::get_store_reader) .map(SegmentReader::get_store_reader)
.collect::<io::Result<Vec<_>>>()?; .collect();
Ok(Searcher { Searcher {
schema, schema,
index, index,
segment_readers, segment_readers,
store_readers, store_readers,
}) }
} }
/// Returns the `Index` associated to the `Searcher` /// Returns the `Index` associated to the `Searcher`
@@ -75,14 +75,13 @@ impl Searcher {
/// Return the overall number of documents containing /// Return the overall number of documents containing
/// the given term. /// the given term.
pub fn doc_freq(&self, term: &Term) -> crate::Result<u64> { pub fn doc_freq(&self, term: &Term) -> u64 {
let mut total_doc_freq = 0; self.segment_readers
for segment_reader in &self.segment_readers { .iter()
let inverted_index = segment_reader.inverted_index(term.field())?; .map(|segment_reader| {
let doc_freq = inverted_index.doc_freq(term)?; u64::from(segment_reader.inverted_index(term.field()).doc_freq(term))
total_doc_freq += u64::from(doc_freq); })
} .sum::<u64>()
Ok(total_doc_freq)
} }
/// Return the list of segment readers /// Return the list of segment readers
@@ -149,22 +148,22 @@ impl Searcher {
} }
/// Return the field searcher associated to a `Field`. /// Return the field searcher associated to a `Field`.
pub fn field(&self, field: Field) -> crate::Result<FieldSearcher> { pub fn field(&self, field: Field) -> FieldSearcher {
let inv_index_readers: Vec<Arc<InvertedIndexReader>> = self let inv_index_readers = self
.segment_readers .segment_readers
.iter() .iter()
.map(|segment_reader| segment_reader.inverted_index(field)) .map(|segment_reader| segment_reader.inverted_index(field))
.collect::<crate::Result<Vec<_>>>()?; .collect::<Vec<_>>();
Ok(FieldSearcher::new(inv_index_readers)) FieldSearcher::new(inv_index_readers)
} }
/// Summarize total space usage of this searcher. /// Summarize total space usage of this searcher.
pub fn space_usage(&self) -> io::Result<SearcherSpaceUsage> { pub fn space_usage(&self) -> SearcherSpaceUsage {
let mut space_usage = SearcherSpaceUsage::new(); let mut space_usage = SearcherSpaceUsage::new();
for segment_reader in &self.segment_readers { for segment_reader in self.segment_readers.iter() {
space_usage.add_segment(segment_reader.space_usage()?); space_usage.add_segment(segment_reader.space_usage());
} }
Ok(space_usage) space_usage
} }
} }
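The `doc_freq` change above turns a per-segment sum into a fallible call. For orientation, a minimal, hedged sketch of counting matches for a single term through the stable public API; the schema and text are made up for illustration.

    use tantivy::collector::Count;
    use tantivy::query::TermQuery;
    use tantivy::schema::{IndexRecordOption, Schema, TEXT};
    use tantivy::{doc, Index, Term};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let body = schema_builder.add_text_field("body", TEXT);
        let index = Index::create_in_ram(schema_builder.build());

        let mut writer = index.writer(50_000_000)?;
        writer.add_document(doc!(body => "a bottle of beer"));
        writer.add_document(doc!(body => "a glass of wine"));
        writer.commit()?;

        let searcher = index.reader()?.searcher();
        let term = Term::from_field_text(body, "beer");

        // Counting hits for a TermQuery gives the same number that
        // Searcher::doc_freq computes by summing per-segment doc_freq values.
        let query = TermQuery::new(term, IndexRecordOption::Basic);
        let count = searcher.search(&query, &Count)?;
        assert_eq!(count, 1);
        Ok(())
    }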

@@ -4,7 +4,7 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError}; use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::Directory; use crate::directory::Directory;
use crate::directory::{FileSlice, WritePtr}; use crate::directory::{ReadOnlySource, WritePtr};
use crate::indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::SegmentSerializer;
use crate::schema::Schema; use crate::schema::Schema;
use crate::Opstamp; use crate::Opstamp;
@@ -78,9 +78,10 @@ impl Segment {
} }
/// Open one of the component file for a *regular* read. /// Open one of the component file for a *regular* read.
pub fn open_read(&self, component: SegmentComponent) -> Result<FileSlice, OpenReadError> { pub fn open_read(&self, component: SegmentComponent) -> Result<ReadOnlySource, OpenReadError> {
let path = self.relative_path(component); let path = self.relative_path(component);
self.index.directory().open_read(&path) let source = self.index.directory().open_read(&path)?;
Ok(source)
} }
/// Open one of the component file for *regular* write. /// Open one of the component file for *regular* write.

@@ -20,7 +20,7 @@ pub enum SegmentComponent {
/// Dictionary associating `Term`s to `TermInfo`s which is /// Dictionary associating `Term`s to `TermInfo`s which is
/// simply an address into the `postings` file and the `positions` file. /// simply an address into the `postings` file and the `positions` file.
TERMS, TERMS,
/// Row-oriented, compressed storage of the documents. /// Row-oriented, LZ4-compressed storage of the documents.
/// Accessing a document from the store is relatively slow, as it /// Accessing a document from the store is relatively slow, as it
/// requires to decompress the entire block it belongs to. /// requires to decompress the entire block it belongs to.
STORE, STORE,

@@ -1,26 +1,26 @@
use crate::common::CompositeFile;
use crate::common::HasLen; use crate::common::HasLen;
use crate::core::InvertedIndexReader; use crate::core::InvertedIndexReader;
use crate::core::Segment; use crate::core::Segment;
use crate::core::SegmentComponent; use crate::core::SegmentComponent;
use crate::core::SegmentId; use crate::core::SegmentId;
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::fastfield::DeleteBitSet; use crate::fastfield::DeleteBitSet;
use crate::fastfield::FacetReader; use crate::fastfield::FacetReader;
use crate::fastfield::FastFieldReaders; use crate::fastfield::FastFieldReaders;
use crate::fieldnorm::{FieldNormReader, FieldNormReaders}; use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::schema::Field;
use crate::schema::FieldType; use crate::schema::FieldType;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::{Field, IndexRecordOption};
use crate::space_usage::SegmentSpaceUsage; use crate::space_usage::SegmentSpaceUsage;
use crate::store::StoreReader; use crate::store::StoreReader;
use crate::termdict::TermDictionary; use crate::termdict::TermDictionary;
use crate::DocId; use crate::DocId;
use crate::{common::CompositeFile, error::DataCorruption};
use fail::fail_point; use fail::fail_point;
use std::collections::HashMap;
use std::fmt; use std::fmt;
use std::sync::Arc; use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
use std::{collections::HashMap, io};
/// Entry point to access all of the datastructures of the `Segment` /// Entry point to access all of the datastructures of the `Segment`
/// ///
@@ -50,7 +50,7 @@ pub struct SegmentReader {
fast_fields_readers: Arc<FastFieldReaders>, fast_fields_readers: Arc<FastFieldReaders>,
fieldnorm_readers: FieldNormReaders, fieldnorm_readers: FieldNormReaders,
store_file: FileSlice, store_source: ReadOnlySource,
delete_bitset_opt: Option<DeleteBitSet>, delete_bitset_opt: Option<DeleteBitSet>,
schema: Schema, schema: Schema,
} }
@@ -106,26 +106,16 @@ impl SegmentReader {
} }
/// Accessor to the `FacetReader` associated to a given `Field`. /// Accessor to the `FacetReader` associated to a given `Field`.
pub fn facet_reader(&self, field: Field) -> crate::Result<FacetReader> { pub fn facet_reader(&self, field: Field) -> Option<FacetReader> {
let field_entry = self.schema.get_field_entry(field); let field_entry = self.schema.get_field_entry(field);
if field_entry.field_type() != &FieldType::HierarchicalFacet { if field_entry.field_type() != &FieldType::HierarchicalFacet {
return Err(crate::TantivyError::InvalidArgument(format!( return None;
"Field {:?} is not a facet field.",
field_entry.name()
)));
} }
let term_ords_reader = self.fast_fields().u64s(field).ok_or_else(|| { let term_ords_reader = self.fast_fields().u64s(field)?;
DataCorruption::comment_only(format!( let termdict_source = self.termdict_composite.open_read(field)?;
"Cannot find data for hierarchical facet {:?}", let termdict = TermDictionary::from_source(&termdict_source);
field_entry.name() let facet_reader = FacetReader::new(term_ords_reader, termdict);
)) Some(facet_reader)
})?;
let termdict = self
.termdict_composite
.open_read(field)
.map(TermDictionary::open)
.unwrap_or_else(|| Ok(TermDictionary::empty()))?;
Ok(FacetReader::new(term_ords_reader, termdict))
} }
/// Accessor to the segment's `Field norms`'s reader. /// Accessor to the segment's `Field norms`'s reader.
@@ -135,45 +125,47 @@ impl SegmentReader {
/// ///
/// They are simply stored as a fast field, serialized in /// They are simply stored as a fast field, serialized in
/// the `.fieldnorm` file of the segment. /// the `.fieldnorm` file of the segment.
pub fn get_fieldnorms_reader(&self, field: Field) -> crate::Result<FieldNormReader> { pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader {
self.fieldnorm_readers.get_field(field)?.ok_or_else(|| { if let Some(fieldnorm_reader) = self.fieldnorm_readers.get_field(field) {
fieldnorm_reader
} else {
let field_name = self.schema.get_field_name(field); let field_name = self.schema.get_field_name(field);
let err_msg = format!( let err_msg = format!(
"Field norm not found for field {:?}. Was it marked as indexed during indexing?", "Field norm not found for field {:?}. Was it market as indexed during indexing.",
field_name field_name
); );
crate::TantivyError::SchemaError(err_msg) panic!(err_msg);
}) }
} }
/// Accessor to the segment's `StoreReader`. /// Accessor to the segment's `StoreReader`.
pub fn get_store_reader(&self) -> io::Result<StoreReader> { pub fn get_store_reader(&self) -> StoreReader {
StoreReader::open(self.store_file.clone()) StoreReader::from_source(self.store_source.clone())
} }
/// Open a new segment for reading. /// Open a new segment for reading.
pub fn open(segment: &Segment) -> crate::Result<SegmentReader> { pub fn open(segment: &Segment) -> crate::Result<SegmentReader> {
let termdict_file = segment.open_read(SegmentComponent::TERMS)?; let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
let termdict_composite = CompositeFile::open(&termdict_file)?; let termdict_composite = CompositeFile::open(&termdict_source)?;
let store_file = segment.open_read(SegmentComponent::STORE)?; let store_source = segment.open_read(SegmentComponent::STORE)?;
fail_point!("SegmentReader::open#middle"); fail_point!("SegmentReader::open#middle");
let postings_file = segment.open_read(SegmentComponent::POSTINGS)?; let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
let postings_composite = CompositeFile::open(&postings_file)?; let postings_composite = CompositeFile::open(&postings_source)?;
let positions_composite = { let positions_composite = {
if let Ok(positions_file) = segment.open_read(SegmentComponent::POSITIONS) { if let Ok(source) = segment.open_read(SegmentComponent::POSITIONS) {
CompositeFile::open(&positions_file)? CompositeFile::open(&source)?
} else { } else {
CompositeFile::empty() CompositeFile::empty()
} }
}; };
let positions_idx_composite = { let positions_idx_composite = {
if let Ok(positions_skip_file) = segment.open_read(SegmentComponent::POSITIONSSKIP) { if let Ok(source) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
CompositeFile::open(&positions_skip_file)? CompositeFile::open(&source)?
} else { } else {
CompositeFile::empty() CompositeFile::empty()
} }
@@ -191,14 +183,13 @@ impl SegmentReader {
let delete_bitset_opt = if segment.meta().has_deletes() { let delete_bitset_opt = if segment.meta().has_deletes() {
let delete_data = segment.open_read(SegmentComponent::DELETE)?; let delete_data = segment.open_read(SegmentComponent::DELETE)?;
let delete_bitset = DeleteBitSet::open(delete_data)?; Some(DeleteBitSet::open(delete_data))
Some(delete_bitset)
} else { } else {
None None
}; };
Ok(SegmentReader { Ok(SegmentReader {
inv_idx_reader_cache: Default::default(), inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
max_doc: segment.meta().max_doc(), max_doc: segment.meta().max_doc(),
num_docs: segment.meta().num_docs(), num_docs: segment.meta().num_docs(),
termdict_composite, termdict_composite,
@@ -206,7 +197,7 @@ impl SegmentReader {
fast_fields_readers: fast_field_readers, fast_fields_readers: fast_field_readers,
fieldnorm_readers, fieldnorm_readers,
segment_id: segment.id(), segment_id: segment.id(),
store_file, store_source,
delete_bitset_opt, delete_bitset_opt,
positions_composite, positions_composite,
positions_idx_composite, positions_idx_composite,
@@ -221,64 +212,58 @@ impl SegmentReader {
/// The field reader is in charge of iterating through the /// The field reader is in charge of iterating through the
/// term dictionary associated to a specific field, /// term dictionary associated to a specific field,
/// and opening the posting list associated to any term. /// and opening the posting list associated to any term.
/// pub fn inverted_index(&self, field: Field) -> Arc<InvertedIndexReader> {
/// If the field is marked as index, a warn is logged and an empty `InvertedIndexReader`
/// is returned.
/// Similarly if the field is marked as indexed but no term has been indexed for the given
/// index. an empty `InvertedIndexReader` is returned (but no warning is logged).
pub fn inverted_index(&self, field: Field) -> crate::Result<Arc<InvertedIndexReader>> {
if let Some(inv_idx_reader) = self if let Some(inv_idx_reader) = self
.inv_idx_reader_cache .inv_idx_reader_cache
.read() .read()
.expect("Lock poisoned. This should never happen") .expect("Lock poisoned. This should never happen")
.get(&field) .get(&field)
{ {
return Ok(Arc::clone(inv_idx_reader)); return Arc::clone(inv_idx_reader);
} }
let field_entry = self.schema.get_field_entry(field); let field_entry = self.schema.get_field_entry(field);
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
let record_option_opt = field_type.get_index_record_option(); let record_option_opt = field_type.get_index_record_option();
if record_option_opt.is_none() { if record_option_opt.is_none() {
warn!("Field {:?} does not seem indexed.", field_entry.name()); panic!("Field {:?} does not seem indexed.", field_entry.name());
} }
let postings_file_opt = self.postings_composite.open_read(field); let record_option = record_option_opt.unwrap();
if postings_file_opt.is_none() || record_option_opt.is_none() { let postings_source_opt = self.postings_composite.open_read(field);
if postings_source_opt.is_none() {
// no documents in the segment contained this field. // no documents in the segment contained this field.
// As a result, no data is associated to the inverted index. // As a result, no data is associated to the inverted index.
// //
// Returns an empty inverted index. // Returns an empty inverted index.
let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic); return Arc::new(InvertedIndexReader::empty(field_type));
return Ok(Arc::new(InvertedIndexReader::empty(record_option)));
} }
let record_option = record_option_opt.unwrap(); let postings_source = postings_source_opt.unwrap();
let postings_file = postings_file_opt.unwrap();
let termdict_file: FileSlice = self.termdict_composite.open_read(field) let termdict_source = self.termdict_composite.open_read(field).expect(
.ok_or_else(|| "Failed to open field term dictionary in composite file. Is the field indexed?",
DataCorruption::comment_only(format!("Failed to open field {:?}'s term dictionary in the composite file. Has the schema been modified?", field_entry.name())) );
)?;
let positions_file = self let positions_source = self
.positions_composite .positions_composite
.open_read(field) .open_read(field)
.expect("Index corrupted. Failed to open field positions in composite file."); .expect("Index corrupted. Failed to open field positions in composite file.");
let positions_idx_file = self let positions_idx_source = self
.positions_idx_composite .positions_idx_composite
.open_read(field) .open_read(field)
.expect("Index corrupted. Failed to open field positions in composite file."); .expect("Index corrupted. Failed to open field positions in composite file.");
let inv_idx_reader = Arc::new(InvertedIndexReader::new( let inv_idx_reader = Arc::new(InvertedIndexReader::new(
TermDictionary::open(termdict_file)?, TermDictionary::from_source(&termdict_source),
postings_file, postings_source,
positions_file, positions_source,
positions_idx_file, positions_idx_source,
record_option, record_option,
)?); ));
// by releasing the lock in between, we may end up opening the inverting index // by releasing the lock in between, we may end up opening the inverting index
// twice, but this is fine. // twice, but this is fine.
@@ -287,7 +272,7 @@ impl SegmentReader {
.expect("Field reader cache lock poisoned. This should never happen.") .expect("Field reader cache lock poisoned. This should never happen.")
.insert(field, Arc::clone(&inv_idx_reader)); .insert(field, Arc::clone(&inv_idx_reader));
Ok(inv_idx_reader) inv_idx_reader
} }
/// Returns the segment id /// Returns the segment id
@@ -315,8 +300,8 @@ impl SegmentReader {
} }
/// Summarize total space usage of this segment. /// Summarize total space usage of this segment.
pub fn space_usage(&self) -> io::Result<SegmentSpaceUsage> { pub fn space_usage(&self) -> SegmentSpaceUsage {
Ok(SegmentSpaceUsage::new( SegmentSpaceUsage::new(
self.num_docs(), self.num_docs(),
self.termdict_composite.space_usage(), self.termdict_composite.space_usage(),
self.postings_composite.space_usage(), self.postings_composite.space_usage(),
@@ -324,12 +309,12 @@ impl SegmentReader {
self.positions_idx_composite.space_usage(), self.positions_idx_composite.space_usage(),
self.fast_fields_readers.space_usage(), self.fast_fields_readers.space_usage(),
self.fieldnorm_readers.space_usage(), self.fieldnorm_readers.space_usage(),
self.get_store_reader()?.space_usage(), self.get_store_reader().space_usage(),
self.delete_bitset_opt self.delete_bitset_opt
.as_ref() .as_ref()
.map(DeleteBitSet::space_usage) .map(DeleteBitSet::space_usage)
.unwrap_or(0), .unwrap_or(0),
)) )
} }
} }
@@ -346,7 +331,7 @@ mod test {
use crate::DocId; use crate::DocId;
#[test] #[test]
fn test_alive_docs_iterator() -> crate::Result<()> { fn test_alive_docs_iterator() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
schema_builder.add_text_field("name", TEXT | STORED); schema_builder.add_text_field("name", TEXT | STORED);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -354,26 +339,26 @@ mod test {
let name = schema.get_field("name").unwrap(); let name = schema.get_field("name").unwrap();
{ {
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(name => "tantivy")); index_writer.add_document(doc!(name => "tantivy"));
index_writer.add_document(doc!(name => "horse")); index_writer.add_document(doc!(name => "horse"));
index_writer.add_document(doc!(name => "jockey")); index_writer.add_document(doc!(name => "jockey"));
index_writer.add_document(doc!(name => "cap")); index_writer.add_document(doc!(name => "cap"));
// we should now have one segment with two docs // we should now have one segment with two docs
index_writer.commit()?; index_writer.commit().unwrap();
} }
{ {
let mut index_writer2 = index.writer(50_000_000)?; let mut index_writer2 = index.writer(50_000_000).unwrap();
index_writer2.delete_term(Term::from_field_text(name, "horse")); index_writer2.delete_term(Term::from_field_text(name, "horse"));
index_writer2.delete_term(Term::from_field_text(name, "cap")); index_writer2.delete_term(Term::from_field_text(name, "cap"));
// ok, now we should have a deleted doc // ok, now we should have a deleted doc
index_writer2.commit()?; index_writer2.commit().unwrap();
} }
let searcher = index.reader()?.searcher(); let searcher = index.reader().unwrap().searcher();
let docs: Vec<DocId> = searcher.segment_reader(0).doc_ids_alive().collect(); let docs: Vec<DocId> = searcher.segment_reader(0).doc_ids_alive().collect();
assert_eq!(vec![0u32, 2u32], docs); assert_eq!(vec![0u32, 2u32], docs);
Ok(())
} }
} }
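Since `facet_reader` changes how it signals errors above, it may help to see where facets enter from the public side. Below is a small, hedged sketch using the era's `FacetCollector`; the facet paths and counts are illustrative only.

    use tantivy::collector::FacetCollector;
    use tantivy::query::AllQuery;
    use tantivy::schema::{Facet, Schema};
    use tantivy::{doc, Index};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let category = schema_builder.add_facet_field("category");
        let index = Index::create_in_ram(schema_builder.build());

        let mut writer = index.writer(50_000_000)?;
        writer.add_document(doc!(category => Facet::from("/lang/rust")));
        writer.add_document(doc!(category => Facet::from("/lang/python")));
        writer.commit()?;

        let searcher = index.reader()?.searcher();
        // FacetCollector drives SegmentReader::facet_reader internally.
        let mut facet_collector = FacetCollector::for_field(category);
        facet_collector.add_facet("/lang");
        let counts = searcher.search(&AllQuery, &facet_collector)?;
        let langs: Vec<(&Facet, u64)> = counts.get("/lang").collect();
        assert_eq!(langs.len(), 2);
        Ok(())
    }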

@@ -3,7 +3,7 @@ use crate::directory::error::LockError;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::WatchCallback; use crate::directory::WatchCallback;
use crate::directory::WatchHandle; use crate::directory::WatchHandle;
use crate::directory::{FileSlice, WritePtr}; use crate::directory::{ReadOnlySource, WritePtr};
use std::fmt; use std::fmt;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
@@ -11,6 +11,7 @@ use std::marker::Send;
use std::marker::Sync; use std::marker::Sync;
use std::path::Path; use std::path::Path;
use std::path::PathBuf; use std::path::PathBuf;
use std::result;
use std::thread; use std::thread;
use std::time::Duration; use std::time::Duration;
@@ -79,7 +80,7 @@ fn try_acquire_lock(
) -> Result<DirectoryLock, TryAcquireLockError> { ) -> Result<DirectoryLock, TryAcquireLockError> {
let mut write = directory.open_write(filepath).map_err(|e| match e { let mut write = directory.open_write(filepath).map_err(|e| match e {
OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists, OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
OpenWriteError::IOError { io_error, .. } => TryAcquireLockError::IOError(io_error), OpenWriteError::IOError(io_error) => TryAcquireLockError::IOError(io_error.into()),
})?; })?;
write.flush().map_err(TryAcquireLockError::IOError)?; write.flush().map_err(TryAcquireLockError::IOError)?;
Ok(DirectoryLock::from(Box::new(DirectoryLockGuard { Ok(DirectoryLock::from(Box::new(DirectoryLockGuard {
@@ -116,19 +117,19 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// change. /// change.
/// ///
/// Specifically, subsequent writes or flushes should /// Specifically, subsequent writes or flushes should
/// have no effect on the returned `FileSlice` object. /// have no effect on the returned `ReadOnlySource` object.
/// ///
/// You should only use this to read files create with [Directory::open_write]. /// You should only use this to read files create with [Directory::open_write].
fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError>; fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
/// Removes a file /// Removes a file
/// ///
/// Removing a file will not affect an eventual /// Removing a file will not affect an eventual
/// existing FileSlice pointing to it. /// existing ReadOnlySource pointing to it.
/// ///
/// Removing a nonexistent file, yields a /// Removing a nonexistent file, yields a
/// `DeleteError::DoesNotExist`. /// `DeleteError::DoesNotExist`.
fn delete(&self, path: &Path) -> Result<(), DeleteError>; fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;
/// Returns true iff the file exists /// Returns true iff the file exists
fn exists(&self, path: &Path) -> bool; fn exists(&self, path: &Path) -> bool;
@@ -138,7 +139,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// ///
/// Right after this call, the file should be created /// Right after this call, the file should be created
/// and any subsequent call to `open_read` for the /// and any subsequent call to `open_read` for the
/// same path should return a `FileSlice`. /// same path should return a `ReadOnlySource`.
/// ///
/// Write operations may be aggressively buffered. /// Write operations may be aggressively buffered.
/// The client of this trait is responsible for calling flush /// The client of this trait is responsible for calling flush
@@ -152,7 +153,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// was not called. /// was not called.
/// ///
/// The file may not previously exist. /// The file may not previously exist.
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError>; fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;
/// Reads the full content file that has been written using /// Reads the full content file that has been written using
/// atomic_write. /// atomic_write.
@@ -168,7 +169,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// a partially written file. /// a partially written file.
/// ///
/// The file may or may not previously exist. /// The file may or may not previously exist.
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()>; fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()>;
/// Acquire a lock in the given directory. /// Acquire a lock in the given directory.
/// ///
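The `&mut self` versus `&self` receivers above are the heart of this hunk. For readers unfamiliar with the trait, a minimal, hedged sketch of the atomic read/write pair on a `RAMDirectory`; the file name and payload are illustrative, and the `mut` binding is only required on the side of the diff whose signatures take `&mut self`.

    use std::path::Path;
    use tantivy::directory::{Directory, RAMDirectory};

    fn main() -> std::io::Result<()> {
        let mut directory = RAMDirectory::create();
        // atomic_write/atomic_read is the path used for small metadata
        // files such as meta.json; a write either lands fully or not at all.
        directory.atomic_write(Path::new("meta.json"), b"{}")?;
        let bytes = directory.atomic_read(Path::new("meta.json")).expect("file exists");
        assert_eq!(bytes, b"{}".to_vec());
        Ok(())
    }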

@@ -1,67 +1,162 @@
use crate::Version; use crate::Version;
use std::error::Error as StdError;
use std::fmt; use std::fmt;
use std::io; use std::io;
use std::path::PathBuf; use std::path::PathBuf;
/// Error while trying to acquire a directory lock. /// Error while trying to acquire a directory lock.
#[derive(Debug, Error)] #[derive(Debug, Fail)]
pub enum LockError { pub enum LockError {
/// Failed to acquired a lock as it is already held by another /// Failed to acquired a lock as it is already held by another
/// client. /// client.
/// - In the context of a blocking lock, this means the lock was not released within some `timeout` period. /// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
/// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call. /// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
#[error("Could not acquire lock as it is already held, possibly by a different process.")] #[fail(
display = "Could not acquire lock as it is already held, possibly by a different process."
)]
LockBusy, LockBusy,
/// Trying to acquire a lock failed with an `IOError` /// Trying to acquire a lock failed with an `IOError`
#[error("Failed to acquire the lock due to an io:Error.")] #[fail(display = "Failed to acquire the lock due to an io:Error.")]
IOError(io::Error), IOError(io::Error),
} }
/// General IO error with an optional path to the offending file.
#[derive(Debug)]
pub struct IOError {
path: Option<PathBuf>,
err: io::Error,
}
impl Into<io::Error> for IOError {
fn into(self) -> io::Error {
self.err
}
}
impl fmt::Display for IOError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.path {
Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err),
None => write!(f, "io error occurred: '{}'", self.err),
}
}
}
impl StdError for IOError {
fn description(&self) -> &str {
"io error occurred"
}
fn cause(&self) -> Option<&dyn StdError> {
Some(&self.err)
}
}
impl IOError {
pub(crate) fn with_path(path: PathBuf, err: io::Error) -> Self {
IOError {
path: Some(path),
err,
}
}
}
impl From<io::Error> for IOError {
fn from(err: io::Error) -> IOError {
IOError { path: None, err }
}
}
/// Error that may occur when opening a directory /// Error that may occur when opening a directory
#[derive(Debug, Error)] #[derive(Debug)]
pub enum OpenDirectoryError { pub enum OpenDirectoryError {
/// The underlying directory does not exists. /// The underlying directory does not exists.
#[error("Directory does not exist: '{0}'.")]
DoesNotExist(PathBuf), DoesNotExist(PathBuf),
/// The path exists but is not a directory. /// The path exists but is not a directory.
#[error("Path exists but is not a directory: '{0}'.")]
NotADirectory(PathBuf), NotADirectory(PathBuf),
/// Failed to create a temp directory.
#[error("Failed to create a temporary directory: '{0}'.")]
FailedToCreateTempDir(io::Error),
/// IoError /// IoError
#[error("IOError '{io_error:?}' while create directory in: '{directory_path:?}'.")] IoError(io::Error),
IoError { }
/// underlying io Error.
io_error: io::Error, impl From<io::Error> for OpenDirectoryError {
/// directory we tried to open. fn from(io_err: io::Error) -> Self {
directory_path: PathBuf, OpenDirectoryError::IoError(io_err)
}, }
}
impl fmt::Display for OpenDirectoryError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
OpenDirectoryError::DoesNotExist(ref path) => {
write!(f, "the underlying directory '{:?}' does not exist", path)
}
OpenDirectoryError::NotADirectory(ref path) => {
write!(f, "the path '{:?}' exists but is not a directory", path)
}
OpenDirectoryError::IoError(ref err) => write!(
f,
"IOError while trying to open/create the directory. {:?}",
err
),
}
}
}
impl StdError for OpenDirectoryError {
fn description(&self) -> &str {
"error occurred while opening a directory"
}
fn cause(&self) -> Option<&dyn StdError> {
None
}
} }
/// Error that may occur when starting to write in a file /// Error that may occur when starting to write in a file
#[derive(Debug, Error)] #[derive(Debug)]
pub enum OpenWriteError { pub enum OpenWriteError {
/// Our directory is WORM, writing an existing file is forbidden. /// Our directory is WORM, writing an existing file is forbidden.
/// Checkout the `Directory` documentation. /// Checkout the `Directory` documentation.
#[error("File already exists: '{0}'")]
FileAlreadyExists(PathBuf), FileAlreadyExists(PathBuf),
/// Any kind of IO error that happens when /// Any kind of IO error that happens when
/// writing in the underlying IO device. /// writing in the underlying IO device.
#[error("IOError '{io_error:?}' while opening file for write: '{filepath}'.")] IOError(IOError),
IOError {
/// The underlying `io::Error`.
io_error: io::Error,
/// File path of the file that tantivy failed to open for write.
filepath: PathBuf,
},
} }
impl OpenWriteError { impl From<IOError> for OpenWriteError {
pub(crate) fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self { fn from(err: IOError) -> OpenWriteError {
Self::IOError { io_error, filepath } OpenWriteError::IOError(err)
} }
} }
impl fmt::Display for OpenWriteError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
OpenWriteError::FileAlreadyExists(ref path) => {
write!(f, "the file '{:?}' already exists", path)
}
OpenWriteError::IOError(ref err) => write!(
f,
"an io error occurred while opening a file for writing: '{}'",
err
),
}
}
}
impl StdError for OpenWriteError {
fn description(&self) -> &str {
"error occurred while opening a file for writing"
}
fn cause(&self) -> Option<&dyn StdError> {
match *self {
OpenWriteError::FileAlreadyExists(_) => None,
OpenWriteError::IOError(ref err) => Some(err),
}
}
}
/// Type of index incompatibility between the library and the index found on disk /// Type of index incompatibility between the library and the index found on disk
/// Used to catch and provide a hint to solve this incompatibility issue /// Used to catch and provide a hint to solve this incompatibility issue
pub enum Incompatibility { pub enum Incompatibility {
@@ -122,46 +217,55 @@ impl fmt::Debug for Incompatibility {
} }
/// Error that may occur when accessing a file read /// Error that may occur when accessing a file read
#[derive(Debug, Error)] #[derive(Debug)]
pub enum OpenReadError { pub enum OpenReadError {
/// The file does not exists. /// The file does not exists.
#[error("Files does not exists: {0:?}")]
FileDoesNotExist(PathBuf),
/// Any kind of io::Error.
#[error(
"IOError: '{io_error:?}' happened while opening the following file for Read: {filepath}."
)]
IOError {
/// The underlying `io::Error`.
io_error: io::Error,
/// File path of the file that tantivy failed to open for read.
filepath: PathBuf,
},
/// This library does not support the index version found in file footer.
#[error("Index version unsupported: {0:?}")]
IncompatibleIndex(Incompatibility),
}
impl OpenReadError {
pub(crate) fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
Self::IOError { io_error, filepath }
}
}
/// Error that may occur when trying to delete a file
#[derive(Debug, Error)]
pub enum DeleteError {
/// The file does not exists.
#[error("File does not exists: '{0}'.")]
FileDoesNotExist(PathBuf), FileDoesNotExist(PathBuf),
/// Any kind of IO error that happens when /// Any kind of IO error that happens when
/// interacting with the underlying IO device. /// interacting with the underlying IO device.
#[error("The following IO error happened while deleting file '{filepath}': '{io_error:?}'.")] IOError(IOError),
IOError { /// This library doesn't support the index version found on disk
/// The underlying `io::Error`. IncompatibleIndex(Incompatibility),
io_error: io::Error, }
/// File path of the file that tantivy failed to delete.
filepath: PathBuf, impl From<IOError> for OpenReadError {
}, fn from(err: IOError) -> OpenReadError {
OpenReadError::IOError(err)
}
}
impl fmt::Display for OpenReadError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
OpenReadError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path)
}
OpenReadError::IOError(ref err) => write!(
f,
"an io error occurred while opening a file for reading: '{}'",
err
),
OpenReadError::IncompatibleIndex(ref footer) => {
write!(f, "Incompatible index format: {:?}", footer)
}
}
}
}
/// Error that may occur when trying to delete a file
#[derive(Debug)]
pub enum DeleteError {
/// The file does not exist. /// The file does not exist.
FileDoesNotExist(PathBuf),
/// Any kind of IO error that happens when
/// interacting with the underlying IO device.
IOError(IOError),
}
impl From<IOError> for DeleteError {
fn from(err: IOError) -> DeleteError {
DeleteError::IOError(err)
}
} }
impl From<Incompatibility> for OpenReadError { impl From<Incompatibility> for OpenReadError {
@@ -169,3 +273,29 @@ impl From<Incompatibility> for OpenReadError {
OpenReadError::IncompatibleIndex(incompatibility) OpenReadError::IncompatibleIndex(incompatibility)
} }
} }
impl fmt::Display for DeleteError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
DeleteError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path)
}
DeleteError::IOError(ref err) => {
write!(f, "an io error occurred while deleting a file: '{}'", err)
}
}
}
}
impl StdError for DeleteError {
fn description(&self) -> &str {
"error occurred while deleting a file"
}
fn cause(&self) -> Option<&dyn StdError> {
match *self {
DeleteError::FileDoesNotExist(_) => None,
DeleteError::IOError(ref err) => Some(err),
}
}
}
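For orientation, here is a minimal sketch (not part of the diff) of how calling code can consume the new struct-style variants, assuming the `OpenReadError` definitions above are in scope:

fn describe_open_read_failure(err: &OpenReadError) -> String {
    match err {
        // The offending path is carried directly by the variant.
        OpenReadError::FileDoesNotExist(path) => format!("missing file: {:?}", path),
        // Struct-style variant: both the io::Error and the path are available.
        OpenReadError::IOError { io_error, filepath } => {
            format!("io error on {:?}: {}", filepath, io_error)
        }
        OpenReadError::IncompatibleIndex(incompat) => {
            format!("incompatible index format: {:?}", incompat)
        }
    }
}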

View File

@@ -1,237 +0,0 @@
use stable_deref_trait::StableDeref;
use crate::common::HasLen;
use crate::directory::OwnedBytes;
use std::sync::Arc;
use std::{io, ops::Deref};
pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
/// Objects that represents files sections in tantivy.
///
/// By contract, whatever happens to the directory file, as long as a FileHandle
/// is alive, the data associated with it cannot be altered or destroyed.
///
/// The underlying behavior is therefore specific to the `Directory` that created it.
/// Despite its name, a `FileSlice` may or may not directly map to an actual file
/// on the filesystem.
pub trait FileHandle: 'static + Send + Sync + HasLen {
/// Reads a slice of bytes.
///
/// This method may panic if the range requested is invalid.
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes>;
}
impl FileHandle for &'static [u8] {
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
let bytes = &self[from..to];
Ok(OwnedBytes::new(bytes))
}
}
impl<T: Deref<Target = [u8]>> HasLen for T {
fn len(&self) -> usize {
self.as_ref().len()
}
}
impl<B> From<B> for FileSlice
where
B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync,
{
fn from(bytes: B) -> FileSlice {
FileSlice::new(OwnedBytes::new(bytes))
}
}
/// Logical slice of a read-only file in tantivy.
//
/// It can be cloned and sliced cheaply.
///
#[derive(Clone)]
pub struct FileSlice {
data: Arc<Box<dyn FileHandle>>,
start: usize,
stop: usize,
}
impl FileSlice {
/// Wraps a FileHandle.
pub fn new<D>(data: D) -> Self
where
D: FileHandle,
{
let len = data.len();
FileSlice {
data: Arc::new(Box::new(data)),
start: 0,
stop: len,
}
}
/// Creates a fileslice that is just a view over a slice of the data.
///
/// # Panics
///
/// Panics if `to < from` or if `to` exceeds the filesize.
pub fn slice(&self, from: usize, to: usize) -> FileSlice {
assert!(to <= self.len());
assert!(to >= from);
FileSlice {
data: self.data.clone(),
start: self.start + from,
stop: self.start + to,
}
}
/// Creates an empty FileSlice
pub fn empty() -> FileSlice {
const EMPTY_SLICE: &[u8] = &[];
FileSlice::from(EMPTY_SLICE)
}
/// Returns an `OwnedBytes` with all of the data in the `FileSlice`.
///
/// The behavior is strongly dependent on the implementation of the underlying
/// `Directory` and the `FileSliceTrait` it creates.
/// In particular, it is up to the `Directory` implementation
/// to handle caching if needed.
pub fn read_bytes(&self) -> io::Result<OwnedBytes> {
self.data.read_bytes(self.start, self.stop)
}
/// Reads a specific slice of data.
///
/// This is equivalent to running `file_slice.slice(from, to).read_bytes()`.
pub fn read_bytes_slice(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
assert!(from <= to);
assert!(
self.start + to <= self.stop,
"`to` exceeds the fileslice length"
);
self.data.read_bytes(self.start + from, self.start + to)
}
/// Splits the FileSlice at the given offset and returns two file slices.
/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
///
/// This operation is cheap and must not copy any underlying data.
pub fn split(self, left_len: usize) -> (FileSlice, FileSlice) {
let left = self.slice_to(left_len);
let right = self.slice_from(left_len);
(left, right)
}
/// Splits the file slice at `self.len() - right_len` and returns two file slices:
/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
pub fn split_from_end(self, right_len: usize) -> (FileSlice, FileSlice) {
let left_len = self.len() - right_len;
self.split(left_len)
}
/// Like `.slice(...)` but enforcing only the `from`
/// boundary.
///
/// Equivalent to `.slice(from_offset, self.len())`
pub fn slice_from(&self, from_offset: usize) -> FileSlice {
self.slice(from_offset, self.len())
}
/// Like `.slice(...)` but enforcing only the `to`
/// boundary.
///
/// Equivalent to `.slice(0, to_offset)`
pub fn slice_to(&self, to_offset: usize) -> FileSlice {
self.slice(0, to_offset)
}
}
impl HasLen for FileSlice {
fn len(&self) -> usize {
self.stop - self.start
}
}
#[cfg(test)]
mod tests {
use super::{FileHandle, FileSlice};
use crate::common::HasLen;
use std::io;
#[test]
fn test_file_slice() -> io::Result<()> {
let file_slice = FileSlice::new(b"abcdef".as_ref());
assert_eq!(file_slice.len(), 6);
assert_eq!(file_slice.slice_from(2).read_bytes()?.as_slice(), b"cdef");
assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab");
assert_eq!(
file_slice
.slice_from(1)
.slice_to(2)
.read_bytes()?
.as_slice(),
b"bc"
);
{
let (left, right) = file_slice.clone().split(0);
assert_eq!(left.read_bytes()?.as_slice(), b"");
assert_eq!(right.read_bytes()?.as_slice(), b"abcdef");
}
{
let (left, right) = file_slice.clone().split(2);
assert_eq!(left.read_bytes()?.as_slice(), b"ab");
assert_eq!(right.read_bytes()?.as_slice(), b"cdef");
}
{
let (left, right) = file_slice.clone().split_from_end(0);
assert_eq!(left.read_bytes()?.as_slice(), b"abcdef");
assert_eq!(right.read_bytes()?.as_slice(), b"");
}
{
let (left, right) = file_slice.clone().split_from_end(2);
assert_eq!(left.read_bytes()?.as_slice(), b"abcd");
assert_eq!(right.read_bytes()?.as_slice(), b"ef");
}
Ok(())
}
#[test]
fn test_file_slice_trait_slice_len() {
let blop: &'static [u8] = b"abc";
let owned_bytes: Box<dyn FileHandle> = Box::new(blop);
assert_eq!(owned_bytes.len(), 3);
}
#[test]
fn test_slice_simple_read() -> io::Result<()> {
let slice = FileSlice::new(&b"abcdef"[..]);
assert_eq!(slice.len(), 6);
assert_eq!(slice.read_bytes()?.as_ref(), b"abcdef");
assert_eq!(slice.slice(1, 4).read_bytes()?.as_ref(), b"bcd");
Ok(())
}
#[test]
fn test_slice_read_slice() -> io::Result<()> {
let slice_deref = FileSlice::new(&b"abcdef"[..]);
assert_eq!(slice_deref.read_bytes_slice(1, 4)?.as_ref(), b"bcd");
Ok(())
}
#[test]
#[should_panic(expected = "assertion failed: from <= to")]
fn test_slice_read_slice_invalid_range() {
let slice_deref = FileSlice::new(&b"abcdef"[..]);
assert_eq!(slice_deref.read_bytes_slice(1, 0).unwrap().as_ref(), b"bcd");
}
#[test]
#[should_panic(expected = "`to` exceeds the fileslice length")]
fn test_slice_read_slice_invalid_range_exceeds() {
let slice_deref = FileSlice::new(&b"abcdef"[..]);
assert_eq!(
slice_deref.read_bytes_slice(0, 10).unwrap().as_ref(),
b"bcd"
);
}
}
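As a rough, hypothetical illustration of the `FileHandle` contract above (not part of the diff), an in-memory handle can lean on the blanket `impl<T: Deref<Target = [u8]>> HasLen for T` shown earlier; the type and its behaviour are assumptions for illustration only:

use std::io;
use std::ops::Deref;

struct VecFileHandle(Vec<u8>);

impl Deref for VecFileHandle {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        &self.0
    }
}

impl FileHandle for VecFileHandle {
    fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
        // Copies the requested range; a zero-copy handle would hand out a shared, stable buffer instead.
        Ok(OwnedBytes::new(self.0[from..to].to_vec()))
    }
}

// Usage sketch: FileSlice::new(VecFileHandle(b"abcdef".to_vec())).slice(1, 4)
// behaves like the slices exercised in the tests above.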

View File

@@ -1,8 +1,9 @@
use crate::common::{BinarySerializable, CountingWriter, FixedSize, HasLen, VInt}; use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt};
use crate::directory::error::Incompatibility; use crate::directory::error::Incompatibility;
use crate::directory::FileSlice; use crate::directory::read_only_source::ReadOnlySource;
use crate::directory::{AntiCallToken, TerminatingWrite}; use crate::directory::{AntiCallToken, TerminatingWrite};
use crate::Version; use crate::Version;
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use crc32fast::Hasher; use crc32fast::Hasher;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
@@ -63,26 +64,26 @@ impl Footer {
let mut counting_write = CountingWriter::wrap(&mut write); let mut counting_write = CountingWriter::wrap(&mut write);
self.serialize(&mut counting_write)?; self.serialize(&mut counting_write)?;
let written_len = counting_write.written_bytes(); let written_len = counting_write.written_bytes();
(written_len as u32).serialize(write)?; write.write_u32::<LittleEndian>(written_len as u32)?;
Ok(()) Ok(())
} }
pub fn extract_footer(file: FileSlice) -> io::Result<(Footer, FileSlice)> { pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
if file.len() < 4 { if source.len() < 4 {
return Err(io::Error::new( return Err(io::Error::new(
io::ErrorKind::UnexpectedEof, io::ErrorKind::UnexpectedEof,
format!( format!(
"File corrupted. The file is smaller than 4 bytes (len={}).", "File corrupted. The file is smaller than 4 bytes (len={}).",
file.len() source.len()
), ),
)); ));
} }
let (body_footer, footer_len_file) = file.split_from_end(u32::SIZE_IN_BYTES); let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES);
let mut footer_len_bytes = footer_len_file.read_bytes()?; let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize;
let footer_len = u32::deserialize(&mut footer_len_bytes)? as usize; let body_len = body_footer.len() - footer_len;
let (body, footer) = body_footer.split_from_end(footer_len); let (body, footer_data) = body_footer.split(body_len);
let mut footer_bytes = footer.read_bytes()?; let mut cursor = footer_data.as_slice();
let footer = Footer::deserialize(&mut footer_bytes)?; let footer = Footer::deserialize(&mut cursor)?;
Ok((footer, body)) Ok((footer, body))
} }
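Both sides of the `extract_footer` change parse the same trailing layout; here is a standalone sketch of that layout (illustration only, the function name is hypothetical):

// +----------------+----------------+------------------------+
// | body           | footer bytes   | footer_len (u32, LE)   |
// +----------------+----------------+------------------------+
fn split_footer(data: &[u8]) -> Option<(&[u8], &[u8])> {
    if data.len() < 4 {
        return None;
    }
    let (rest, len_bytes) = data.split_at(data.len() - 4);
    let footer_len =
        u32::from_le_bytes([len_bytes[0], len_bytes[1], len_bytes[2], len_bytes[3]]) as usize;
    if footer_len > rest.len() {
        return None;
    }
    // Returns (body, footer).
    Some(rest.split_at(rest.len() - footer_len))
}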
@@ -270,11 +271,7 @@ mod tests {
let mut vec = Vec::new(); let mut vec = Vec::new();
let footer_proxy = FooterProxy::new(&mut vec); let footer_proxy = FooterProxy::new(&mut vec);
assert!(footer_proxy.terminate().is_ok()); assert!(footer_proxy.terminate().is_ok());
if crate::store::COMPRESSION == "lz4" { assert_eq!(vec.len(), 167);
assert_eq!(vec.len(), 158);
} else {
assert_eq!(vec.len(), 167);
}
let footer = Footer::deserialize(&mut &vec[..]).unwrap(); let footer = Footer::deserialize(&mut &vec[..]).unwrap();
assert!(matches!( assert!(matches!(
footer.versioned_footer, footer.versioned_footer,

View File

@@ -1,11 +1,11 @@
use crate::core::{MANAGED_FILEPATH, META_FILEPATH}; use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, LockError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy}; use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::DirectoryLock; use crate::directory::DirectoryLock;
use crate::directory::GarbageCollectionResult; use crate::directory::GarbageCollectionResult;
use crate::directory::Lock; use crate::directory::Lock;
use crate::directory::META_LOCK; use crate::directory::META_LOCK;
use crate::directory::{FileSlice, WritePtr}; use crate::directory::{ReadOnlySource, WritePtr};
use crate::directory::{WatchCallback, WatchHandle}; use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption; use crate::error::DataCorruption;
use crate::Directory; use crate::Directory;
@@ -53,7 +53,7 @@ struct MetaInformation {
/// Saves the file containing the list of existing files /// Saves the file containing the list of existing files
/// that were created by tantivy. /// that were created by tantivy.
fn save_managed_paths( fn save_managed_paths(
directory: &dyn Directory, directory: &mut dyn Directory,
wlock: &RwLockWriteGuard<'_, MetaInformation>, wlock: &RwLockWriteGuard<'_, MetaInformation>,
) -> io::Result<()> { ) -> io::Result<()> {
let mut w = serde_json::to_vec(&wlock.managed_paths)?; let mut w = serde_json::to_vec(&wlock.managed_paths)?;
@@ -86,7 +86,7 @@ impl ManagedDirectory {
directory: Box::new(directory), directory: Box::new(directory),
meta_informations: Arc::default(), meta_informations: Arc::default(),
}), }),
io_err @ Err(OpenReadError::IOError { .. }) => Err(io_err.err().unwrap().into()), Err(OpenReadError::IOError(e)) => Err(From::from(e)),
Err(OpenReadError::IncompatibleIndex(incompatibility)) => { Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
// For the moment, this should never happen: `meta.json` // For the moment, this should never happen: `meta.json`
// does not have a footer, so incompatibility cannot be detected. // does not have a footer, so incompatibility cannot be detected.
@@ -168,7 +168,7 @@ impl ManagedDirectory {
DeleteError::FileDoesNotExist(_) => { DeleteError::FileDoesNotExist(_) => {
deleted_files.push(file_to_delete.clone()); deleted_files.push(file_to_delete.clone());
} }
DeleteError::IOError { .. } => { DeleteError::IOError(_) => {
failed_to_delete_files.push(file_to_delete.clone()); failed_to_delete_files.push(file_to_delete.clone());
if !cfg!(target_os = "windows") { if !cfg!(target_os = "windows") {
// On windows, delete is expected to fail if the file // On windows, delete is expected to fail if the file
@@ -212,7 +212,7 @@ impl ManagedDirectory {
/// File starting by "." are reserved to locks. /// File starting by "." are reserved to locks.
/// They are not managed and cannot be subjected /// They are not managed and cannot be subjected
/// to garbage collection. /// to garbage collection.
fn register_file_as_managed(&self, filepath: &Path) -> io::Result<()> { fn register_file_as_managed(&mut self, filepath: &Path) -> io::Result<()> {
// Files starting by "." (e.g. lock files) are not managed. // Files starting by "." (e.g. lock files) are not managed.
if !is_managed(filepath) { if !is_managed(filepath) {
return Ok(()); return Ok(());
@@ -223,7 +223,7 @@ impl ManagedDirectory {
.expect("Managed file lock poisoned"); .expect("Managed file lock poisoned");
let has_changed = meta_wlock.managed_paths.insert(filepath.to_owned()); let has_changed = meta_wlock.managed_paths.insert(filepath.to_owned());
if has_changed { if has_changed {
save_managed_paths(self.directory.as_ref(), &meta_wlock)?; save_managed_paths(self.directory.as_mut(), &meta_wlock)?;
} }
Ok(()) Ok(())
} }
@@ -231,19 +231,10 @@ impl ManagedDirectory {
/// Verify checksum of a managed file /// Verify checksum of a managed file
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> { pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
let reader = self.directory.open_read(path)?; let reader = self.directory.open_read(path)?;
let (footer, data) = let (footer, data) = Footer::extract_footer(reader)
Footer::extract_footer(reader).map_err(|io_error| OpenReadError::IOError { .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
io_error,
filepath: path.to_path_buf(),
})?;
let bytes = data
.read_bytes()
.map_err(|io_error| OpenReadError::IOError {
filepath: path.to_path_buf(),
io_error,
})?;
let mut hasher = Hasher::new(); let mut hasher = Hasher::new();
hasher.update(bytes.as_slice()); hasher.update(data.as_slice());
let crc = hasher.finalize(); let crc = hasher.finalize();
Ok(footer Ok(footer
.versioned_footer .versioned_footer
@@ -254,37 +245,35 @@ impl ManagedDirectory {
/// List files for which checksum does not match content /// List files for which checksum does not match content
pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> { pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
let mut managed_paths = self let mut hashset = HashSet::new();
let managed_paths = self
.meta_informations .meta_informations
.read() .read()
.expect("Managed directory rlock poisoned in list damaged.") .expect("Managed directory rlock poisoned in list damaged.")
.managed_paths .managed_paths
.clone(); .clone();
managed_paths.remove(*META_FILEPATH); for path in managed_paths.into_iter() {
let mut damaged_files = HashSet::new();
for path in managed_paths {
if !self.validate_checksum(&path)? { if !self.validate_checksum(&path)? {
damaged_files.insert(path); hashset.insert(path);
} }
} }
Ok(damaged_files) Ok(hashset)
} }
} }
impl Directory for ManagedDirectory { impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> { fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let file_slice = self.directory.open_read(path)?; let read_only_source = self.directory.open_read(path)?;
let (footer, reader) = Footer::extract_footer(file_slice) let (footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|io_error| OpenReadError::wrap_io_error(io_error, path.to_path_buf()))?; .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
footer.is_compatible()?; footer.is_compatible()?;
Ok(reader) Ok(reader)
} }
fn open_write(&self, path: &Path) -> result::Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path) self.register_file_as_managed(path)
.map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?; .map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(io::BufWriter::new(Box::new(FooterProxy::new( Ok(io::BufWriter::new(Box::new(FooterProxy::new(
self.directory self.directory
.open_write(path)? .open_write(path)?
@@ -294,7 +283,7 @@ impl Directory for ManagedDirectory {
)))) ))))
} }
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
self.register_file_as_managed(path)?; self.register_file_as_managed(path)?;
self.directory.atomic_write(path, data) self.directory.atomic_write(path, data)
} }
@@ -408,37 +397,39 @@ mod tests_mmap_specific {
} }
#[test] #[test]
fn test_checksum() -> crate::Result<()> { fn test_checksum() {
let test_path1: &'static Path = Path::new("some_path_for_test"); let test_path1: &'static Path = Path::new("some_path_for_test");
let test_path2: &'static Path = Path::new("other_test_path"); let test_path2: &'static Path = Path::new("other_test_path");
let tempdir = TempDir::new().unwrap(); let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path()); let tempdir_path = PathBuf::from(tempdir.path());
let mmap_directory = MmapDirectory::open(&tempdir_path)?; let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let managed_directory = ManagedDirectory::wrap(mmap_directory)?; let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write = managed_directory.open_write(test_path1)?; let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8])?; write.write_all(&[0u8, 1u8]).unwrap();
write.terminate()?; write.terminate().unwrap();
let mut write = managed_directory.open_write(test_path2)?; let mut write = managed_directory.open_write(test_path2).unwrap();
write.write_all(&[3u8, 4u8, 5u8])?; write.write_all(&[3u8, 4u8, 5u8]).unwrap();
write.terminate()?; write.terminate().unwrap();
let read_file = managed_directory.open_read(test_path2)?.read_bytes()?; let read_source = managed_directory.open_read(test_path2).unwrap();
assert_eq!(read_file.as_slice(), &[3u8, 4u8, 5u8]); assert_eq!(read_source.as_slice(), &[3u8, 4u8, 5u8]);
assert!(managed_directory.list_damaged().unwrap().is_empty()); assert!(managed_directory.list_damaged().unwrap().is_empty());
let mut corrupted_path = tempdir_path.clone(); let mut corrupted_path = tempdir_path.clone();
corrupted_path.push(test_path2); corrupted_path.push(test_path2);
let mut file = OpenOptions::new().write(true).open(&corrupted_path)?; let mut file = OpenOptions::new()
file.write_all(&[255u8])?; .write(true)
file.flush()?; .open(&corrupted_path)
.unwrap();
file.write_all(&[255u8]).unwrap();
file.flush().unwrap();
drop(file); drop(file);
let damaged = managed_directory.list_damaged()?; let damaged = managed_directory.list_damaged().unwrap();
assert_eq!(damaged.len(), 1); assert_eq!(damaged.len(), 1);
assert!(damaged.contains(test_path2)); assert!(damaged.contains(test_path2));
Ok(())
} }
} }
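The `validate_checksum` hunk above keeps the same CRC comparison on both sides; reduced to plain byte slices it amounts to this sketch (illustration only, function name assumed):

use crc32fast::Hasher;

fn crc_matches(data: &[u8], expected_crc: u32) -> bool {
    let mut hasher = Hasher::new();
    hasher.update(data);
    // finalize() consumes the hasher and yields the CRC32 of everything fed to update().
    hasher.finalize() == expected_crc
}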

View File

@@ -1,12 +1,14 @@
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::error::LockError; use crate::directory::error::LockError;
use crate::directory::error::{DeleteError, OpenDirectoryError, OpenReadError, OpenWriteError}; use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::read_only_source::BoxedData;
use crate::directory::AntiCallToken; use crate::directory::AntiCallToken;
use crate::directory::BoxedData;
use crate::directory::Directory; use crate::directory::Directory;
use crate::directory::DirectoryLock; use crate::directory::DirectoryLock;
use crate::directory::FileSlice;
use crate::directory::Lock; use crate::directory::Lock;
use crate::directory::ReadOnlySource;
use crate::directory::WatchCallback; use crate::directory::WatchCallback;
use crate::directory::WatchCallbackList; use crate::directory::WatchCallbackList;
use crate::directory::WatchHandle; use crate::directory::WatchHandle;
@@ -17,7 +19,7 @@ use notify::RawEvent;
use notify::RecursiveMode; use notify::RecursiveMode;
use notify::Watcher; use notify::Watcher;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use stable_deref_trait::StableDeref; use std::collections::HashMap;
use std::convert::From; use std::convert::From;
use std::fmt; use std::fmt;
use std::fs::OpenOptions; use std::fs::OpenOptions;
@@ -32,7 +34,6 @@ use std::sync::Mutex;
use std::sync::RwLock; use std::sync::RwLock;
use std::sync::Weak; use std::sync::Weak;
use std::thread; use std::thread;
use std::{collections::HashMap, ops::Deref};
use tempfile::TempDir; use tempfile::TempDir;
/// Create a default io error given a string. /// Create a default io error given a string.
@@ -43,17 +44,17 @@ pub(crate) fn make_io_err(msg: String) -> io::Error {
/// Returns None iff the file exists, can be read, but is empty (and hence /// Returns None iff the file exists, can be read, but is empty (and hence
/// cannot be mmapped) /// cannot be mmapped)
fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> { fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
let file = File::open(full_path).map_err(|io_err| { let file = File::open(full_path).map_err(|e| {
if io_err.kind() == io::ErrorKind::NotFound { if e.kind() == io::ErrorKind::NotFound {
OpenReadError::FileDoesNotExist(full_path.to_path_buf()) OpenReadError::FileDoesNotExist(full_path.to_owned())
} else { } else {
OpenReadError::wrap_io_error(io_err, full_path.to_path_buf()) OpenReadError::IOError(IOError::with_path(full_path.to_owned(), e))
} }
})?; })?;
let meta_data = file let meta_data = file
.metadata() .metadata()
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_owned()))?; .map_err(|e| IOError::with_path(full_path.to_owned(), e))?;
if meta_data.len() == 0 { if meta_data.len() == 0 {
// if the file size is 0, it will not be possible // if the file size is 0, it will not be possible
// to mmap the file, so we return None // to mmap the file, so we return None
@@ -63,7 +64,7 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
unsafe { unsafe {
memmap::Mmap::map(&file) memmap::Mmap::map(&file)
.map(Some) .map(Some)
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_path_buf())) .map_err(|e| From::from(IOError::with_path(full_path.to_owned(), e)))
} }
} }
@@ -182,10 +183,6 @@ impl WatcherWrapper {
} }
} }
} }
})
.map_err(|io_error| OpenDirectoryError::IoError {
io_error,
directory_path: path.to_path_buf(),
})?; })?;
Ok(WatcherWrapper { Ok(WatcherWrapper {
_watcher: Mutex::new(watcher), _watcher: Mutex::new(watcher),
@@ -275,11 +272,9 @@ impl MmapDirectory {
/// This is mostly useful to test the MmapDirectory itself. /// This is mostly useful to test the MmapDirectory itself.
/// For your unit tests, prefer the RAMDirectory. /// For your unit tests, prefer the RAMDirectory.
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> { pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
let tempdir = TempDir::new().map_err(OpenDirectoryError::FailedToCreateTempDir)?; let tempdir = TempDir::new().map_err(OpenDirectoryError::IoError)?;
Ok(MmapDirectory::new( let tempdir_path = PathBuf::from(tempdir.path());
tempdir.path().to_path_buf(), Ok(MmapDirectory::new(tempdir_path, Some(tempdir)))
Some(tempdir),
))
} }
/// Opens a MmapDirectory in a directory. /// Opens a MmapDirectory in a directory.
@@ -401,20 +396,8 @@ impl TerminatingWrite for SafeFileWriter {
} }
} }
#[derive(Clone)]
struct MmapArc(Arc<Box<dyn Deref<Target = [u8]> + Send + Sync>>);
impl Deref for MmapArc {
type Target = [u8];
fn deref(&self) -> &[u8] {
self.0.deref()
}
}
unsafe impl StableDeref for MmapArc {}
impl Directory for MmapDirectory { impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> { fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path); debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
@@ -424,14 +407,12 @@ impl Directory for MmapDirectory {
on mmap cache while reading {:?}", on mmap cache while reading {:?}",
path path
); );
let io_err = make_io_err(msg); IOError::with_path(path.to_owned(), make_io_err(msg))
OpenReadError::wrap_io_error(io_err, path.to_path_buf())
})?; })?;
if let Some(mmap_arc) = mmap_cache.get_mmap(&full_path)? { Ok(mmap_cache
Ok(FileSlice::from(MmapArc(mmap_arc))) .get_mmap(&full_path)?
} else { .map(ReadOnlySource::from)
Ok(FileSlice::empty()) .unwrap_or_else(ReadOnlySource::empty))
}
} }
/// Any entry associated to the path in the mmap will be /// Any entry associated to the path in the mmap will be
@@ -439,18 +420,14 @@ impl Directory for MmapDirectory {
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
match fs::remove_file(&full_path) { match fs::remove_file(&full_path) {
Ok(_) => self.sync_directory().map_err(|e| DeleteError::IOError { Ok(_) => self
io_error: e, .sync_directory()
filepath: path.to_path_buf(), .map_err(|e| IOError::with_path(path.to_owned(), e).into()),
}),
Err(e) => { Err(e) => {
if e.kind() == io::ErrorKind::NotFound { if e.kind() == io::ErrorKind::NotFound {
Err(DeleteError::FileDoesNotExist(path.to_owned())) Err(DeleteError::FileDoesNotExist(path.to_owned()))
} else { } else {
Err(DeleteError::IOError { Err(IOError::with_path(path.to_owned(), e).into())
io_error: e,
filepath: path.to_path_buf(),
})
} }
} }
} }
@@ -461,7 +438,7 @@ impl Directory for MmapDirectory {
full_path.exists() full_path.exists()
} }
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
debug!("Open Write {:?}", path); debug!("Open Write {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
@@ -470,22 +447,22 @@ impl Directory for MmapDirectory {
.create_new(true) .create_new(true)
.open(full_path); .open(full_path);
let mut file = open_res.map_err(|io_err| { let mut file = open_res.map_err(|err| {
if io_err.kind() == io::ErrorKind::AlreadyExists { if err.kind() == io::ErrorKind::AlreadyExists {
OpenWriteError::FileAlreadyExists(path.to_path_buf()) OpenWriteError::FileAlreadyExists(path.to_owned())
} else { } else {
OpenWriteError::wrap_io_error(io_err, path.to_path_buf()) IOError::with_path(path.to_owned(), err).into()
} }
})?; })?;
// making sure the file is created. // making sure the file is created.
file.flush() file.flush()
.map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?; .map_err(|e| IOError::with_path(path.to_owned(), e))?;
// Apparently, on some filesystems, syncing the parent // Apparently, on some filesystems, syncing the parent
// directory is required. // directory is required.
self.sync_directory() self.sync_directory()
.map_err(|io_err| OpenWriteError::wrap_io_error(io_err, path.to_path_buf()))?; .map_err(|e| IOError::with_path(path.to_owned(), e))?;
let writer = SafeFileWriter::new(file); let writer = SafeFileWriter::new(file);
Ok(BufWriter::new(Box::new(writer))) Ok(BufWriter::new(Box::new(writer)))
@@ -496,28 +473,25 @@ impl Directory for MmapDirectory {
let mut buffer = Vec::new(); let mut buffer = Vec::new();
match File::open(&full_path) { match File::open(&full_path) {
Ok(mut file) => { Ok(mut file) => {
file.read_to_end(&mut buffer).map_err(|io_error| { file.read_to_end(&mut buffer)
OpenReadError::wrap_io_error(io_error, path.to_path_buf()) .map_err(|e| IOError::with_path(path.to_owned(), e))?;
})?;
Ok(buffer) Ok(buffer)
} }
Err(io_error) => { Err(e) => {
if io_error.kind() == io::ErrorKind::NotFound { if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned())) Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else { } else {
Err(OpenReadError::wrap_io_error(io_error, path.to_path_buf())) Err(IOError::with_path(path.to_owned(), e).into())
} }
} }
} }
} }
fn atomic_write(&self, path: &Path, content: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
debug!("Atomic Write {:?}", path); debug!("Atomic Write {:?}", path);
let mut tempfile = tempfile::Builder::new().tempfile_in(&self.inner.root_path)?;
tempfile.write_all(content)?;
tempfile.flush()?;
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
tempfile.into_temp_path().persist(full_path)?; let meta_file = atomicwrites::AtomicFile::new(full_path, atomicwrites::AllowOverwrite);
meta_file.write(|f| f.write_all(data))?;
Ok(()) Ok(())
} }
@@ -553,10 +527,10 @@ mod tests {
// The following tests are specific to the MmapDirectory // The following tests are specific to the MmapDirectory
use super::*; use super::*;
use crate::indexer::LogMergePolicy;
use crate::schema::{Schema, SchemaBuilder, TEXT}; use crate::schema::{Schema, SchemaBuilder, TEXT};
use crate::Index; use crate::Index;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use crate::{common::HasLen, indexer::LogMergePolicy};
use std::fs; use std::fs;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
@@ -571,7 +545,7 @@ mod tests {
// cannot be mmapped. // cannot be mmapped.
// //
// In that case the directory returns a SharedVecSlice. // In that case the directory returns a SharedVecSlice.
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap(); let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
let path = PathBuf::from("test"); let path = PathBuf::from("test");
{ {
let mut w = mmap_directory.open_write(&path).unwrap(); let mut w = mmap_directory.open_write(&path).unwrap();
@@ -587,7 +561,7 @@ mod tests {
// here we test if the cache releases // here we test if the cache releases
// mmaps correctly. // mmaps correctly.
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap(); let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
let num_paths = 10; let num_paths = 10;
let paths: Vec<PathBuf> = (0..num_paths) let paths: Vec<PathBuf> = (0..num_paths)
.map(|i| PathBuf::from(&*format!("file_{}", i))) .map(|i| PathBuf::from(&*format!("file_{}", i)))
@@ -678,7 +652,7 @@ mod tests {
{ {
let index = Index::create(mmap_directory.clone(), schema).unwrap(); let index = Index::create(mmap_directory.clone(), schema).unwrap();
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut log_merge_policy = LogMergePolicy::default(); let mut log_merge_policy = LogMergePolicy::default();
log_merge_policy.set_min_merge_size(3); log_merge_policy.set_min_merge_size(3);
index_writer.set_merge_policy(Box::new(log_merge_policy)); index_writer.set_merge_policy(Box::new(log_merge_policy));
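The `atomic_write` hunk above swaps the `atomicwrites::AtomicFile` call for a write-to-temp-file-then-persist sequence; restated as a standalone sketch using the `tempfile` crate (function name and arguments are illustrative):

use std::io::{self, Write};
use std::path::Path;

fn atomic_write_sketch(dir: &Path, target: &Path, data: &[u8]) -> io::Result<()> {
    // Write the payload into a temporary file located next to the target...
    let mut tmp = tempfile::Builder::new().tempfile_in(dir)?;
    tmp.write_all(data)?;
    tmp.flush()?;
    // ...then atomically rename it over the target path.
    tmp.into_temp_path().persist(target)?;
    Ok(())
}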

View File

@@ -9,11 +9,10 @@ mod mmap_directory;
mod directory; mod directory;
mod directory_lock; mod directory_lock;
mod file_slice;
mod footer; mod footer;
mod managed_directory; mod managed_directory;
mod owned_bytes;
mod ram_directory; mod ram_directory;
mod read_only_source;
mod watch_event_router; mod watch_event_router;
/// Errors specific to the directory module. /// Errors specific to the directory module.
@@ -22,14 +21,11 @@ pub mod error;
pub use self::directory::DirectoryLock; pub use self::directory::DirectoryLock;
pub use self::directory::{Directory, DirectoryClone}; pub use self::directory::{Directory, DirectoryClone};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK}; pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub(crate) use self::file_slice::BoxedData;
pub use self::file_slice::{FileHandle, FileSlice};
pub use self::owned_bytes::OwnedBytes;
pub use self::ram_directory::RAMDirectory; pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource;
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle}; pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
use std::io::{self, BufWriter, Write}; use std::io::{self, BufWriter, Write};
use std::path::PathBuf; use std::path::PathBuf;
/// Outcome of the Garbage collection /// Outcome of the Garbage collection
pub struct GarbageCollectionResult { pub struct GarbageCollectionResult {
/// List of files that were deleted in this cycle /// List of files that were deleted in this cycle

View File

@@ -1,255 +0,0 @@
use crate::directory::FileHandle;
use stable_deref_trait::StableDeref;
use std::mem;
use std::ops::Deref;
use std::sync::Arc;
use std::{fmt, io};
/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a static slice.
///
/// The backing object is required to be `StableDeref`.
#[derive(Clone)]
pub struct OwnedBytes {
data: &'static [u8],
box_stable_deref: Arc<dyn Deref<Target = [u8]> + Sync + Send>,
}
impl FileHandle for OwnedBytes {
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
Ok(self.slice(from, to))
}
}
impl OwnedBytes {
/// Creates an empty `OwnedBytes`.
pub fn empty() -> OwnedBytes {
OwnedBytes::new(&[][..])
}
/// Creates an `OwnedBytes` instance given a `StableDeref` object.
pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
data_holder: T,
) -> OwnedBytes {
let box_stable_deref = Arc::new(data_holder);
let bytes: &[u8] = box_stable_deref.as_ref();
let data = unsafe { mem::transmute::<_, &'static [u8]>(bytes.deref()) };
OwnedBytes {
box_stable_deref,
data,
}
}
/// Creates an `OwnedBytes` that is just a view over a slice of the data.
pub fn slice(&self, from: usize, to: usize) -> Self {
OwnedBytes {
data: &self.data[from..to],
box_stable_deref: self.box_stable_deref.clone(),
}
}
/// Returns the underlying slice of data.
/// `Deref` and `AsRef` are also available.
#[inline(always)]
pub fn as_slice(&self) -> &[u8] {
self.data
}
/// Returns the len of the slice.
#[inline(always)]
pub fn len(&self) -> usize {
self.data.len()
}
/// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
///
/// Left will hold `split_len` bytes.
///
/// This operation is cheap and does not require copying any memory.
/// On the other hand, both `left` and `right` retain a handle over
/// the entire slice of memory. In other words, the memory will only
/// be released when both left and right are dropped.
pub fn split(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
let right_box_stable_deref = self.box_stable_deref.clone();
let left = OwnedBytes {
data: &self.data[..split_len],
box_stable_deref: self.box_stable_deref,
};
let right = OwnedBytes {
data: &self.data[split_len..],
box_stable_deref: right_box_stable_deref,
};
(left, right)
}
/// Returns true iff this `OwnedBytes` is empty.
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.as_slice().is_empty()
}
/// Drops the left most `advance_len` bytes.
///
/// See also [.clip(clip_len: usize)](#method.clip).
#[inline(always)]
pub fn advance(&mut self, advance_len: usize) {
self.data = &self.data[advance_len..]
}
}
impl fmt::Debug for OwnedBytes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// We truncate the bytes in order to make sure the debug string
// is not too long.
let bytes_truncated: &[u8] = if self.len() > 10 {
&self.as_slice()[..10]
} else {
self.as_slice()
};
write!(f, "OwnedBytes({:?}, len={})", bytes_truncated, self.len())
}
}
impl Deref for OwnedBytes {
type Target = [u8];
fn deref(&self) -> &Self::Target {
self.as_slice()
}
}
impl io::Read for OwnedBytes {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let read_len = {
let data = self.as_slice();
if data.len() >= buf.len() {
let buf_len = buf.len();
buf.copy_from_slice(&data[..buf_len]);
buf.len()
} else {
let data_len = data.len();
buf[..data_len].copy_from_slice(data);
data_len
}
};
self.advance(read_len);
Ok(read_len)
}
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
let read_len = {
let data = self.as_slice();
buf.extend(data);
data.len()
};
self.advance(read_len);
Ok(read_len)
}
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
let read_len = self.read(buf)?;
if read_len != buf.len() {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"failed to fill whole buffer",
));
}
Ok(())
}
}
impl AsRef<[u8]> for OwnedBytes {
fn as_ref(&self) -> &[u8] {
self.as_slice()
}
}
#[cfg(test)]
mod tests {
use std::io::{self, Read};
use super::OwnedBytes;
#[test]
fn test_owned_bytes_debug() {
let short_bytes = OwnedBytes::new(b"abcd".as_ref());
assert_eq!(
format!("{:?}", short_bytes),
"OwnedBytes([97, 98, 99, 100], len=4)"
);
let long_bytes = OwnedBytes::new(b"abcdefghijklmnopq".as_ref());
assert_eq!(
format!("{:?}", long_bytes),
"OwnedBytes([97, 98, 99, 100, 101, 102, 103, 104, 105, 106], len=17)"
);
}
#[test]
fn test_owned_bytes_read() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcdefghiklmnopqrstuvwxyz".as_ref());
{
let mut buf = [0u8; 5];
bytes.read_exact(&mut buf[..]).unwrap();
assert_eq!(&buf, b"abcde");
assert_eq!(bytes.as_slice(), b"fghiklmnopqrstuvwxyz")
}
{
let mut buf = [0u8; 2];
bytes.read_exact(&mut buf[..]).unwrap();
assert_eq!(&buf, b"fg");
assert_eq!(bytes.as_slice(), b"hiklmnopqrstuvwxyz")
}
Ok(())
}
#[test]
fn test_owned_bytes_read_right_at_the_end() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = [0u8; 5];
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
assert_eq!(&buf, b"abcde");
assert_eq!(bytes.as_slice(), b"");
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
assert_eq!(&buf, b"abcde");
Ok(())
}
#[test]
fn test_owned_bytes_read_incomplete() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = [0u8; 7];
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
assert_eq!(&buf[..5], b"abcde");
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
Ok(())
}
#[test]
fn test_owned_bytes_read_to_end() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = Vec::new();
bytes.read_to_end(&mut buf)?;
assert_eq!(buf.as_slice(), b"abcde".as_ref());
Ok(())
}
#[test]
fn test_owned_bytes_split() {
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
let (left, right) = bytes.split(3);
assert_eq!(left.as_slice(), b"abc");
assert_eq!(right.as_slice(), b"defghi");
}
#[test]
fn test_owned_bytes_split_boundary() {
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
{
let (left, right) = bytes.clone().split(0);
assert_eq!(left.as_slice(), b"");
assert_eq!(right.as_slice(), b"abcdefghi");
}
{
let (left, right) = bytes.split(9);
assert_eq!(left.as_slice(), b"abcdefghi");
assert_eq!(right.as_slice(), b"");
}
}
}
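A small usage sketch (not part of the diff) of the `StableDeref` requirement discussed above: owned buffers such as `Vec<u8>` qualify because their heap allocation does not move with the owner.

fn owned_bytes_from_vec_demo() {
    // Vec<u8> is StableDeref, so OwnedBytes can safely keep its 'static view into the heap buffer.
    let bytes = OwnedBytes::new(vec![1u8, 2, 3, 4]);
    let (head, tail) = bytes.split(1);
    assert_eq!(head.as_slice(), &[1]);
    assert_eq!(tail.as_slice(), &[2, 3, 4]);
}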

View File

@@ -1,9 +1,9 @@
use crate::core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken; use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList; use crate::directory::WatchCallbackList;
use crate::directory::{Directory, FileSlice, WatchCallback, WatchHandle}; use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use crate::directory::{TerminatingWrite, WritePtr}; use crate::directory::{TerminatingWrite, WritePtr};
use crate::{common::HasLen, core::META_FILEPATH};
use fail::fail_point; use fail::fail_point;
use std::collections::HashMap; use std::collections::HashMap;
use std::fmt; use std::fmt;
@@ -80,17 +80,17 @@ impl TerminatingWrite for VecWriter {
#[derive(Default)] #[derive(Default)]
struct InnerDirectory { struct InnerDirectory {
fs: HashMap<PathBuf, FileSlice>, fs: HashMap<PathBuf, ReadOnlySource>,
watch_router: WatchCallbackList, watch_router: WatchCallbackList,
} }
impl InnerDirectory { impl InnerDirectory {
fn write(&mut self, path: PathBuf, data: &[u8]) -> bool { fn write(&mut self, path: PathBuf, data: &[u8]) -> bool {
let data = FileSlice::from(data.to_vec()); let data = ReadOnlySource::new(Vec::from(data));
self.fs.insert(path, data).is_some() self.fs.insert(path, data).is_some()
} }
fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError> { fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
self.fs self.fs
.get(path) .get(path)
.ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path))) .ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
@@ -151,11 +151,11 @@ impl RAMDirectory {
/// written using the `atomic_write` api. /// written using the `atomic_write` api.
/// ///
/// If an error is encountered, files may be persisted partially. /// If an error is encountered, files may be persisted partially.
pub fn persist(&self, dest: &dyn Directory) -> crate::Result<()> { pub fn persist(&self, dest: &mut dyn Directory) -> crate::Result<()> {
let wlock = self.fs.write().unwrap(); let wlock = self.fs.write().unwrap();
for (path, file) in wlock.fs.iter() { for (path, source) in wlock.fs.iter() {
let mut dest_wrt = dest.open_write(path)?; let mut dest_wrt = dest.open_write(path)?;
dest_wrt.write_all(file.read_bytes()?.as_slice())?; dest_wrt.write_all(source.as_slice())?;
dest_wrt.terminate()?; dest_wrt.terminate()?;
} }
Ok(()) Ok(())
@@ -163,16 +163,15 @@ impl RAMDirectory {
} }
impl Directory for RAMDirectory { impl Directory for RAMDirectory {
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> { fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.fs.read().unwrap().open_read(path) self.fs.read().unwrap().open_read(path)
} }
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
fail_point!("RAMDirectory::delete", |_| { fail_point!("RAMDirectory::delete", |_| {
Err(DeleteError::IOError { use crate::directory::error::IOError;
io_error: io::Error::from(io::ErrorKind::Other), let io_error = IOError::from(io::Error::from(io::ErrorKind::Other));
filepath: path.to_path_buf(), Err(DeleteError::from(io_error))
})
}); });
self.fs.write().unwrap().delete(path) self.fs.write().unwrap().delete(path)
} }
@@ -181,7 +180,7 @@ impl Directory for RAMDirectory {
self.fs.read().unwrap().exists(path) self.fs.read().unwrap().exists(path)
} }
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
let mut fs = self.fs.write().unwrap(); let mut fs = self.fs.write().unwrap();
let path_buf = PathBuf::from(path); let path_buf = PathBuf::from(path);
let vec_writer = VecWriter::new(path_buf.clone(), self.clone()); let vec_writer = VecWriter::new(path_buf.clone(), self.clone());
@@ -195,17 +194,10 @@ impl Directory for RAMDirectory {
} }
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> { fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let bytes = Ok(self.open_read(path)?.as_slice().to_owned())
self.open_read(path)?
.read_bytes()
.map_err(|io_error| OpenReadError::IOError {
io_error,
filepath: path.to_path_buf(),
})?;
Ok(bytes.as_slice().to_owned())
} }
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new( fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Other,
msg.unwrap_or_else(|| "Undefined".to_string()) msg.unwrap_or_else(|| "Undefined".to_string())
@@ -242,13 +234,13 @@ mod tests {
let msg_seq: &'static [u8] = b"sequential is the way"; let msg_seq: &'static [u8] = b"sequential is the way";
let path_atomic: &'static Path = Path::new("atomic"); let path_atomic: &'static Path = Path::new("atomic");
let path_seq: &'static Path = Path::new("seq"); let path_seq: &'static Path = Path::new("seq");
let directory = RAMDirectory::create(); let mut directory = RAMDirectory::create();
assert!(directory.atomic_write(path_atomic, msg_atomic).is_ok()); assert!(directory.atomic_write(path_atomic, msg_atomic).is_ok());
let mut wrt = directory.open_write(path_seq).unwrap(); let mut wrt = directory.open_write(path_seq).unwrap();
assert!(wrt.write_all(msg_seq).is_ok()); assert!(wrt.write_all(msg_seq).is_ok());
assert!(wrt.flush().is_ok()); assert!(wrt.flush().is_ok());
let directory_copy = RAMDirectory::create(); let mut directory_copy = RAMDirectory::create();
assert!(directory.persist(&directory_copy).is_ok()); assert!(directory.persist(&mut directory_copy).is_ok());
assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic); assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic);
assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq); assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq);
} }

View File

@@ -0,0 +1,137 @@
use crate::common::HasLen;
use stable_deref_trait::{CloneStableDeref, StableDeref};
use std::ops::Deref;
use std::sync::Arc;
pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
/// Read object that represents files in tantivy.
///
/// These read objects are only in charge of delivering
/// the data in the form of a constant read-only `&[u8]`.
/// Whatever happens to the directory file, the data
/// held by this object should never be altered or destroyed.
pub struct ReadOnlySource {
data: Arc<BoxedData>,
start: usize,
stop: usize,
}
unsafe impl StableDeref for ReadOnlySource {}
unsafe impl CloneStableDeref for ReadOnlySource {}
impl Deref for ReadOnlySource {
type Target = [u8];
fn deref(&self) -> &[u8] {
self.as_slice()
}
}
impl From<Arc<BoxedData>> for ReadOnlySource {
fn from(data: Arc<BoxedData>) -> Self {
let len = data.len();
ReadOnlySource {
data,
start: 0,
stop: len,
}
}
}
impl ReadOnlySource {
pub(crate) fn new<D>(data: D) -> ReadOnlySource
where
D: Deref<Target = [u8]> + Send + Sync + 'static,
{
let len = data.len();
ReadOnlySource {
data: Arc::new(Box::new(data)),
start: 0,
stop: len,
}
}
/// Creates an empty ReadOnlySource
pub fn empty() -> ReadOnlySource {
ReadOnlySource::new(&[][..])
}
/// Returns the data underlying the ReadOnlySource object.
pub fn as_slice(&self) -> &[u8] {
&self.data[self.start..self.stop]
}
/// Splits into 2 `ReadOnlySource`, at the offset given
/// as an argument.
pub fn split(self, addr: usize) -> (ReadOnlySource, ReadOnlySource) {
let left = self.slice(0, addr);
let right = self.slice_from(addr);
(left, right)
}
/// Splits into 2 `ReadOnlySource`, at the offset `end - right_len`.
pub fn split_from_end(self, right_len: usize) -> (ReadOnlySource, ReadOnlySource) {
let left_len = self.len() - right_len;
self.split(left_len)
}
/// Creates a ReadOnlySource that is just a
/// view over a slice of the data.
///
/// Keep in mind that any living slice extends
/// the lifetime of the original ReadOnlySource.
///
/// For instance, if `ReadOnlySource` wraps 500MB
/// worth of data in anonymous memory, and only a
/// 1KB slice remains, the whole 500MB
/// is retained in memory.
pub fn slice(&self, start: usize, stop: usize) -> ReadOnlySource {
assert!(
start <= stop,
"Requested negative slice [{}..{}]",
start,
stop
);
assert!(stop <= self.len());
ReadOnlySource {
data: self.data.clone(),
start: self.start + start,
stop: self.start + stop,
}
}
/// Like `.slice(...)` but enforcing only the `from`
/// boundary.
///
/// Equivalent to `.slice(from_offset, self.len())`
pub fn slice_from(&self, from_offset: usize) -> ReadOnlySource {
self.slice(from_offset, self.len())
}
/// Like `.slice(...)` but enforcing only the `to`
/// boundary.
///
/// Equivalent to `.slice(0, to_offset)`
pub fn slice_to(&self, to_offset: usize) -> ReadOnlySource {
self.slice(0, to_offset)
}
}
impl HasLen for ReadOnlySource {
fn len(&self) -> usize {
self.stop - self.start
}
}
impl Clone for ReadOnlySource {
fn clone(&self) -> Self {
self.slice_from(0)
}
}
impl From<Vec<u8>> for ReadOnlySource {
fn from(data: Vec<u8>) -> ReadOnlySource {
ReadOnlySource::new(data)
}
}
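A short sketch (not part of the diff) of the retention behaviour described in the `slice` docs above: every slice clones the `Arc` over the full backing buffer, so dropping the original source does not free it.

fn retention_demo() {
    let source = ReadOnlySource::from(vec![0u8; 1024]);
    let tiny = source.slice(0, 8);
    drop(source);
    // The 1024-byte Vec stays alive until `tiny` (and any other slices) are dropped.
    assert_eq!(tiny.as_slice(), &[0u8; 8][..]);
}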

View File

@@ -20,47 +20,45 @@ mod mmap_directory_tests {
} }
#[test] #[test]
fn test_simple() -> crate::Result<()> { fn test_simple() {
let directory = make_directory(); let mut directory = make_directory();
super::test_simple(&directory) super::test_simple(&mut directory);
} }
#[test] #[test]
fn test_write_create_the_file() { fn test_write_create_the_file() {
let directory = make_directory(); let mut directory = make_directory();
super::test_write_create_the_file(&directory); super::test_write_create_the_file(&mut directory);
} }
#[test] #[test]
fn test_rewrite_forbidden() -> crate::Result<()> { fn test_rewrite_forbidden() {
let directory = make_directory(); let mut directory = make_directory();
super::test_rewrite_forbidden(&directory)?; super::test_rewrite_forbidden(&mut directory);
Ok(())
} }
#[test] #[test]
fn test_directory_delete() -> crate::Result<()> { fn test_directory_delete() {
let directory = make_directory(); let mut directory = make_directory();
super::test_directory_delete(&directory)?; super::test_directory_delete(&mut directory);
Ok(())
} }
#[test] #[test]
fn test_lock_non_blocking() { fn test_lock_non_blocking() {
let directory = make_directory(); let mut directory = make_directory();
super::test_lock_non_blocking(&directory); super::test_lock_non_blocking(&mut directory);
} }
#[test] #[test]
fn test_lock_blocking() { fn test_lock_blocking() {
let directory = make_directory(); let mut directory = make_directory();
super::test_lock_blocking(&directory); super::test_lock_blocking(&mut directory);
} }
#[test] #[test]
fn test_watch() { fn test_watch() {
let directory = make_directory(); let mut directory = make_directory();
super::test_watch(&directory); super::test_watch(&mut directory);
} }
} }
@@ -74,47 +72,45 @@ mod ram_directory_tests {
} }
#[test] #[test]
fn test_simple() -> crate::Result<()> { fn test_simple() {
let directory = make_directory(); let mut directory = make_directory();
super::test_simple(&directory) super::test_simple(&mut directory);
} }
#[test] #[test]
fn test_write_create_the_file() { fn test_write_create_the_file() {
let directory = make_directory(); let mut directory = make_directory();
super::test_write_create_the_file(&directory); super::test_write_create_the_file(&mut directory);
} }
#[test] #[test]
fn test_rewrite_forbidden() -> crate::Result<()> { fn test_rewrite_forbidden() {
let directory = make_directory(); let mut directory = make_directory();
super::test_rewrite_forbidden(&directory)?; super::test_rewrite_forbidden(&mut directory);
Ok(())
} }
#[test] #[test]
fn test_directory_delete() -> crate::Result<()> { fn test_directory_delete() {
let directory = make_directory(); let mut directory = make_directory();
super::test_directory_delete(&directory)?; super::test_directory_delete(&mut directory);
Ok(())
} }
#[test] #[test]
fn test_lock_non_blocking() { fn test_lock_non_blocking() {
let directory = make_directory(); let mut directory = make_directory();
super::test_lock_non_blocking(&directory); super::test_lock_non_blocking(&mut directory);
} }
#[test] #[test]
fn test_lock_blocking() { fn test_lock_blocking() {
let directory = make_directory(); let mut directory = make_directory();
super::test_lock_blocking(&directory); super::test_lock_blocking(&mut directory);
} }
#[test] #[test]
fn test_watch() { fn test_watch() {
let directory = make_directory(); let mut directory = make_directory();
super::test_watch(&directory); super::test_watch(&mut directory);
} }
} }
@@ -122,37 +118,43 @@ mod ram_directory_tests {
#[should_panic] #[should_panic]
fn ram_directory_panics_if_flush_forgotten() { fn ram_directory_panics_if_flush_forgotten() {
let test_path: &'static Path = Path::new("some_path_for_test"); let test_path: &'static Path = Path::new("some_path_for_test");
let ram_directory = RAMDirectory::create(); let mut ram_directory = RAMDirectory::create();
let mut write_file = ram_directory.open_write(test_path).unwrap(); let mut write_file = ram_directory.open_write(test_path).unwrap();
assert!(write_file.write_all(&[4]).is_ok()); assert!(write_file.write_all(&[4]).is_ok());
} }
fn test_simple(directory: &dyn Directory) -> crate::Result<()> { fn test_simple(directory: &mut dyn Directory) {
let test_path: &'static Path = Path::new("some_path_for_test"); let test_path: &'static Path = Path::new("some_path_for_test");
let mut write_file = directory.open_write(test_path)?; {
assert!(directory.exists(test_path)); let mut write_file = directory.open_write(test_path).unwrap();
write_file.write_all(&[4])?; assert!(directory.exists(test_path));
write_file.write_all(&[3])?; write_file.write_all(&[4]).unwrap();
write_file.write_all(&[7, 3, 5])?; write_file.write_all(&[3]).unwrap();
write_file.flush()?; write_file.write_all(&[7, 3, 5]).unwrap();
let read_file = directory.open_read(test_path)?.read_bytes()?; write_file.flush().unwrap();
assert_eq!(read_file.as_slice(), &[4u8, 3u8, 7u8, 3u8, 5u8]); }
mem::drop(read_file); {
let read_file = directory.open_read(test_path).unwrap();
let data: &[u8] = &*read_file;
assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]);
}
assert!(directory.delete(test_path).is_ok()); assert!(directory.delete(test_path).is_ok());
assert!(!directory.exists(test_path)); assert!(!directory.exists(test_path));
Ok(())
} }
fn test_rewrite_forbidden(directory: &dyn Directory) -> crate::Result<()> { fn test_rewrite_forbidden(directory: &mut dyn Directory) {
let test_path: &'static Path = Path::new("some_path_for_test"); let test_path: &'static Path = Path::new("some_path_for_test");
directory.open_write(test_path)?; {
assert!(directory.exists(test_path)); directory.open_write(test_path).unwrap();
assert!(directory.open_write(test_path).is_err()); assert!(directory.exists(test_path));
}
{
assert!(directory.open_write(test_path).is_err());
}
assert!(directory.delete(test_path).is_ok()); assert!(directory.delete(test_path).is_ok());
Ok(())
} }
fn test_write_create_the_file(directory: &dyn Directory) { fn test_write_create_the_file(directory: &mut dyn Directory) {
let test_path: &'static Path = Path::new("some_path_for_test"); let test_path: &'static Path = Path::new("some_path_for_test");
{ {
assert!(directory.open_read(test_path).is_err()); assert!(directory.open_read(test_path).is_err());
@@ -163,20 +165,21 @@ fn test_write_create_the_file(directory: &dyn Directory) {
} }
} }
fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> { fn test_directory_delete(directory: &mut dyn Directory) {
let test_path: &'static Path = Path::new("some_path_for_test"); let test_path: &'static Path = Path::new("some_path_for_test");
assert!(directory.open_read(test_path).is_err()); assert!(directory.open_read(test_path).is_err());
let mut write_file = directory.open_write(&test_path)?; let mut write_file = directory.open_write(&test_path).unwrap();
write_file.write_all(&[1, 2, 3, 4])?; write_file.write_all(&[1, 2, 3, 4]).unwrap();
write_file.flush()?; write_file.flush().unwrap();
{ {
let read_handle = directory.open_read(&test_path)?.read_bytes()?; let read_handle = directory.open_read(&test_path).unwrap();
assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]); assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
// Mapped files can't be deleted on Windows // Mapped files can't be deleted on Windows
if !cfg!(windows) { if !cfg!(windows) {
assert!(directory.delete(&test_path).is_ok()); assert!(directory.delete(&test_path).is_ok());
assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]); assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
} }
assert!(directory.delete(Path::new("SomeOtherPath")).is_err()); assert!(directory.delete(Path::new("SomeOtherPath")).is_err());
} }
@@ -186,10 +189,9 @@ fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> {
assert!(directory.open_read(&test_path).is_err()); assert!(directory.open_read(&test_path).is_err());
assert!(directory.delete(&test_path).is_err()); assert!(directory.delete(&test_path).is_err());
Ok(())
} }
fn test_watch(directory: &dyn Directory) { fn test_watch(directory: &mut dyn Directory) {
let num_progress: Arc<AtomicUsize> = Default::default(); let num_progress: Arc<AtomicUsize> = Default::default();
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone(); let counter_clone = counter.clone();
@@ -209,22 +211,22 @@ fn test_watch(directory: &dyn Directory) {
.unwrap(); .unwrap();
for i in 0..10 { for i in 0..10 {
assert!(i <= counter.load(SeqCst)); assert_eq!(i, counter.load(SeqCst));
assert!(directory assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data_2") .atomic_write(Path::new("meta.json"), b"random_test_data_2")
.is_ok()); .is_ok());
assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i)); assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i));
assert!(i + 1 <= counter.load(SeqCst)); // notify can trigger more than once. assert_eq!(i + 1, counter.load(SeqCst));
} }
mem::drop(watch_handle); mem::drop(watch_handle);
assert!(directory assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data") .atomic_write(Path::new("meta.json"), b"random_test_data")
.is_ok()); .is_ok());
assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok()); assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok());
assert!(10 <= counter.load(SeqCst)); assert_eq!(10, counter.load(SeqCst));
} }
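The loosened assertions in test_watch above (`<=` instead of exact equality) account for the fact that a filesystem watcher may fire its callbacks more than once for a single write. Below is a minimal sketch of the callback-plus-channel pattern the test relies on, using only the standard library; `CallbackList` is a stand-in type, not tantivy's actual WatchCallbackList.

use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use std::sync::{mpsc, Arc};

// Stand-in for a watch callback registry; illustrative only.
struct CallbackList {
    callbacks: Vec<Box<dyn Fn()>>,
}

impl CallbackList {
    fn new() -> Self {
        CallbackList { callbacks: Vec::new() }
    }

    fn subscribe(&mut self, callback: Box<dyn Fn()>) {
        self.callbacks.push(callback);
    }

    // Broadcast to every subscriber. A real filesystem watcher may deliver
    // more than one notification per write, hence the `<=` assertions above.
    fn broadcast(&self) {
        for callback in &self.callbacks {
            callback();
        }
    }
}

fn main() {
    let counter = Arc::new(AtomicUsize::new(0));
    let counter_clone = Arc::clone(&counter);
    let (sender, receiver) = mpsc::channel();

    let mut callbacks = CallbackList::new();
    callbacks.subscribe(Box::new(move || {
        let previous = counter_clone.fetch_add(1, SeqCst);
        let _ = sender.send(previous);
    }));

    // One simulated write results in at least one callback invocation.
    callbacks.broadcast();
    assert_eq!(receiver.recv().unwrap(), 0);
    assert!(1 <= counter.load(SeqCst));
}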
fn test_lock_non_blocking(directory: &dyn Directory) { fn test_lock_non_blocking(directory: &mut dyn Directory) {
{ {
let lock_a_res = directory.acquire_lock(&Lock { let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"), filepath: PathBuf::from("a.lock"),
@@ -249,7 +251,7 @@ fn test_lock_non_blocking(directory: &dyn Directory) {
assert!(lock_a_res.is_ok()); assert!(lock_a_res.is_ok());
} }
fn test_lock_blocking(directory: &dyn Directory) { fn test_lock_blocking(directory: &mut dyn Directory) {
let lock_a_res = directory.acquire_lock(&Lock { let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"), filepath: PathBuf::from("a.lock"),
is_blocking: true, is_blocking: true,

View File

@@ -29,13 +29,6 @@ impl WatchHandle {
pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle { pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle {
WatchHandle(watch_callback) WatchHandle(watch_callback)
} }
/// Returns an empty watch handle.
///
/// This function is only useful when implementing a readonly directory.
pub fn empty() -> WatchHandle {
WatchHandle::new(Arc::new(Box::new(|| {})))
}
} }
impl WatchCallbackList { impl WatchCallbackList {

View File

@@ -2,27 +2,21 @@
use std::io; use std::io;
use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use crate::directory::error::{Incompatibility, LockError}; use crate::directory::error::{Incompatibility, LockError};
use crate::fastfield::FastFieldNotAvailableError; use crate::fastfield::FastFieldNotAvailableError;
use crate::query; use crate::query;
use crate::{ use crate::schema;
directory::error::{OpenDirectoryError, OpenReadError, OpenWriteError},
schema,
};
use std::fmt; use std::fmt;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::PoisonError; use std::sync::PoisonError;
/// Represents a `DataCorruption` error.
///
/// When facing data corruption, tantivy will either panic or return this error.
pub struct DataCorruption { pub struct DataCorruption {
filepath: Option<PathBuf>, filepath: Option<PathBuf>,
comment: String, comment: String,
} }
impl DataCorruption { impl DataCorruption {
/// Creates a `DataCorruption` Error.
pub fn new(filepath: PathBuf, comment: String) -> DataCorruption { pub fn new(filepath: PathBuf, comment: String) -> DataCorruption {
DataCorruption { DataCorruption {
filepath: Some(filepath), filepath: Some(filepath),
@@ -30,11 +24,10 @@ impl DataCorruption {
} }
} }
/// Creates a `DataCorruption` error when the filepath is irrelevant. pub fn comment_only(comment: String) -> DataCorruption {
pub fn comment_only<TStr: ToString>(comment: TStr) -> DataCorruption {
DataCorruption { DataCorruption {
filepath: None, filepath: None,
comment: comment.to_string(), comment,
} }
} }
} }
@@ -50,47 +43,44 @@ impl fmt::Debug for DataCorruption {
} }
} }
/// The library's error enum /// The library's failure-based error enum
#[derive(Debug, Error)] #[derive(Debug, Fail)]
pub enum TantivyError { pub enum TantivyError {
/// Failed to open the directory. /// Path does not exist.
#[error("Failed to open the directory: '{0:?}'")] #[fail(display = "Path does not exist: '{:?}'", _0)]
OpenDirectoryError(#[from] OpenDirectoryError), PathDoesNotExist(PathBuf),
/// Failed to open a file for read. /// File already exists, this is a problem when we try to write into a new file.
#[error("Failed to open file for read: '{0:?}'")] #[fail(display = "File already exists: '{:?}'", _0)]
OpenReadError(#[from] OpenReadError), FileAlreadyExists(PathBuf),
/// Failed to open a file for write.
#[error("Failed to open file for write: '{0:?}'")]
OpenWriteError(#[from] OpenWriteError),
/// Index already exists in this directory /// Index already exists in this directory
#[error("Index already exists")] #[fail(display = "Index already exists")]
IndexAlreadyExists, IndexAlreadyExists,
/// Failed to acquire file lock /// Failed to acquire file lock
#[error("Failed to acquire Lockfile: {0:?}. {1:?}")] #[fail(display = "Failed to acquire Lockfile: {:?}. {:?}", _0, _1)]
LockFailure(LockError, Option<String>), LockFailure(LockError, Option<String>),
/// IO Error. /// IO Error.
#[error("An IO error occurred: '{0}'")] #[fail(display = "An IO error occurred: '{}'", _0)]
IOError(#[from] io::Error), IOError(#[cause] IOError),
/// Data corruption. /// Data corruption.
#[error("Data corrupted: '{0:?}'")] #[fail(display = "{:?}", _0)]
DataCorruption(DataCorruption), DataCorruption(DataCorruption),
/// A thread holding the lock panicked and poisoned the lock. /// A thread holding the lock panicked and poisoned the lock.
#[error("A thread holding the lock panicked and poisoned the lock")] #[fail(display = "A thread holding the lock panicked and poisoned the lock")]
Poisoned, Poisoned,
/// Invalid argument was passed by the user. /// Invalid argument was passed by the user.
#[error("An invalid argument was passed: '{0}'")] #[fail(display = "An invalid argument was passed: '{}'", _0)]
InvalidArgument(String), InvalidArgument(String),
/// An error happened in one of the threads. /// An error happened in one of the threads.
#[error("An error occurred in a thread: '{0}'")] #[fail(display = "An error occurred in a thread: '{}'", _0)]
ErrorInThread(String), ErrorInThread(String),
/// An error related to the schema occurred. /// An error related to the schema occurred.
#[error("Schema error: '{0}'")] #[fail(display = "Schema error: '{}'", _0)]
SchemaError(String), SchemaError(String),
/// System error (e.g. we failed to spawn a new thread). /// System error (e.g. we failed to spawn a new thread).
#[error("System error.'{0}'")] #[fail(display = "System error.'{}'", _0)]
SystemError(String), SystemError(String),
/// Index incompatible with current version of tantivy /// Index incompatible with current version of tantivy
#[error("{0:?}")] #[fail(display = "{:?}", _0)]
IncompatibleIndex(Incompatibility), IncompatibleIndex(Incompatibility),
} }
@@ -99,17 +89,31 @@ impl From<DataCorruption> for TantivyError {
TantivyError::DataCorruption(data_corruption) TantivyError::DataCorruption(data_corruption)
} }
} }
impl From<FastFieldNotAvailableError> for TantivyError { impl From<FastFieldNotAvailableError> for TantivyError {
fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError { fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError {
TantivyError::SchemaError(format!("{}", fastfield_error)) TantivyError::SchemaError(format!("{}", fastfield_error))
} }
} }
impl From<LockError> for TantivyError { impl From<LockError> for TantivyError {
fn from(lock_error: LockError) -> TantivyError { fn from(lock_error: LockError) -> TantivyError {
TantivyError::LockFailure(lock_error, None) TantivyError::LockFailure(lock_error, None)
} }
} }
impl From<IOError> for TantivyError {
fn from(io_error: IOError) -> TantivyError {
TantivyError::IOError(io_error)
}
}
impl From<io::Error> for TantivyError {
fn from(io_error: io::Error) -> TantivyError {
TantivyError::IOError(io_error.into())
}
}
impl From<query::QueryParserError> for TantivyError { impl From<query::QueryParserError> for TantivyError {
fn from(parsing_error: query::QueryParserError) -> TantivyError { fn from(parsing_error: query::QueryParserError) -> TantivyError {
TantivyError::InvalidArgument(format!("Query is invalid. {:?}", parsing_error)) TantivyError::InvalidArgument(format!("Query is invalid. {:?}", parsing_error))
@@ -122,9 +126,15 @@ impl<Guard> From<PoisonError<Guard>> for TantivyError {
} }
} }
impl From<chrono::ParseError> for TantivyError { impl From<OpenReadError> for TantivyError {
fn from(err: chrono::ParseError) -> TantivyError { fn from(error: OpenReadError) -> TantivyError {
TantivyError::InvalidArgument(err.to_string()) match error {
OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
OpenReadError::IncompatibleIndex(incompatibility) => {
TantivyError::IncompatibleIndex(incompatibility)
}
}
} }
} }
@@ -134,9 +144,35 @@ impl From<schema::DocParsingError> for TantivyError {
} }
} }
impl From<OpenWriteError> for TantivyError {
fn from(error: OpenWriteError) -> TantivyError {
match error {
OpenWriteError::FileAlreadyExists(filepath) => {
TantivyError::FileAlreadyExists(filepath)
}
OpenWriteError::IOError(io_error) => TantivyError::IOError(io_error),
}
}
}
impl From<OpenDirectoryError> for TantivyError {
fn from(error: OpenDirectoryError) -> TantivyError {
match error {
OpenDirectoryError::DoesNotExist(directory_path) => {
TantivyError::PathDoesNotExist(directory_path)
}
OpenDirectoryError::NotADirectory(directory_path) => {
TantivyError::InvalidArgument(format!("{:?} is not a directory", directory_path))
}
OpenDirectoryError::IoError(err) => TantivyError::IOError(IOError::from(err)),
}
}
}
impl From<serde_json::Error> for TantivyError { impl From<serde_json::Error> for TantivyError {
fn from(error: serde_json::Error) -> TantivyError { fn from(error: serde_json::Error) -> TantivyError {
TantivyError::IOError(error.into()) let io_err = io::Error::from(error);
TantivyError::IOError(io_err.into())
} }
} }
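The left-hand column above is the `thiserror`-based error type that replaces the `failure` derive on the right: `#[error(...)]` supplies the Display message and `#[from]` generates the From conversions that previously required hand-written impl blocks like the ones shown here. A minimal sketch of the same pattern on a made-up error type, assuming `thiserror` as a dependency; `MyError` and `read_config` are illustrative names, not tantivy APIs.

use std::io;
use thiserror::Error;

#[derive(Debug, Error)]
enum MyError {
    // `#[error(...)]` replaces failure's `#[fail(display = ...)]`, and
    // `#[from]` generates `impl From<io::Error> for MyError`, making the
    // manual From impls unnecessary.
    #[error("An IO error occurred: '{0}'")]
    Io(#[from] io::Error),
    #[error("An invalid argument was passed: '{0}'")]
    InvalidArgument(String),
}

fn read_config(path: &str) -> Result<String, MyError> {
    // `?` relies on the generated From<io::Error> conversion.
    Ok(std::fs::read_to_string(path)?)
}

fn main() {
    match read_config("missing.toml") {
        Ok(config) => println!("read {} bytes", config.len()),
        Err(err) => println!("{}", err),
    }
    let arg_err = MyError::InvalidArgument("negative limit".to_string());
    println!("{}", arg_err);
}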

View File

@@ -6,114 +6,31 @@ pub use self::writer::BytesFastFieldWriter;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value}; use crate::schema::Schema;
use crate::{query::TermQuery, schema::FAST, schema::INDEXED, schema::STORED}; use crate::Index;
use crate::{DocAddress, DocSet, Index, Searcher, Term};
use std::ops::Deref;
#[test] #[test]
fn test_bytes() -> crate::Result<()> { fn test_bytes() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let bytes_field = schema_builder.add_bytes_field("bytesfield", FAST); let field = schema_builder.add_bytes_field("bytesfield");
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(bytes_field=>vec![0u8, 1, 2, 3])); index_writer.add_document(doc!(field=>vec![0u8, 1, 2, 3]));
index_writer.add_document(doc!(bytes_field=>vec![])); index_writer.add_document(doc!(field=>vec![]));
index_writer.add_document(doc!(bytes_field=>vec![255u8])); index_writer.add_document(doc!(field=>vec![255u8]));
index_writer.add_document(doc!(bytes_field=>vec![1u8, 3, 5, 7, 9])); index_writer.add_document(doc!(field=>vec![1u8, 3, 5, 7, 9]));
index_writer.add_document(doc!(bytes_field=>vec![0u8; 1000])); index_writer.add_document(doc!(field=>vec![0u8; 1000]));
index_writer.commit()?; assert!(index_writer.commit().is_ok());
let searcher = index.reader()?.searcher(); let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let bytes_reader = segment_reader.fast_fields().bytes(bytes_field).unwrap(); let bytes_reader = segment_reader.fast_fields().bytes(field).unwrap();
assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]); assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
assert!(bytes_reader.get_bytes(1).is_empty()); assert!(bytes_reader.get_bytes(1).is_empty());
assert_eq!(bytes_reader.get_bytes(2), &[255u8]); assert_eq!(bytes_reader.get_bytes(2), &[255u8]);
assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]); assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]);
let long = vec![0u8; 1000]; let long = vec![0u8; 1000];
assert_eq!(bytes_reader.get_bytes(4), long.as_slice()); assert_eq!(bytes_reader.get_bytes(4), long.as_slice());
Ok(())
}
fn create_index_for_test<T: Into<BytesOptions>>(
byte_options: T,
) -> crate::Result<impl Deref<Target = Searcher>> {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_bytes_field("string_bytes", byte_options.into());
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(
field => b"tantivy".as_ref(),
field => b"lucene".as_ref()
));
index_writer.commit()?;
Ok(index.reader()?.searcher())
}
#[test]
fn test_stored_bytes() -> crate::Result<()> {
let searcher = create_index_for_test(STORED)?;
assert_eq!(searcher.num_docs(), 1);
let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
let field = searcher.schema().get_field("string_bytes").unwrap();
let values: Vec<&Value> = retrieved_doc.get_all(field).collect();
assert_eq!(values.len(), 2);
let values_bytes: Vec<&[u8]> = values
.into_iter()
.flat_map(|value| value.bytes_value())
.collect();
assert_eq!(values_bytes, &[&b"tantivy"[..], &b"lucene"[..]]);
Ok(())
}
#[test]
fn test_non_stored_bytes() -> crate::Result<()> {
let searcher = create_index_for_test(INDEXED)?;
assert_eq!(searcher.num_docs(), 1);
let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
let field = searcher.schema().get_field("string_bytes").unwrap();
assert!(retrieved_doc.get_first(field).is_none());
Ok(())
}
#[test]
fn test_index_bytes() -> crate::Result<()> {
let searcher = create_index_for_test(INDEXED)?;
assert_eq!(searcher.num_docs(), 1);
let field = searcher.schema().get_field("string_bytes").unwrap();
let term = Term::from_field_bytes(field, b"lucene".as_ref());
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
let term_weight = term_query.specialized_weight(&searcher, true)?;
let term_scorer = term_weight.specialized_scorer(searcher.segment_reader(0), 1.0f32)?;
assert_eq!(term_scorer.doc(), 0u32);
Ok(())
}
#[test]
fn test_non_index_bytes() -> crate::Result<()> {
let searcher = create_index_for_test(STORED)?;
assert_eq!(searcher.num_docs(), 1);
let field = searcher.schema().get_field("string_bytes").unwrap();
let term = Term::from_field_bytes(field, b"lucene".as_ref());
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
let term_weight_res = term_query.specialized_weight(&searcher, false);
assert!(matches!(
term_weight_res,
Err(crate::TantivyError::SchemaError(_))
));
Ok(())
}
#[test]
fn test_fast_bytes_multivalue_value() -> crate::Result<()> {
let searcher = create_index_for_test(FAST)?;
assert_eq!(searcher.num_docs(), 1);
let fast_fields = searcher.segment_reader(0u32).fast_fields();
let field = searcher.schema().get_field("string_bytes").unwrap();
let fast_field_reader = fast_fields.bytes(field).unwrap();
assert_eq!(fast_field_reader.get_bytes(0u32), b"tantivy");
Ok(())
} }
} }

View File

@@ -1,5 +1,6 @@
use crate::directory::FileSlice; use owning_ref::OwningRef;
use crate::directory::OwnedBytes;
use crate::directory::ReadOnlySource;
use crate::fastfield::FastFieldReader; use crate::fastfield::FastFieldReader;
use crate::DocId; use crate::DocId;
@@ -16,16 +17,16 @@ use crate::DocId;
#[derive(Clone)] #[derive(Clone)]
pub struct BytesFastFieldReader { pub struct BytesFastFieldReader {
idx_reader: FastFieldReader<u64>, idx_reader: FastFieldReader<u64>,
values: OwnedBytes, values: OwningRef<ReadOnlySource, [u8]>,
} }
impl BytesFastFieldReader { impl BytesFastFieldReader {
pub(crate) fn open( pub(crate) fn open(
idx_reader: FastFieldReader<u64>, idx_reader: FastFieldReader<u64>,
values_file: FileSlice, values_source: ReadOnlySource,
) -> crate::Result<BytesFastFieldReader> { ) -> BytesFastFieldReader {
let values = values_file.read_bytes()?; let values = OwningRef::new(values_source).map(|source| &source[..]);
Ok(BytesFastFieldReader { idx_reader, values }) BytesFastFieldReader { idx_reader, values }
} }
fn range(&self, doc: DocId) -> (usize, usize) { fn range(&self, doc: DocId) -> (usize, usize) {
@@ -37,7 +38,7 @@ impl BytesFastFieldReader {
/// Returns the bytes associated to the given `doc` /// Returns the bytes associated to the given `doc`
pub fn get_bytes(&self, doc: DocId) -> &[u8] { pub fn get_bytes(&self, doc: DocId) -> &[u8] {
let (start, stop) = self.range(doc); let (start, stop) = self.range(doc);
&self.values.as_slice()[start..stop] &self.values[start..stop]
} }
/// Returns the overall number of bytes in this bytes fast field. /// Returns the overall number of bytes in this bytes fast field.
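The bytes fast field shown above pairs a `u64` offsets column with a single flat byte buffer: for each document, `range` reads offsets `doc` and `doc + 1`, and `get_bytes` slices the buffer between them. Below is a standalone sketch of that layout, with plain vectors standing in for the FastFieldReader and the values buffer.

// Sketch of the offsets-plus-flat-buffer layout; not tantivy's actual types.
struct BytesColumn {
    offsets: Vec<u64>, // one entry per document, plus a final end offset
    values: Vec<u8>,   // all documents' bytes concatenated
}

impl BytesColumn {
    fn get_bytes(&self, doc: usize) -> &[u8] {
        let start = self.offsets[doc] as usize;
        let stop = self.offsets[doc + 1] as usize;
        &self.values[start..stop]
    }
}

fn main() {
    // doc 0 -> [0, 1, 2, 3], doc 1 -> [], doc 2 -> [255]
    let column = BytesColumn {
        offsets: vec![0, 4, 4, 5],
        values: vec![0, 1, 2, 3, 255],
    };
    assert_eq!(column.get_bytes(0), &[0u8, 1, 2, 3]);
    assert!(column.get_bytes(1).is_empty());
    assert_eq!(column.get_bytes(2), &[255u8]);
}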

View File

@@ -49,10 +49,16 @@ impl BytesFastFieldWriter {
/// matching field values present in the document. /// matching field values present in the document.
pub fn add_document(&mut self, doc: &Document) { pub fn add_document(&mut self, doc: &Document) {
self.next_doc(); self.next_doc();
for field_value in doc.get_all(self.field) { for field_value in doc.field_values() {
if let Value::Bytes(ref bytes) = field_value { if field_value.field() == self.field {
self.vals.extend_from_slice(bytes); if let Value::Bytes(ref bytes) = *field_value.value() {
return; self.vals.extend_from_slice(bytes);
} else {
panic!(
"Bytes field contained non-Bytes Value!. Field {:?} = {:?}",
self.field, field_value
);
}
} }
} }
} }
@@ -70,18 +76,21 @@ impl BytesFastFieldWriter {
/// Serializes the fast field values by pushing them to the `FastFieldSerializer`. /// Serializes the fast field values by pushing them to the `FastFieldSerializer`.
pub fn serialize(&self, serializer: &mut FastFieldSerializer) -> io::Result<()> { pub fn serialize(&self, serializer: &mut FastFieldSerializer) -> io::Result<()> {
// writing the offset index {
let mut doc_index_serializer = // writing the offset index
serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?; let mut doc_index_serializer =
for &offset in &self.doc_index { serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
doc_index_serializer.add_val(offset)?; for &offset in &self.doc_index {
doc_index_serializer.add_val(offset)?;
}
doc_index_serializer.add_val(self.vals.len() as u64)?;
doc_index_serializer.close_field()?;
}
{
// writing the values themselves
let mut value_serializer = serializer.new_bytes_fast_field_with_idx(self.field, 1)?;
value_serializer.write_all(&self.vals)?;
} }
doc_index_serializer.add_val(self.vals.len() as u64)?;
doc_index_serializer.close_field()?;
// writing the values themselves
serializer
.new_bytes_fast_field_with_idx(self.field, 1)?
.write_all(&self.vals)?;
Ok(()) Ok(())
} }
} }

View File

@@ -1,6 +1,5 @@
use crate::common::{BitSet, HasLen}; use crate::common::{BitSet, HasLen};
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::directory::OwnedBytes;
use crate::directory::WritePtr; use crate::directory::WritePtr;
use crate::space_usage::ByteCount; use crate::space_usage::ByteCount;
use crate::DocId; use crate::DocId;
@@ -40,7 +39,7 @@ pub fn write_delete_bitset(
/// Set of deleted `DocId`s. /// Set of deleted `DocId`s.
#[derive(Clone)] #[derive(Clone)]
pub struct DeleteBitSet { pub struct DeleteBitSet {
data: OwnedBytes, data: ReadOnlySource,
len: usize, len: usize,
} }
@@ -54,27 +53,26 @@ impl DeleteBitSet {
for &doc in docs { for &doc in docs {
bitset.insert(doc); bitset.insert(doc);
} }
let directory = RAMDirectory::create(); let mut directory = RAMDirectory::create();
let path = Path::new("dummydeletebitset"); let path = Path::new("dummydeletebitset");
let mut wrt = directory.open_write(path).unwrap(); let mut wrt = directory.open_write(path).unwrap();
write_delete_bitset(&bitset, max_doc, &mut wrt).unwrap(); write_delete_bitset(&bitset, max_doc, &mut wrt).unwrap();
wrt.terminate().unwrap(); wrt.terminate().unwrap();
let file = directory.open_read(path).unwrap(); let source = directory.open_read(path).unwrap();
Self::open(file).unwrap() Self::open(source)
} }
/// Opens a delete bitset given its file. /// Opens a delete bitset given its data source.
pub fn open(file: FileSlice) -> crate::Result<DeleteBitSet> { pub fn open(data: ReadOnlySource) -> DeleteBitSet {
let bytes = file.read_bytes()?; let num_deleted: usize = data
let num_deleted: usize = bytes
.as_slice() .as_slice()
.iter() .iter()
.map(|b| b.count_ones() as usize) .map(|b| b.count_ones() as usize)
.sum(); .sum();
Ok(DeleteBitSet { DeleteBitSet {
data: bytes, data,
len: num_deleted, len: num_deleted,
}) }
} }
/// Returns true iff the document is still "alive". In other words, if it has not been deleted. /// Returns true iff the document is still "alive". In other words, if it has not been deleted.
@@ -86,7 +84,7 @@ impl DeleteBitSet {
#[inline(always)] #[inline(always)]
pub fn is_deleted(&self, doc: DocId) -> bool { pub fn is_deleted(&self, doc: DocId) -> bool {
let byte_offset = doc / 8u32; let byte_offset = doc / 8u32;
let b: u8 = self.data.as_slice()[byte_offset as usize]; let b: u8 = (*self.data)[byte_offset as usize];
let shift = (doc & 7u32) as u8; let shift = (doc & 7u32) as u8;
b & (1u8 << shift) != 0 b & (1u8 << shift) != 0
} }
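`is_deleted` above addresses the serialized bitset one bit per `DocId`: the byte is `doc / 8` and the bit within it is the low three bits of the doc id. Here is a small worked example over a hand-built buffer; it is a sketch of the bit arithmetic, not the DeleteBitSet API.

// byte = doc / 8, bit = doc & 7, deleted iff that bit is set.
fn is_deleted(bitset: &[u8], doc: u32) -> bool {
    let byte_offset = (doc / 8) as usize;
    let shift = (doc & 7) as u8;
    bitset[byte_offset] & (1u8 << shift) != 0
}

fn main() {
    // Docs 1, 9 and 10 are deleted: byte 0 = 0b0000_0010, byte 1 = 0b0000_0110.
    let bitset = [0b0000_0010u8, 0b0000_0110u8];
    assert!(!is_deleted(&bitset, 0));
    assert!(is_deleted(&bitset, 1));
    assert!(is_deleted(&bitset, 9));
    assert!(is_deleted(&bitset, 10));
    assert!(!is_deleted(&bitset, 15));
}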

View File

@@ -4,8 +4,8 @@ use std::result;
/// `FastFieldNotAvailableError` is returned when the /// `FastFieldNotAvailableError` is returned when the
/// user requested a fast field reader, and the field was not /// user requested a fast field reader, and the field was not
/// defined in the schema as a fast field. /// defined in the schema as a fast field.
#[derive(Debug, Error)] #[derive(Debug, Fail)]
#[error("Fast field not available: '{field_name:?}'")] #[fail(display = "Fast field not available: '{:?}'", field_name)]
pub struct FastFieldNotAvailableError { pub struct FastFieldNotAvailableError {
field_name: String, field_name: String,
} }

View File

@@ -73,61 +73,7 @@ impl FacetReader {
} }
/// Return the list of facet ordinals associated to a document. /// Return the list of facet ordinals associated to a document.
pub fn facet_ords(&self, doc: DocId, output: &mut Vec<u64>) { pub fn facet_ords(&mut self, doc: DocId, output: &mut Vec<u64>) {
self.term_ords.get_vals(doc, output); self.term_ords.get_vals(doc, output);
} }
} }
#[cfg(test)]
mod tests {
use crate::Index;
use crate::{
schema::{Facet, SchemaBuilder},
Document,
};
#[test]
fn test_facet_not_populated_for_all_docs() -> crate::Result<()> {
let mut schema_builder = SchemaBuilder::default();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(facet_field=>Facet::from_text("/a/b")));
index_writer.add_document(Document::default());
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let facet_reader = searcher
.segment_reader(0u32)
.facet_reader(facet_field)
.unwrap();
let mut facet_ords = Vec::new();
facet_reader.facet_ords(0u32, &mut facet_ords);
assert_eq!(&facet_ords, &[2u64]);
facet_reader.facet_ords(1u32, &mut facet_ords);
assert!(facet_ords.is_empty());
Ok(())
}
#[test]
fn test_facet_not_populated_for_any_docs() -> crate::Result<()> {
let mut schema_builder = SchemaBuilder::default();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(Document::default());
index_writer.add_document(Document::default());
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let facet_reader = searcher
.segment_reader(0u32)
.facet_reader(facet_field)
.unwrap();
let mut facet_ords = Vec::new();
facet_reader.facet_ords(0u32, &mut facet_ords);
assert!(facet_ords.is_empty());
facet_reader.facet_ords(1u32, &mut facet_ords);
assert!(facet_ords.is_empty());
Ok(())
}
}

View File

@@ -33,14 +33,11 @@ pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders; pub use self::readers::FastFieldReaders;
pub use self::serializer::FastFieldSerializer; pub use self::serializer::FastFieldSerializer;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter}; pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::chrono::{NaiveDateTime, Utc};
use crate::common; use crate::common;
use crate::schema::Cardinality; use crate::schema::Cardinality;
use crate::schema::FieldType; use crate::schema::FieldType;
use crate::schema::Value; use crate::schema::Value;
use crate::{
chrono::{NaiveDateTime, Utc},
schema::Type,
};
mod bytes; mod bytes;
mod delete; mod delete;
@@ -79,9 +76,6 @@ pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd {
fn make_zero() -> Self { fn make_zero() -> Self {
Self::from_u64(0i64.to_u64()) Self::from_u64(0i64.to_u64())
} }
/// Returns the `schema::Type` for this FastValue.
fn to_type() -> Type;
} }
impl FastValue for u64 { impl FastValue for u64 {
@@ -104,10 +98,6 @@ impl FastValue for u64 {
fn as_u64(&self) -> u64 { fn as_u64(&self) -> u64 {
*self *self
} }
fn to_type() -> Type {
Type::U64
}
} }
impl FastValue for i64 { impl FastValue for i64 {
@@ -129,10 +119,6 @@ impl FastValue for i64 {
fn as_u64(&self) -> u64 { fn as_u64(&self) -> u64 {
*self as u64 *self as u64
} }
fn to_type() -> Type {
Type::I64
}
} }
impl FastValue for f64 { impl FastValue for f64 {
@@ -154,10 +140,6 @@ impl FastValue for f64 {
fn as_u64(&self) -> u64 { fn as_u64(&self) -> u64 {
self.to_bits() self.to_bits()
} }
fn to_type() -> Type {
Type::F64
}
} }
impl FastValue for crate::DateTime { impl FastValue for crate::DateTime {
@@ -180,10 +162,6 @@ impl FastValue for crate::DateTime {
fn as_u64(&self) -> u64 { fn as_u64(&self) -> u64 {
self.timestamp().as_u64() self.timestamp().as_u64()
} }
fn to_type() -> Type {
Type::Date
}
} }
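The `FastValue` trait above lets every supported type round-trip through a `u64` whose ordering matches the original values, which is what allows one bit-packed reader to serve u64, i64, f64 and date fields alike. Below is a sketch of one order-preserving i64 mapping (the usual sign-bit flip); whether tantivy uses exactly this encoding is an assumption here.

// Order-preserving i64 <-> u64 mapping via a sign-bit flip. Illustrative of
// the FastValue to_u64/from_u64 contract; not asserted to be tantivy's exact encoding.
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

fn main() {
    let values = [i64::MIN, -100, -1, 0, 1, 9_999, i64::MAX];
    for &val in &values {
        // The mapping is lossless...
        assert_eq!(u64_to_i64(i64_to_u64(val)), val);
    }
    for pair in values.windows(2) {
        // ...and monotone, so min/max and range logic keep working on the u64s.
        assert!(i64_to_u64(pair[0]) < i64_to_u64(pair[1]));
    }
}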
fn value_to_u64(value: &Value) -> u64 { fn value_to_u64(value: &Value) -> u64 {
@@ -209,7 +187,6 @@ mod tests {
use crate::schema::FAST; use crate::schema::FAST;
use crate::schema::{Document, IntOptions}; use crate::schema::{Document, IntOptions};
use crate::{Index, SegmentId, SegmentReader}; use crate::{Index, SegmentId, SegmentReader};
use common::HasLen;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
use rand::rngs::StdRng; use rand::rngs::StdRng;
@@ -240,9 +217,9 @@ mod tests {
} }
#[test] #[test]
fn test_intfastfield_small() -> crate::Result<()> { fn test_intfastfield_small() {
let path = Path::new("test"); let path = Path::new("test");
let directory: RAMDirectory = RAMDirectory::create(); let mut directory: RAMDirectory = RAMDirectory::create();
{ {
let write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::from_write(write).unwrap(); let mut serializer = FastFieldSerializer::from_write(write).unwrap();
@@ -255,24 +232,27 @@ mod tests {
.unwrap(); .unwrap();
serializer.close().unwrap(); serializer.close().unwrap();
} }
let file = directory.open_read(&path).unwrap(); let source = directory.open_read(&path).unwrap();
assert_eq!(file.len(), 36 as usize); {
let composite_file = CompositeFile::open(&file)?; assert_eq!(source.len(), 36 as usize);
let file = composite_file.open_read(*FIELD).unwrap(); }
let fast_field_reader = FastFieldReader::<u64>::open(file)?; {
assert_eq!(fast_field_reader.get(0), 13u64); let composite_file = CompositeFile::open(&source).unwrap();
assert_eq!(fast_field_reader.get(1), 14u64); let field_source = composite_file.open_read(*FIELD).unwrap();
assert_eq!(fast_field_reader.get(2), 2u64); let fast_field_reader = FastFieldReader::<u64>::open(field_source);
Ok(()) assert_eq!(fast_field_reader.get(0), 13u64);
assert_eq!(fast_field_reader.get(1), 14u64);
assert_eq!(fast_field_reader.get(2), 2u64);
}
} }
#[test] #[test]
fn test_intfastfield_large() -> crate::Result<()> { fn test_intfastfield_large() {
let path = Path::new("test"); let path = Path::new("test");
let directory: RAMDirectory = RAMDirectory::create(); let mut directory: RAMDirectory = RAMDirectory::create();
{ {
let write: WritePtr = directory.open_write(Path::new("test"))?; let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::from_write(write)?; let mut serializer = FastFieldSerializer::from_write(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA); let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
fast_field_writers.add_document(&doc!(*FIELD=>4u64)); fast_field_writers.add_document(&doc!(*FIELD=>4u64));
fast_field_writers.add_document(&doc!(*FIELD=>14_082_001u64)); fast_field_writers.add_document(&doc!(*FIELD=>14_082_001u64));
@@ -283,15 +263,19 @@ mod tests {
fast_field_writers.add_document(&doc!(*FIELD=>1_002u64)); fast_field_writers.add_document(&doc!(*FIELD=>1_002u64));
fast_field_writers.add_document(&doc!(*FIELD=>1_501u64)); fast_field_writers.add_document(&doc!(*FIELD=>1_501u64));
fast_field_writers.add_document(&doc!(*FIELD=>215u64)); fast_field_writers.add_document(&doc!(*FIELD=>215u64));
fast_field_writers.serialize(&mut serializer, &HashMap::new())?; fast_field_writers
serializer.close()?; .serialize(&mut serializer, &HashMap::new())
.unwrap();
serializer.close().unwrap();
} }
let file = directory.open_read(&path)?; let source = directory.open_read(&path).unwrap();
assert_eq!(file.len(), 61 as usize);
{ {
let fast_fields_composite = CompositeFile::open(&file)?; assert_eq!(source.len(), 61 as usize);
}
{
let fast_fields_composite = CompositeFile::open(&source).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap(); let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = FastFieldReader::<u64>::open(data)?; let fast_field_reader = FastFieldReader::<u64>::open(data);
assert_eq!(fast_field_reader.get(0), 4u64); assert_eq!(fast_field_reader.get(0), 4u64);
assert_eq!(fast_field_reader.get(1), 14_082_001u64); assert_eq!(fast_field_reader.get(1), 14_082_001u64);
assert_eq!(fast_field_reader.get(2), 3_052u64); assert_eq!(fast_field_reader.get(2), 3_052u64);
@@ -302,13 +286,12 @@ mod tests {
assert_eq!(fast_field_reader.get(7), 1_501u64); assert_eq!(fast_field_reader.get(7), 1_501u64);
assert_eq!(fast_field_reader.get(8), 215u64); assert_eq!(fast_field_reader.get(8), 215u64);
} }
Ok(())
} }
#[test] #[test]
fn test_intfastfield_null_amplitude() -> crate::Result<()> { fn test_intfastfield_null_amplitude() {
let path = Path::new("test"); let path = Path::new("test");
let directory: RAMDirectory = RAMDirectory::create(); let mut directory: RAMDirectory = RAMDirectory::create();
{ {
let write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
@@ -322,23 +305,24 @@ mod tests {
.unwrap(); .unwrap();
serializer.close().unwrap(); serializer.close().unwrap();
} }
let file = directory.open_read(&path).unwrap(); let source = directory.open_read(&path).unwrap();
assert_eq!(file.len(), 34 as usize);
{ {
let fast_fields_composite = CompositeFile::open(&file).unwrap(); assert_eq!(source.len(), 34 as usize);
}
{
let fast_fields_composite = CompositeFile::open(&source).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap(); let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = FastFieldReader::<u64>::open(data)?; let fast_field_reader = FastFieldReader::<u64>::open(data);
for doc in 0..10_000 { for doc in 0..10_000 {
assert_eq!(fast_field_reader.get(doc), 100_000u64); assert_eq!(fast_field_reader.get(doc), 100_000u64);
} }
} }
Ok(())
} }
#[test] #[test]
fn test_intfastfield_large_numbers() -> crate::Result<()> { fn test_intfastfield_large_numbers() {
let path = Path::new("test"); let path = Path::new("test");
let directory: RAMDirectory = RAMDirectory::create(); let mut directory: RAMDirectory = RAMDirectory::create();
{ {
let write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
@@ -354,12 +338,14 @@ mod tests {
.unwrap(); .unwrap();
serializer.close().unwrap(); serializer.close().unwrap();
} }
let file = directory.open_read(&path).unwrap(); let source = directory.open_read(&path).unwrap();
assert_eq!(file.len(), 80042 as usize);
{ {
let fast_fields_composite = CompositeFile::open(&file)?; assert_eq!(source.len(), 80042 as usize);
}
{
let fast_fields_composite = CompositeFile::open(&source).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap(); let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = FastFieldReader::<u64>::open(data)?; let fast_field_reader = FastFieldReader::<u64>::open(data);
assert_eq!(fast_field_reader.get(0), 0u64); assert_eq!(fast_field_reader.get(0), 0u64);
for doc in 1..10_001 { for doc in 1..10_001 {
assert_eq!( assert_eq!(
@@ -368,13 +354,12 @@ mod tests {
); );
} }
} }
Ok(())
} }
#[test] #[test]
fn test_signed_intfastfield() -> crate::Result<()> { fn test_signed_intfastfield() {
let path = Path::new("test"); let path = Path::new("test");
let directory: RAMDirectory = RAMDirectory::create(); let mut directory: RAMDirectory = RAMDirectory::create();
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let i64_field = schema_builder.add_i64_field("field", FAST); let i64_field = schema_builder.add_i64_field("field", FAST);
@@ -393,12 +378,14 @@ mod tests {
.unwrap(); .unwrap();
serializer.close().unwrap(); serializer.close().unwrap();
} }
let file = directory.open_read(&path).unwrap(); let source = directory.open_read(&path).unwrap();
assert_eq!(file.len(), 17709 as usize);
{ {
let fast_fields_composite = CompositeFile::open(&file)?; assert_eq!(source.len(), 17709 as usize);
}
{
let fast_fields_composite = CompositeFile::open(&source).unwrap();
let data = fast_fields_composite.open_read(i64_field).unwrap(); let data = fast_fields_composite.open_read(i64_field).unwrap();
let fast_field_reader = FastFieldReader::<i64>::open(data)?; let fast_field_reader = FastFieldReader::<i64>::open(data);
assert_eq!(fast_field_reader.min_value(), -100i64); assert_eq!(fast_field_reader.min_value(), -100i64);
assert_eq!(fast_field_reader.max_value(), 9_999i64); assert_eq!(fast_field_reader.max_value(), 9_999i64);
@@ -411,13 +398,12 @@ mod tests {
assert_eq!(buffer[i], -100i64 + 53i64 + i as i64); assert_eq!(buffer[i], -100i64 + 53i64 + i as i64);
} }
} }
Ok(())
} }
#[test] #[test]
fn test_signed_intfastfield_default_val() -> crate::Result<()> { fn test_signed_intfastfield_default_val() {
let path = Path::new("test"); let path = Path::new("test");
let directory: RAMDirectory = RAMDirectory::create(); let mut directory: RAMDirectory = RAMDirectory::create();
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let i64_field = schema_builder.add_i64_field("field", FAST); let i64_field = schema_builder.add_i64_field("field", FAST);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -434,14 +420,13 @@ mod tests {
serializer.close().unwrap(); serializer.close().unwrap();
} }
let file = directory.open_read(&path).unwrap(); let source = directory.open_read(&path).unwrap();
{ {
let fast_fields_composite = CompositeFile::open(&file).unwrap(); let fast_fields_composite = CompositeFile::open(&source).unwrap();
let data = fast_fields_composite.open_read(i64_field).unwrap(); let data = fast_fields_composite.open_read(i64_field).unwrap();
let fast_field_reader = FastFieldReader::<i64>::open(data)?; let fast_field_reader = FastFieldReader::<i64>::open(data);
assert_eq!(fast_field_reader.get(0u32), 0i64); assert_eq!(fast_field_reader.get(0u32), 0i64);
} }
Ok(())
} }
// Warning: this generates the same permutation at each call // Warning: this generates the same permutation at each call
@@ -452,26 +437,28 @@ mod tests {
} }
#[test] #[test]
fn test_intfastfield_permutation() -> crate::Result<()> { fn test_intfastfield_permutation() {
let path = Path::new("test"); let path = Path::new("test");
let permutation = generate_permutation(); let permutation = generate_permutation();
let n = permutation.len(); let n = permutation.len();
let directory = RAMDirectory::create(); let mut directory = RAMDirectory::create();
{ {
let write: WritePtr = directory.open_write(Path::new("test"))?; let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::from_write(write)?; let mut serializer = FastFieldSerializer::from_write(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA); let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for &x in &permutation { for &x in &permutation {
fast_field_writers.add_document(&doc!(*FIELD=>x)); fast_field_writers.add_document(&doc!(*FIELD=>x));
} }
fast_field_writers.serialize(&mut serializer, &HashMap::new())?; fast_field_writers
serializer.close()?; .serialize(&mut serializer, &HashMap::new())
.unwrap();
serializer.close().unwrap();
} }
let file = directory.open_read(&path)?; let source = directory.open_read(&path).unwrap();
{ {
let fast_fields_composite = CompositeFile::open(&file)?; let fast_fields_composite = CompositeFile::open(&source).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap(); let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = FastFieldReader::<u64>::open(data)?; let fast_field_reader = FastFieldReader::<u64>::open(data);
let mut a = 0u64; let mut a = 0u64;
for _ in 0..n { for _ in 0..n {
@@ -479,7 +466,6 @@ mod tests {
a = fast_field_reader.get(a as u32); a = fast_field_reader.get(a as u32);
} }
} }
Ok(())
} }
#[test] #[test]
@@ -488,7 +474,7 @@ mod tests {
let date_field = schema_builder.add_date_field("date", FAST); let date_field = schema_builder.add_date_field("date", FAST);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy)); index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now())); index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
@@ -525,7 +511,7 @@ mod tests {
); );
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy)); index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!( index_writer.add_document(doc!(
date_field => crate::DateTime::from_u64(1i64.to_u64()), date_field => crate::DateTime::from_u64(1i64.to_u64()),
@@ -612,7 +598,7 @@ mod bench {
fn bench_intfastfield_linear_fflookup(b: &mut Bencher) { fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
let path = Path::new("test"); let path = Path::new("test");
let permutation = generate_permutation(); let permutation = generate_permutation();
let directory: RAMDirectory = RAMDirectory::create(); let mut directory: RAMDirectory = RAMDirectory::create();
{ {
let write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::from_write(write).unwrap(); let mut serializer = FastFieldSerializer::from_write(write).unwrap();
@@ -625,9 +611,9 @@ mod bench {
.unwrap(); .unwrap();
serializer.close().unwrap(); serializer.close().unwrap();
} }
let file = directory.open_read(&path).unwrap(); let source = directory.open_read(&path).unwrap();
{ {
let fast_fields_composite = CompositeFile::open(&file).unwrap(); let fast_fields_composite = CompositeFile::open(&source).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap(); let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = FastFieldReader::<u64>::open(data); let fast_field_reader = FastFieldReader::<u64>::open(data);
@@ -646,7 +632,7 @@ mod bench {
fn bench_intfastfield_fflookup(b: &mut Bencher) { fn bench_intfastfield_fflookup(b: &mut Bencher) {
let path = Path::new("test"); let path = Path::new("test");
let permutation = generate_permutation(); let permutation = generate_permutation();
let directory: RAMDirectory = RAMDirectory::create(); let mut directory: RAMDirectory = RAMDirectory::create();
{ {
let write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::from_write(write).unwrap(); let mut serializer = FastFieldSerializer::from_write(write).unwrap();
@@ -659,9 +645,9 @@ mod bench {
.unwrap(); .unwrap();
serializer.close().unwrap(); serializer.close().unwrap();
} }
let file = directory.open_read(&path).unwrap(); let source = directory.open_read(&path).unwrap();
{ {
let fast_fields_composite = CompositeFile::open(&file).unwrap(); let fast_fields_composite = CompositeFile::open(&source).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap(); let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = FastFieldReader::<u64>::open(data); let fast_field_reader = FastFieldReader::<u64>::open(data);

View File

@@ -25,7 +25,7 @@ mod tests {
); );
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(field=>1u64, field=>3u64)); index_writer.add_document(doc!(field=>1u64, field=>3u64));
index_writer.add_document(doc!()); index_writer.add_document(doc!());
index_writer.add_document(doc!(field=>4u64)); index_writer.add_document(doc!(field=>4u64));
@@ -64,7 +64,7 @@ mod tests {
schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored()); schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let first_time_stamp = chrono::Utc::now(); let first_time_stamp = chrono::Utc::now();
index_writer.add_document( index_writer.add_document(
doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64), doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64),
@@ -100,7 +100,6 @@ mod tests {
.get_first(date_field) .get_first(date_field)
.expect("cannot find value") .expect("cannot find value")
.date_value() .date_value()
.unwrap()
.timestamp(), .timestamp(),
first_time_stamp.timestamp() first_time_stamp.timestamp()
); );
@@ -109,7 +108,7 @@ mod tests {
.get_first(time_i) .get_first(time_i)
.expect("cannot find value") .expect("cannot find value")
.i64_value(), .i64_value(),
Some(1i64) 1i64
); );
} }
} }
@@ -132,7 +131,6 @@ mod tests {
.get_first(date_field) .get_first(date_field)
.expect("cannot find value") .expect("cannot find value")
.date_value() .date_value()
.unwrap()
.timestamp(), .timestamp(),
two_secs_ahead.timestamp() two_secs_ahead.timestamp()
); );
@@ -141,7 +139,7 @@ mod tests {
.get_first(time_i) .get_first(time_i)
.expect("cannot find value") .expect("cannot find value")
.i64_value(), .i64_value(),
Some(3i64) 3i64
); );
} }
} }
@@ -188,7 +186,7 @@ mod tests {
); );
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(field=> 1i64, field => 3i64)); index_writer.add_document(doc!(field=> 1i64, field => 3i64));
index_writer.add_document(doc!()); index_writer.add_document(doc!());
index_writer.add_document(doc!(field=> -4i64)); index_writer.add_document(doc!(field=> -4i64));
@@ -199,14 +197,22 @@ mod tests {
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let mut vals = Vec::new(); let mut vals = Vec::new();
let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap(); let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap();
multi_value_reader.get_vals(2, &mut vals); {
assert_eq!(&vals, &[-4i64]); multi_value_reader.get_vals(2, &mut vals);
multi_value_reader.get_vals(0, &mut vals); assert_eq!(&vals, &[-4i64]);
assert_eq!(&vals, &[1i64, 3i64]); }
multi_value_reader.get_vals(1, &mut vals); {
assert!(vals.is_empty()); multi_value_reader.get_vals(0, &mut vals);
multi_value_reader.get_vals(3, &mut vals); assert_eq!(&vals, &[1i64, 3i64]);
assert_eq!(&vals, &[-5i64, -20i64, 1i64]); }
{
multi_value_reader.get_vals(1, &mut vals);
assert!(vals.is_empty());
}
{
multi_value_reader.get_vals(3, &mut vals);
assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
}
} }
#[test] #[test]
#[ignore] #[ignore]
@@ -215,7 +221,7 @@ mod tests {
let field = schema_builder.add_facet_field("facetfield"); let field = schema_builder.add_facet_field("facetfield");
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for i in 0..100_000 { for i in 0..100_000 {
index_writer.add_document(doc!(field=> Facet::from(format!("/lang/{}", i).as_str()))); index_writer.add_document(doc!(field=> Facet::from(format!("/lang/{}", i).as_str())));
} }

View File

@@ -74,7 +74,7 @@ mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index let mut index_writer = index
.writer_for_tests() .writer_with_num_threads(1, 30_000_000)
.expect("Failed to create index writer."); .expect("Failed to create index writer.");
index_writer.add_document(doc!( index_writer.add_document(doc!(
facet_field => Facet::from("/category/cat2"), facet_field => Facet::from("/category/cat2"),

View File

@@ -143,7 +143,7 @@ impl MultiValueIntFastFieldWriter {
.iter() .iter()
.map(|val| *mapping.get(val).expect("Missing term ordinal")); .map(|val| *mapping.get(val).expect("Missing term ordinal"));
doc_vals.extend(remapped_vals); doc_vals.extend(remapped_vals);
doc_vals.sort_unstable(); doc_vals.sort();
for &val in &doc_vals { for &val in &doc_vals {
value_serializer.add_val(val)?; value_serializer.add_val(val)?;
} }

View File

@@ -3,12 +3,13 @@ use crate::common::bitpacker::BitUnpacker;
use crate::common::compute_num_bits; use crate::common::compute_num_bits;
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::common::CompositeFile; use crate::common::CompositeFile;
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::directory::{Directory, RAMDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter}; use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::FAST; use crate::schema::FAST;
use crate::DocId; use crate::DocId;
use owning_ref::OwningRef;
use std::collections::HashMap; use std::collections::HashMap;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::path::Path; use std::path::Path;
@@ -19,27 +20,34 @@ use std::path::Path;
/// fast field is required. /// fast field is required.
#[derive(Clone)] #[derive(Clone)]
pub struct FastFieldReader<Item: FastValue> { pub struct FastFieldReader<Item: FastValue> {
bit_unpacker: BitUnpacker, bit_unpacker: BitUnpacker<OwningRef<ReadOnlySource, [u8]>>,
min_value_u64: u64, min_value_u64: u64,
max_value_u64: u64, max_value_u64: u64,
_phantom: PhantomData<Item>, _phantom: PhantomData<Item>,
} }
impl<Item: FastValue> FastFieldReader<Item> { impl<Item: FastValue> FastFieldReader<Item> {
/// Opens a fast field given a file. /// Opens a fast field given a source.
pub fn open(file: FileSlice) -> crate::Result<Self> { pub fn open(data: ReadOnlySource) -> Self {
let mut bytes = file.read_bytes()?; let min_value: u64;
let min_value = u64::deserialize(&mut bytes)?; let amplitude: u64;
let amplitude = u64::deserialize(&mut bytes)?; {
let mut cursor = data.as_slice();
min_value =
u64::deserialize(&mut cursor).expect("Failed to read the min_value of fast field.");
amplitude =
u64::deserialize(&mut cursor).expect("Failed to read the amplitude of fast field.");
}
let max_value = min_value + amplitude; let max_value = min_value + amplitude;
let num_bits = compute_num_bits(amplitude); let num_bits = compute_num_bits(amplitude);
let bit_unpacker = BitUnpacker::new(bytes, num_bits); let owning_ref = OwningRef::new(data).map(|data| &data[16..]);
Ok(FastFieldReader { let bit_unpacker = BitUnpacker::new(owning_ref, num_bits);
FastFieldReader {
min_value_u64: min_value, min_value_u64: min_value,
max_value_u64: max_value, max_value_u64: max_value,
bit_unpacker, bit_unpacker,
_phantom: PhantomData, _phantom: PhantomData,
}) }
} }
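`open` above reads a 16-byte header holding `min_value` and `amplitude`, derives the bit width from the amplitude, and serves each value as `min_value` plus the bit-unpacked delta. The sketch below keeps that shape but stores the deltas as whole little-endian u64s instead of bit-packing them, so it illustrates the decoding path rather than the on-disk format; the byte order is an assumption made purely for the example.

// Simplified decoder: 16-byte header (min_value, amplitude), then per-doc
// deltas, each value reconstructed as min_value + delta.
fn read_u64(bytes: &[u8], pos: usize) -> u64 {
    let mut buf = [0u8; 8];
    buf.copy_from_slice(&bytes[pos..pos + 8]);
    u64::from_le_bytes(buf)
}

fn decode(data: &[u8]) -> Vec<u64> {
    let min_value = read_u64(data, 0);
    let _amplitude = read_u64(data, 8); // would determine the bit width used
    (16..data.len())
        .step_by(8)
        .map(|pos| min_value + read_u64(data, pos))
        .collect()
}

fn main() {
    // Values [13, 14, 2] -> min_value = 2, amplitude = 12, deltas [11, 12, 0].
    let mut data = Vec::new();
    data.extend_from_slice(&2u64.to_le_bytes());
    data.extend_from_slice(&12u64.to_le_bytes());
    for delta in [11u64, 12, 0] {
        data.extend_from_slice(&delta.to_le_bytes());
    }
    assert_eq!(decode(&data), vec![13, 14, 2]);
}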
pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> { pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> {
@@ -127,7 +135,7 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
let field = schema_builder.add_u64_field("field", FAST); let field = schema_builder.add_u64_field("field", FAST);
let schema = schema_builder.build(); let schema = schema_builder.build();
let path = Path::new("__dummy__"); let path = Path::new("__dummy__");
let directory: RAMDirectory = RAMDirectory::create(); let mut directory: RAMDirectory = RAMDirectory::create();
{ {
let write: WritePtr = directory let write: WritePtr = directory
.open_write(path) .open_write(path)
@@ -149,11 +157,12 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
serializer.close().unwrap(); serializer.close().unwrap();
} }
let file = directory.open_read(path).expect("Failed to open the file"); let source = directory.open_read(path).expect("Failed to open the file");
let composite_file = CompositeFile::open(&file).expect("Failed to read the composite file"); let composite_file =
let field_file = composite_file CompositeFile::open(&source).expect("Failed to read the composite file");
let field_source = composite_file
.open_read(field) .open_read(field)
.expect("File component not found"); .expect("File component not found");
FastFieldReader::open(field_file).unwrap() FastFieldReader::open(field_source)
} }
} }

View File

@@ -68,52 +68,45 @@ impl FastFieldReaders {
}; };
for (field, field_entry) in schema.fields() { for (field, field_entry) in schema.fields() {
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
if let FieldType::Bytes(bytes_option) = field_type { if field_type == &FieldType::Bytes {
if !bytes_option.is_fast() { let idx_reader = fast_fields_composite
continue;
}
let fast_field_idx_file = fast_fields_composite
.open_read_with_idx(field, 0) .open_read_with_idx(field, 0)
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?; .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
let idx_reader = FastFieldReader::open(fast_field_idx_file)?; .map(FastFieldReader::open)?;
let data = fast_fields_composite let data = fast_fields_composite
.open_read_with_idx(field, 1) .open_read_with_idx(field, 1)
.ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?; .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
let bytes_fast_field_reader = BytesFastFieldReader::open(idx_reader, data)?;
fast_field_readers fast_field_readers
.fast_bytes .fast_bytes
.insert(field, bytes_fast_field_reader); .insert(field, BytesFastFieldReader::open(idx_reader, data));
} else if let Some((fast_type, cardinality)) = type_and_cardinality(field_type) { } else if let Some((fast_type, cardinality)) = type_and_cardinality(field_type) {
match cardinality { match cardinality {
Cardinality::SingleValue => { Cardinality::SingleValue => {
if let Some(fast_field_data) = fast_fields_composite.open_read(field) { if let Some(fast_field_data) = fast_fields_composite.open_read(field) {
match fast_type { match fast_type {
FastType::U64 => { FastType::U64 => {
let fast_field_reader = FastFieldReader::open(fast_field_data)?; let fast_field_reader = FastFieldReader::open(fast_field_data);
fast_field_readers fast_field_readers
.fast_field_u64 .fast_field_u64
.insert(field, fast_field_reader); .insert(field, fast_field_reader);
} }
FastType::I64 => { FastType::I64 => {
let fast_field_reader = fast_field_readers.fast_field_i64.insert(
FastFieldReader::open(fast_field_data.clone())?; field,
fast_field_readers FastFieldReader::open(fast_field_data.clone()),
.fast_field_i64 );
.insert(field, fast_field_reader);
} }
FastType::F64 => { FastType::F64 => {
let fast_field_reader = fast_field_readers.fast_field_f64.insert(
FastFieldReader::open(fast_field_data.clone())?; field,
fast_field_readers FastFieldReader::open(fast_field_data.clone()),
.fast_field_f64 );
.insert(field, fast_field_reader);
} }
FastType::Date => { FastType::Date => {
let fast_field_reader = fast_field_readers.fast_field_date.insert(
FastFieldReader::open(fast_field_data.clone())?; field,
fast_field_readers FastFieldReader::open(fast_field_data.clone()),
.fast_field_date );
.insert(field, fast_field_reader);
} }
} }
} else { } else {
@@ -124,10 +117,10 @@ impl FastFieldReaders {
let idx_opt = fast_fields_composite.open_read_with_idx(field, 0); let idx_opt = fast_fields_composite.open_read_with_idx(field, 0);
let data_opt = fast_fields_composite.open_read_with_idx(field, 1); let data_opt = fast_fields_composite.open_read_with_idx(field, 1);
if let (Some(fast_field_idx), Some(fast_field_data)) = (idx_opt, data_opt) { if let (Some(fast_field_idx), Some(fast_field_data)) = (idx_opt, data_opt) {
let idx_reader = FastFieldReader::open(fast_field_idx)?; let idx_reader = FastFieldReader::open(fast_field_idx);
match fast_type { match fast_type {
FastType::I64 => { FastType::I64 => {
let vals_reader = FastFieldReader::open(fast_field_data)?; let vals_reader = FastFieldReader::open(fast_field_data);
let multivalued_int_fast_field = let multivalued_int_fast_field =
MultiValueIntFastFieldReader::open(idx_reader, vals_reader); MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
fast_field_readers fast_field_readers
@@ -135,7 +128,7 @@ impl FastFieldReaders {
.insert(field, multivalued_int_fast_field); .insert(field, multivalued_int_fast_field);
} }
FastType::U64 => { FastType::U64 => {
let vals_reader = FastFieldReader::open(fast_field_data)?; let vals_reader = FastFieldReader::open(fast_field_data);
let multivalued_int_fast_field = let multivalued_int_fast_field =
MultiValueIntFastFieldReader::open(idx_reader, vals_reader); MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
fast_field_readers fast_field_readers
@@ -143,7 +136,7 @@ impl FastFieldReaders {
.insert(field, multivalued_int_fast_field); .insert(field, multivalued_int_fast_field);
} }
FastType::F64 => { FastType::F64 => {
let vals_reader = FastFieldReader::open(fast_field_data)?; let vals_reader = FastFieldReader::open(fast_field_data);
let multivalued_int_fast_field = let multivalued_int_fast_field =
MultiValueIntFastFieldReader::open(idx_reader, vals_reader); MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
fast_field_readers fast_field_readers
@@ -151,7 +144,7 @@ impl FastFieldReaders {
.insert(field, multivalued_int_fast_field); .insert(field, multivalued_int_fast_field);
} }
FastType::Date => { FastType::Date => {
let vals_reader = FastFieldReader::open(fast_field_data)?; let vals_reader = FastFieldReader::open(fast_field_data);
let multivalued_int_fast_field = let multivalued_int_fast_field =
MultiValueIntFastFieldReader::open(idx_reader, vals_reader); MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
fast_field_readers fast_field_readers
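
A minimal sketch (not part of this compare) of the fallible open flow the hunks above converge on, assuming the signatures visible in them: CompositeFile::open_read_with_idx returning Option<FileSlice>, and FastFieldReader::open / BytesFastFieldReader::open returning crate::Result. The helper name is made up; only bytes fields flagged as fast reach this point.

fn open_bytes_fast_field(
    composite: &CompositeFile,
    field: Field,
    field_entry: &FieldEntry,
) -> crate::Result<BytesFastFieldReader> {
    // idx 0 holds the per-document offsets, idx 1 the concatenated byte values.
    let idx_file = composite
        .open_read_with_idx(field, 0)
        .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
    let idx_reader = FastFieldReader::open(idx_file)?;
    let data = composite
        .open_read_with_idx(field, 1)
        .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
    BytesFastFieldReader::open(idx_reader, data)
}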

View File

@@ -33,7 +33,7 @@ impl FastFieldsWriter {
let mut bytes_value_writers = Vec::new(); let mut bytes_value_writers = Vec::new();
for (field, field_entry) in schema.fields() { for (field, field_entry) in schema.fields() {
match field_entry.field_type() { match *field_entry.field_type() {
FieldType::I64(ref int_options) FieldType::I64(ref int_options)
| FieldType::U64(ref int_options) | FieldType::U64(ref int_options)
| FieldType::F64(ref int_options) | FieldType::F64(ref int_options)
@@ -56,11 +56,9 @@ impl FastFieldsWriter {
let fast_field_writer = MultiValueIntFastFieldWriter::new(field, true); let fast_field_writer = MultiValueIntFastFieldWriter::new(field, true);
multi_values_writers.push(fast_field_writer); multi_values_writers.push(fast_field_writer);
} }
FieldType::Bytes(bytes_option) => { FieldType::Bytes => {
if bytes_option.is_fast() { let fast_field_writer = BytesFastFieldWriter::new(field);
let fast_field_writer = BytesFastFieldWriter::new(field); bytes_value_writers.push(fast_field_writer);
bytes_value_writers.push(fast_field_writer);
}
} }
_ => {} _ => {}
} }
@@ -128,7 +126,6 @@ impl FastFieldsWriter {
for field_writer in &self.single_value_writers { for field_writer in &self.single_value_writers {
field_writer.serialize(serializer)?; field_writer.serialize(serializer)?;
} }
for field_writer in &self.multi_values_writers { for field_writer in &self.multi_values_writers {
let field = field_writer.field(); let field = field_writer.field();
field_writer.serialize(serializer, mapping.get(&field))?; field_writer.serialize(serializer, mapping.get(&field))?;
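
Schema-side consequence of the bytes hunk above: a bytes field only gets a fast field writer when its options mark it as fast. A short sketch, assuming the two-argument add_bytes_field and the FAST flag used later in this compare (merger.rs tests):

use tantivy::schema::{Schema, FAST};

fn schema_with_fast_bytes() -> Schema {
    let mut schema_builder = Schema::builder();
    // Without FAST, no BytesFastFieldWriter is created for this field (per the hunk above).
    schema_builder.add_bytes_field("score_bytes", FAST);
    schema_builder.build()
}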

View File

@@ -1,7 +1,6 @@
use super::{fieldnorm_to_id, id_to_fieldnorm}; use super::{fieldnorm_to_id, id_to_fieldnorm};
use crate::common::CompositeFile; use crate::common::CompositeFile;
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::directory::OwnedBytes;
use crate::schema::Field; use crate::schema::Field;
use crate::space_usage::PerFieldSpaceUsage; use crate::space_usage::PerFieldSpaceUsage;
use crate::DocId; use crate::DocId;
@@ -20,21 +19,16 @@ pub struct FieldNormReaders {
impl FieldNormReaders { impl FieldNormReaders {
/// Creates a field norm reader. /// Creates a field norm reader.
pub fn open(file: FileSlice) -> crate::Result<FieldNormReaders> { pub fn open(source: ReadOnlySource) -> crate::Result<FieldNormReaders> {
let data = CompositeFile::open(&file)?; let data = CompositeFile::open(&source)?;
Ok(FieldNormReaders { Ok(FieldNormReaders {
data: Arc::new(data), data: Arc::new(data),
}) })
} }
/// Returns the FieldNormReader for a specific field. /// Returns the FieldNormReader for a specific field.
pub fn get_field(&self, field: Field) -> crate::Result<Option<FieldNormReader>> { pub fn get_field(&self, field: Field) -> Option<FieldNormReader> {
if let Some(file) = self.data.open_read(field) { self.data.open_read(field).map(FieldNormReader::open)
let fieldnorm_reader = FieldNormReader::open(file)?;
Ok(Some(fieldnorm_reader))
} else {
Ok(None)
}
} }
/// Return a break down of the space usage per field. /// Return a break down of the space usage per field.
@@ -49,7 +43,7 @@ impl FieldNormReaders {
/// ///
/// This metric is important to compute the score of a /// This metric is important to compute the score of a
/// document : a document having a query word in one its short fields /// document : a document having a query word in one its short fields
/// (e.g. title)is likely to be more relevant than in one of its longer field /// (e.g. title) is likely to be more relevant than in one of its longer field
/// (e.g. body). /// (e.g. body).
/// ///
/// tantivy encodes `fieldnorm` on one byte with some precision loss, /// tantivy encodes `fieldnorm` on one byte with some precision loss,
@@ -61,31 +55,19 @@ impl FieldNormReaders {
/// precompute computationally expensive functions of the fieldnorm /// precompute computationally expensive functions of the fieldnorm
/// in a very short array. /// in a very short array.
#[derive(Clone)] #[derive(Clone)]
pub enum FieldNormReader { pub struct FieldNormReader {
ConstFieldNorm { fieldnorm_id: u8, num_docs: u32 }, data: ReadOnlySource,
OneByte(OwnedBytes),
} }
impl FieldNormReader { impl FieldNormReader {
pub fn const_fieldnorm_id(fieldnorm_id: u8, num_docs: u32) -> FieldNormReader { /// Opens a field norm reader given its data source.
FieldNormReader::ConstFieldNorm { pub fn open(data: ReadOnlySource) -> Self {
fieldnorm_id, FieldNormReader { data }
num_docs,
}
}
/// Opens a field norm reader given its file.
pub fn open(fieldnorm_file: FileSlice) -> crate::Result<Self> {
let data = fieldnorm_file.read_bytes()?;
Ok(FieldNormReader::OneByte(data))
} }
/// Returns the number of documents in this segment. /// Returns the number of documents in this segment.
pub fn num_docs(&self) -> u32 { pub fn num_docs(&self) -> u32 {
match self { self.data.len() as u32
Self::ConstFieldNorm { num_docs, .. } => *num_docs,
FieldNormReader::OneByte(vals) => vals.len() as u32,
}
} }
/// Returns the `fieldnorm` associated to a doc id. /// Returns the `fieldnorm` associated to a doc id.
@@ -97,7 +79,6 @@ impl FieldNormReader {
/// ///
/// The fieldnorm is effectively decoded from the /// The fieldnorm is effectively decoded from the
/// `fieldnorm_id` by doing a simple table lookup. /// `fieldnorm_id` by doing a simple table lookup.
#[inline(always)]
pub fn fieldnorm(&self, doc_id: DocId) -> u32 { pub fn fieldnorm(&self, doc_id: DocId) -> u32 {
let fieldnorm_id = self.fieldnorm_id(doc_id); let fieldnorm_id = self.fieldnorm_id(doc_id);
id_to_fieldnorm(fieldnorm_id) id_to_fieldnorm(fieldnorm_id)
@@ -106,11 +87,8 @@ impl FieldNormReader {
/// Returns the `fieldnorm_id` associated to a document. /// Returns the `fieldnorm_id` associated to a document.
#[inline(always)] #[inline(always)]
pub fn fieldnorm_id(&self, doc_id: DocId) -> u8 { pub fn fieldnorm_id(&self, doc_id: DocId) -> u8 {
match self { let fielnorms_data = self.data.as_slice();
FieldNormReader::ConstFieldNorm { fieldnorm_id, .. } => *fieldnorm_id, fielnorms_data[doc_id as usize]
FieldNormReader::OneByte(data) => data.as_slice()[doc_id as usize],
}
} }
/// Converts a `fieldnorm_id` into a fieldnorm. /// Converts a `fieldnorm_id` into a fieldnorm.
@@ -133,8 +111,10 @@ impl FieldNormReader {
.cloned() .cloned()
.map(FieldNormReader::fieldnorm_to_id) .map(FieldNormReader::fieldnorm_to_id)
.collect::<Vec<u8>>(); .collect::<Vec<u8>>();
let field_norms_data = OwnedBytes::new(field_norms_id); let field_norms_data = ReadOnlySource::from(field_norms_id);
FieldNormReader::OneByte(field_norms_data) FieldNormReader {
data: field_norms_data,
}
} }
} }
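
A sketch (not from the patch) of how calling code might consume the reworked accessor, assuming the signatures in the hunks above: FieldNormReaders::get_field returning crate::Result<Option<FieldNormReader>> and FieldNormReader::const_fieldnorm_id(u8, u32). The fallback fieldnorm id of 1 is illustrative only.

fn fieldnorm_reader_or_const(
    readers: &FieldNormReaders,
    field: Field,
    max_doc: u32,
) -> crate::Result<FieldNormReader> {
    Ok(match readers.get_field(field)? {
        Some(reader) => reader,
        // Field has no stored fieldnorms: fall back to a constant per-doc value.
        None => FieldNormReader::const_fieldnorm_id(1u8, max_doc),
    })
}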

View File

@@ -13,7 +13,7 @@ use std::io;
/// byte per document per field. /// byte per document per field.
pub struct FieldNormsWriter { pub struct FieldNormsWriter {
fields: Vec<Field>, fields: Vec<Field>,
fieldnorms_buffer: Vec<Option<Vec<u8>>>, fieldnorms_buffer: Vec<Vec<u8>>,
} }
impl FieldNormsWriter { impl FieldNormsWriter {
@@ -23,7 +23,7 @@ impl FieldNormsWriter {
schema schema
.fields() .fields()
.filter_map(|(field, field_entry)| { .filter_map(|(field, field_entry)| {
if field_entry.has_fieldnorms() { if field_entry.is_indexed() {
Some(field) Some(field)
} else { } else {
None None
@@ -36,14 +36,15 @@ impl FieldNormsWriter {
/// specified in the schema. /// specified in the schema.
pub fn for_schema(schema: &Schema) -> FieldNormsWriter { pub fn for_schema(schema: &Schema) -> FieldNormsWriter {
let fields = FieldNormsWriter::fields_with_fieldnorm(schema); let fields = FieldNormsWriter::fields_with_fieldnorm(schema);
let num_fields = schema.num_fields(); let max_field = fields
let mut fieldnorms_buffer: Vec<Option<Vec<u8>>> = vec![None; num_fields]; .iter()
for field in &fields { .map(Field::field_id)
fieldnorms_buffer[field.field_id() as usize] = Some(Vec::new()); .max()
} .map(|max_field_id| max_field_id as usize + 1)
.unwrap_or(0);
FieldNormsWriter { FieldNormsWriter {
fields, fields,
fieldnorms_buffer, fieldnorms_buffer: (0..max_field).map(|_| Vec::new()).collect::<Vec<_>>(),
} }
} }
@@ -52,10 +53,8 @@ impl FieldNormsWriter {
/// ///
/// Will extend with 0-bytes for documents that have not been seen. /// Will extend with 0-bytes for documents that have not been seen.
pub fn fill_up_to_max_doc(&mut self, max_doc: DocId) { pub fn fill_up_to_max_doc(&mut self, max_doc: DocId) {
for buffer_opt in self.fieldnorms_buffer.iter_mut() { for field in self.fields.iter() {
if let Some(buffer) = buffer_opt { self.fieldnorms_buffer[field.field_id() as usize].resize(max_doc as usize, 0u8);
buffer.resize(max_doc as usize, 0u8);
}
} }
} }
@@ -68,22 +67,21 @@ impl FieldNormsWriter {
/// * field - the field being set /// * field - the field being set
/// * fieldnorm - the number of terms present in document `doc` in field `field` /// * fieldnorm - the number of terms present in document `doc` in field `field`
pub fn record(&mut self, doc: DocId, field: Field, fieldnorm: u32) { pub fn record(&mut self, doc: DocId, field: Field, fieldnorm: u32) {
if let Some(fieldnorm_buffer) = self.fieldnorms_buffer[field.field_id() as usize].as_mut() { let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.field_id() as usize];
assert!( assert!(
fieldnorm_buffer.len() <= doc as usize, fieldnorm_buffer.len() <= doc as usize,
"Cannot register a given fieldnorm twice" // we fill intermediary `DocId` as having a fieldnorm of 0. "Cannot register a given fieldnorm twice"
); );
fieldnorm_buffer.resize(doc as usize + 1, 0u8); // we fill intermediary `DocId` as having a fieldnorm of 0.
fieldnorm_buffer[doc as usize] = fieldnorm_to_id(fieldnorm); fieldnorm_buffer.resize(doc as usize + 1, 0u8);
} fieldnorm_buffer[doc as usize] = fieldnorm_to_id(fieldnorm);
} }
/// Serialize the seen fieldnorm values to the serializer for all fields. /// Serialize the seen fieldnorm values to the serializer for all fields.
pub fn serialize(&self, mut fieldnorms_serializer: FieldNormsSerializer) -> io::Result<()> { pub fn serialize(&self, mut fieldnorms_serializer: FieldNormsSerializer) -> io::Result<()> {
for &field in self.fields.iter() { for &field in self.fields.iter() {
if let Some(buffer) = self.fieldnorms_buffer[field.field_id() as usize].as_ref() { let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..];
fieldnorms_serializer.serialize_field(field, &buffer[..])?; fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
}
} }
fieldnorms_serializer.close()?; fieldnorms_serializer.close()?;
Ok(()) Ok(())
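
A self-contained sketch (plain Rust, no tantivy types) of the buffer layout the Option-based side of this file uses: one slot per field id, allocated only for fields that keep fieldnorms, so recording against any other field is a no-op.

fn make_fieldnorm_buffers(num_fields: usize, fields_with_norms: &[usize]) -> Vec<Option<Vec<u8>>> {
    let mut buffers: Vec<Option<Vec<u8>>> = vec![None; num_fields];
    for &field_id in fields_with_norms {
        buffers[field_id] = Some(Vec::new());
    }
    buffers
}

fn record_fieldnorm(buffers: &mut [Option<Vec<u8>>], field_id: usize, doc: usize, fieldnorm_id: u8) {
    if let Some(buffer) = buffers[field_id].as_mut() {
        // Documents skipped in between keep a fieldnorm id of 0.
        buffer.resize(doc + 1, 0u8);
        buffer[doc] = fieldnorm_id;
    }
}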

View File

@@ -108,9 +108,9 @@ fn compute_deleted_bitset(
// Limit doc helps identify the first document // Limit doc helps identify the first document
// that may be affected by the delete operation. // that may be affected by the delete operation.
let limit_doc = doc_opstamps.compute_doc_limit(delete_op.opstamp); let limit_doc = doc_opstamps.compute_doc_limit(delete_op.opstamp);
let inverted_index = segment_reader.inverted_index(delete_op.term.field())?; let inverted_index = segment_reader.inverted_index(delete_op.term.field());
if let Some(mut docset) = if let Some(mut docset) =
inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)? inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)
{ {
let mut deleted_doc = docset.doc(); let mut deleted_doc = docset.doc();
while deleted_doc != TERMINATED { while deleted_doc != TERMINATED {
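
The hunk is cut inside the loop; a sketch of how such a walk typically continues, assuming tantivy's DocSet interface (doc(), advance(), TERMINATED). The insert call on the delete bitset is hypothetical, not taken from the patch.

        let mut deleted_doc = docset.doc();
        while deleted_doc != TERMINATED {
            if deleted_doc < limit_doc {
                // hypothetical: record this doc as deleted in the segment's bitset
                delete_bitset.insert(deleted_doc);
            }
            deleted_doc = docset.advance();
        }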
@@ -800,7 +800,7 @@ mod tests {
let mut schema_builder = schema::Schema::builder(); let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT); let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let index_writer = index.writer_for_tests().unwrap(); let index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let operations = vec![ let operations = vec![
UserOperation::Add(doc!(text_field=>"a")), UserOperation::Add(doc!(text_field=>"a")),
UserOperation::Add(doc!(text_field=>"b")), UserOperation::Add(doc!(text_field=>"b")),
@@ -815,7 +815,7 @@ mod tests {
let text_field = schema_builder.add_text_field("text", schema::TEXT); let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field => "hello1")); index_writer.add_document(doc!(text_field => "hello1"));
index_writer.add_document(doc!(text_field => "hello2")); index_writer.add_document(doc!(text_field => "hello2"));
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
@@ -864,7 +864,7 @@ mod tests {
.reload_policy(ReloadPolicy::Manual) .reload_policy(ReloadPolicy::Manual)
.try_into() .try_into()
.unwrap(); .unwrap();
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let a_term = Term::from_field_text(text_field, "a"); let a_term = Term::from_field_text(text_field, "a");
let b_term = Term::from_field_text(text_field, "b"); let b_term = Term::from_field_text(text_field, "b");
let operations = vec![ let operations = vec![
@@ -926,8 +926,8 @@ mod tests {
fn test_lockfile_already_exists_error_msg() { fn test_lockfile_already_exists_error_msg() {
let schema_builder = schema::Schema::builder(); let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let _index_writer = index.writer_for_tests().unwrap(); let _index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
match index.writer_for_tests() { match index.writer_with_num_threads(1, 3_000_000) {
Err(err) => { Err(err) => {
let err_msg = err.to_string(); let err_msg = err.to_string();
assert!(err_msg.contains("already an `IndexWriter`")); assert!(err_msg.contains("already an `IndexWriter`"));
@@ -979,7 +979,7 @@ mod tests {
let num_docs_containing = |s: &str| { let num_docs_containing = |s: &str| {
let searcher = reader.searcher(); let searcher = reader.searcher();
let term = Term::from_field_text(text_field, s); let term = Term::from_field_text(text_field, s);
searcher.doc_freq(&term).unwrap() searcher.doc_freq(&term)
}; };
{ {
@@ -1015,7 +1015,7 @@ mod tests {
.unwrap(); .unwrap();
let num_docs_containing = |s: &str| { let num_docs_containing = |s: &str| {
let term_a = Term::from_field_text(text_field, s); let term_a = Term::from_field_text(text_field, s);
reader.searcher().doc_freq(&term_a).unwrap() reader.searcher().doc_freq(&term_a)
}; };
{ {
// writing the segment // writing the segment
@@ -1110,7 +1110,6 @@ mod tests {
.unwrap() .unwrap()
.searcher() .searcher()
.doc_freq(&term_a) .doc_freq(&term_a)
.unwrap()
}; };
assert_eq!(num_docs_containing("a"), 0); assert_eq!(num_docs_containing("a"), 0);
assert_eq!(num_docs_containing("b"), 100); assert_eq!(num_docs_containing("b"), 100);
@@ -1130,7 +1129,7 @@ mod tests {
reader.reload().unwrap(); reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let term = Term::from_field_text(text_field, s); let term = Term::from_field_text(text_field, s);
searcher.doc_freq(&term).unwrap() searcher.doc_freq(&term)
}; };
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
@@ -1181,15 +1180,7 @@ mod tests {
// working with an empty index == no documents // working with an empty index == no documents
let term_b = Term::from_field_text(text_field, "b"); let term_b = Term::from_field_text(text_field, "b");
assert_eq!( assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_b), 0);
index
.reader()
.unwrap()
.searcher()
.doc_freq(&term_b)
.unwrap(),
0
);
} }
#[test] #[test]
@@ -1209,15 +1200,7 @@ mod tests {
let term_a = Term::from_field_text(text_field, "a"); let term_a = Term::from_field_text(text_field, "a");
// expect the document with that term to be in the index // expect the document with that term to be in the index
assert_eq!( assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
index
.reader()
.unwrap()
.searcher()
.doc_freq(&term_a)
.unwrap(),
1
);
} }
#[test] #[test]
@@ -1243,15 +1226,7 @@ mod tests {
// Find original docs in the index // Find original docs in the index
let term_a = Term::from_field_text(text_field, "a"); let term_a = Term::from_field_text(text_field, "a");
// expect the document with that term to be in the index // expect the document with that term to be in the index
assert_eq!( assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
index
.reader()
.unwrap()
.searcher()
.doc_freq(&term_a)
.unwrap(),
1
);
} }
#[test] #[test]
@@ -1286,7 +1261,7 @@ mod tests {
let idfield = schema_builder.add_text_field("id", STRING); let idfield = schema_builder.add_text_field("id", STRING);
schema_builder.add_text_field("optfield", STRING); schema_builder.add_text_field("optfield", STRING);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(idfield=>"myid")); index_writer.add_document(doc!(idfield=>"myid"));
let commit = index_writer.commit(); let commit = index_writer.commit();
assert!(commit.is_ok()); assert!(commit.is_ok());
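
Many call sites in these tests differ only in whether Searcher::doc_freq is fallible. A small helper sketch, assuming the fallible variant (crate::Result<u64>) used on one side of this compare; the helper name is illustrative:

fn num_docs_containing(reader: &IndexReader, field: Field, token: &str) -> crate::Result<u64> {
    let term = Term::from_field_text(field, token);
    reader.searcher().doc_freq(&term)
}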

View File

@@ -25,23 +25,23 @@ use std::cmp;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::Result<u64> { fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 {
let mut total_tokens = 0u64; let mut total_tokens = 0u64;
let mut count: [usize; 256] = [0; 256]; let mut count: [usize; 256] = [0; 256];
for reader in readers { for reader in readers {
if reader.has_deletes() { if reader.has_deletes() {
// if there are deletes, then we use an approximation // if there are deletes, then we use an approximation
// using the fieldnorm // using the fieldnorm
let fieldnorms_reader = reader.get_fieldnorms_reader(field)?; let fieldnorms_reader = reader.get_fieldnorms_reader(field);
for doc in reader.doc_ids_alive() { for doc in reader.doc_ids_alive() {
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(doc); let fieldnorm_id = fieldnorms_reader.fieldnorm_id(doc);
count[fieldnorm_id as usize] += 1; count[fieldnorm_id as usize] += 1;
} }
} else { } else {
total_tokens += reader.inverted_index(field)?.total_num_tokens(); total_tokens += reader.inverted_index(field).total_num_tokens();
} }
} }
Ok(total_tokens total_tokens
+ count + count
.iter() .iter()
.cloned() .cloned()
@@ -49,7 +49,7 @@ fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::R
.map(|(fieldnorm_ord, count)| { .map(|(fieldnorm_ord, count)| {
count as u64 * u64::from(FieldNormReader::id_to_fieldnorm(fieldnorm_ord as u8)) count as u64 * u64::from(FieldNormReader::id_to_fieldnorm(fieldnorm_ord as u8))
}) })
.sum::<u64>()) .sum::<u64>()
} }
pub struct IndexMerger { pub struct IndexMerger {
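
A self-contained sketch of the approximation performed above: when a segment has deletes, the exact token count is unavailable, so the estimate is rebuilt from a 256-bucket histogram of fieldnorm ids over the documents that are still alive, decoding each id back to a length.

fn approximate_total_tokens(histogram: &[u64; 256], id_to_fieldnorm: impl Fn(u8) -> u32) -> u64 {
    histogram
        .iter()
        .enumerate()
        .map(|(id, &count)| count * u64::from(id_to_fieldnorm(id as u8)))
        .sum()
}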
@@ -175,7 +175,7 @@ impl IndexMerger {
for field in fields { for field in fields {
fieldnorms_data.clear(); fieldnorms_data.clear();
for reader in &self.readers { for reader in &self.readers {
let fieldnorms_reader = reader.get_fieldnorms_reader(field)?; let fieldnorms_reader = reader.get_fieldnorms_reader(field);
for doc_id in reader.doc_ids_alive() { for doc_id in reader.doc_ids_alive() {
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(doc_id); let fieldnorm_id = fieldnorms_reader.fieldnorm_id(doc_id);
fieldnorms_data.push(fieldnorm_id); fieldnorms_data.push(fieldnorm_id);
@@ -194,7 +194,7 @@ impl IndexMerger {
) -> crate::Result<()> { ) -> crate::Result<()> {
for (field, field_entry) in self.schema.fields() { for (field, field_entry) in self.schema.fields() {
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
match field_type { match *field_type {
FieldType::HierarchicalFacet => { FieldType::HierarchicalFacet => {
let term_ordinal_mapping = term_ord_mappings let term_ordinal_mapping = term_ord_mappings
.remove(&field) .remove(&field)
@@ -223,10 +223,8 @@ impl IndexMerger {
// They can be implemented using what is done // They can be implemented using what is done
// for facets in the future. // for facets in the future.
} }
FieldType::Bytes(byte_options) => { FieldType::Bytes => {
if byte_options.is_fast() { self.write_bytes_fast_field(field, fast_field_serializer)?;
self.write_bytes_fast_field(field, fast_field_serializer)?;
}
} }
} }
} }
@@ -445,11 +443,9 @@ impl IndexMerger {
let mut bytes_readers: Vec<BytesFastFieldReader> = Vec::new(); let mut bytes_readers: Vec<BytesFastFieldReader> = Vec::new();
for reader in &self.readers { for reader in &self.readers {
let bytes_reader = reader.fast_fields().bytes(field).ok_or_else(|| { let bytes_reader = reader.fast_fields().bytes(field).expect(
crate::TantivyError::InvalidArgument( "Failed to find bytes fast field reader. This is a bug in tantivy, please report.",
"Bytes fast field {:?} not found in segment.".to_string(), );
)
})?;
if let Some(delete_bitset) = reader.delete_bitset() { if let Some(delete_bitset) = reader.delete_bitset() {
for doc in 0u32..reader.max_doc() { for doc in 0u32..reader.max_doc() {
if delete_bitset.is_alive(doc) { if delete_bitset.is_alive(doc) {
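
A sketch of the per-segment iteration pattern in this hunk, using only accessors shown here (delete_bitset(), is_alive, max_doc); the helper name is made up. In practice the readers also expose doc_ids_alive(), used elsewhere in this file; the expanded form just spells out what the bitset check does.

fn alive_doc_ids(reader: &SegmentReader) -> Vec<DocId> {
    let mut alive = Vec::new();
    for doc in 0u32..reader.max_doc() {
        // A segment without a delete bitset has every document alive.
        let is_alive = reader
            .delete_bitset()
            .map(|bitset| bitset.is_alive(doc))
            .unwrap_or(true);
        if is_alive {
            alive.push(doc);
        }
    }
    alive
}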
@@ -502,15 +498,14 @@ impl IndexMerger {
) -> crate::Result<Option<TermOrdinalMapping>> { ) -> crate::Result<Option<TermOrdinalMapping>> {
let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000); let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
let mut delta_computer = DeltaComputer::new(); let mut delta_computer = DeltaComputer::new();
let mut field_term_streams = Vec::new();
let mut max_term_ords: Vec<TermOrdinal> = Vec::new();
let field_readers: Vec<Arc<InvertedIndexReader>> = self let field_readers: Vec<Arc<InvertedIndexReader>> = self
.readers .readers
.iter() .iter()
.map(|reader| reader.inverted_index(indexed_field)) .map(|reader| reader.inverted_index(indexed_field))
.collect::<crate::Result<Vec<_>>>()?; .collect::<Vec<_>>();
let mut field_term_streams = Vec::new();
let mut max_term_ords: Vec<TermOrdinal> = Vec::new();
for field_reader in &field_readers { for field_reader in &field_readers {
let terms = field_reader.terms(); let terms = field_reader.terms();
@@ -546,7 +541,7 @@ impl IndexMerger {
// The total number of tokens will only be exact when there has been no deletes. // The total number of tokens will only be exact when there has been no deletes.
// //
// Otherwise, we approximate by removing deleted documents proportionally. // Otherwise, we approximate by removing deleted documents proportionally.
let total_num_tokens: u64 = compute_total_num_tokens(&self.readers, indexed_field)?; let total_num_tokens: u64 = compute_total_num_tokens(&self.readers, indexed_field);
// Create the total list of doc ids // Create the total list of doc ids
// by stacking the doc ids from the different segment. // by stacking the doc ids from the different segment.
@@ -583,8 +578,8 @@ impl IndexMerger {
let term_info = heap_item.streamer.value(); let term_info = heap_item.streamer.value();
let segment_reader = &self.readers[heap_item.segment_ord]; let segment_reader = &self.readers[heap_item.segment_ord];
let inverted_index: &InvertedIndexReader = &*field_readers[segment_ord]; let inverted_index: &InvertedIndexReader = &*field_readers[segment_ord];
let segment_postings = inverted_index let segment_postings =
.read_postings_from_terminfo(term_info, segment_postings_option)?; inverted_index.read_postings_from_terminfo(term_info, segment_postings_option);
let delete_bitset_opt = segment_reader.delete_bitset(); let delete_bitset_opt = segment_reader.delete_bitset();
let doc_freq = if let Some(delete_bitset) = delete_bitset_opt { let doc_freq = if let Some(delete_bitset) = delete_bitset_opt {
segment_postings.doc_freq_given_deletes(delete_bitset) segment_postings.doc_freq_given_deletes(delete_bitset)
@@ -653,7 +648,7 @@ impl IndexMerger {
) -> crate::Result<HashMap<Field, TermOrdinalMapping>> { ) -> crate::Result<HashMap<Field, TermOrdinalMapping>> {
let mut term_ordinal_mappings = HashMap::new(); let mut term_ordinal_mappings = HashMap::new();
for (field, field_entry) in self.schema.fields() { for (field, field_entry) in self.schema.fields() {
let fieldnorm_reader = fieldnorm_readers.get_field(field)?; let fieldnorm_reader = fieldnorm_readers.get_field(field);
if field_entry.is_indexed() { if field_entry.is_indexed() {
if let Some(term_ordinal_mapping) = self.write_postings_for_field( if let Some(term_ordinal_mapping) = self.write_postings_for_field(
field, field,
@@ -670,7 +665,7 @@ impl IndexMerger {
fn write_storable_fields(&self, store_writer: &mut StoreWriter) -> crate::Result<()> { fn write_storable_fields(&self, store_writer: &mut StoreWriter) -> crate::Result<()> {
for reader in &self.readers { for reader in &self.readers {
let store_reader = reader.get_store_reader()?; let store_reader = reader.get_store_reader();
if reader.num_deleted_docs() > 0 { if reader.num_deleted_docs() > 0 {
for doc_id in reader.doc_ids_alive() { for doc_id in reader.doc_ids_alive() {
let doc = store_reader.get(doc_id)?; let doc = store_reader.get(doc_id)?;
@@ -725,12 +720,12 @@ mod tests {
use crate::IndexWriter; use crate::IndexWriter;
use crate::Searcher; use crate::Searcher;
use crate::{schema, DocSet, SegmentId}; use crate::{schema, DocSet, SegmentId};
use byteorder::{BigEndian, ReadBytesExt}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use futures::executor::block_on; use futures::executor::block_on;
use schema::FAST; use std::io::Cursor;
#[test] #[test]
fn test_index_merger_no_deletes() -> crate::Result<()> { fn test_index_merger_no_deletes() {
let mut schema_builder = schema::Schema::builder(); let mut schema_builder = schema::Schema::builder();
let text_fieldtype = schema::TextOptions::default() let text_fieldtype = schema::TextOptions::default()
.set_indexing_options( .set_indexing_options(
@@ -743,77 +738,98 @@ mod tests {
let date_field = schema_builder.add_date_field("date", INDEXED); let date_field = schema_builder.add_date_field("date", INDEXED);
let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue); let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype); let score_field = schema_builder.add_u64_field("score", score_fieldtype);
let bytes_score_field = schema_builder.add_bytes_field("score_bytes", FAST); let bytes_score_field = schema_builder.add_bytes_field("score_bytes");
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader()?; let reader = index.reader().unwrap();
let curr_time = chrono::Utc::now(); let curr_time = chrono::Utc::now();
{ let add_score_bytes = |doc: &mut Document, score: u32| {
let mut index_writer = index.writer_for_tests()?; let mut bytes = Vec::new();
// writing the segment bytes
index_writer.add_document(doc!( .write_u32::<BigEndian>(score)
text_field => "af b", .expect("failed to write u32 bytes to Vec...");
score_field => 3u64, doc.add_bytes(bytes_score_field, bytes);
date_field => curr_time, };
bytes_score_field => 3u32.to_be_bytes().as_ref()
));
index_writer.add_document(doc!( {
text_field => "a b c", let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
score_field => 5u64, {
bytes_score_field => 5u32.to_be_bytes().as_ref() // writing the segment
)); {
index_writer.add_document(doc!( let mut doc = Document::default();
text_field => "a b c d", doc.add_text(text_field, "af b");
score_field => 7u64, doc.add_u64(score_field, 3);
bytes_score_field => 7u32.to_be_bytes().as_ref() doc.add_date(date_field, &curr_time);
)); add_score_bytes(&mut doc, 3);
index_writer.commit()?; index_writer.add_document(doc);
// writing the segment }
index_writer.add_document(doc!( {
text_field => "af b", let mut doc = Document::default();
date_field => curr_time, doc.add_text(text_field, "a b c");
score_field => 11u64, doc.add_u64(score_field, 5);
bytes_score_field => 11u32.to_be_bytes().as_ref() add_score_bytes(&mut doc, 5);
)); index_writer.add_document(doc);
index_writer.add_document(doc!( }
text_field => "a b c g", {
score_field => 13u64, let mut doc = Document::default();
bytes_score_field => 13u32.to_be_bytes().as_ref() doc.add_text(text_field, "a b c d");
)); doc.add_u64(score_field, 7);
index_writer.commit()?; add_score_bytes(&mut doc, 7);
index_writer.add_document(doc);
}
index_writer.commit().expect("committed");
}
{
// writing the segment
{
let mut doc = Document::default();
doc.add_text(text_field, "af b");
doc.add_date(date_field, &curr_time);
doc.add_u64(score_field, 11);
add_score_bytes(&mut doc, 11);
index_writer.add_document(doc);
}
{
let mut doc = Document::default();
doc.add_text(text_field, "a b c g");
doc.add_u64(score_field, 13);
add_score_bytes(&mut doc, 13);
index_writer.add_document(doc);
}
index_writer.commit().expect("Commit failed");
}
} }
{ {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
block_on(index_writer.merge(&segment_ids))?; block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
index_writer.wait_merging_threads()?; index_writer.wait_merging_threads().unwrap();
} }
{ {
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let get_doc_ids = |terms: Vec<Term>| { let get_doc_ids = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); let query = BooleanQuery::new_multiterms_query(terms);
searcher let top_docs = searcher.search(&query, &TEST_COLLECTOR_WITH_SCORE).unwrap();
.search(&query, &TEST_COLLECTOR_WITH_SCORE) top_docs.docs().to_vec()
.map(|top_docs| top_docs.docs().to_vec())
}; };
{ {
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "a")])?, get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
vec![DocAddress(0, 1), DocAddress(0, 2), DocAddress(0, 4)] vec![DocAddress(0, 1), DocAddress(0, 2), DocAddress(0, 4)]
); );
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "af")])?, get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
vec![DocAddress(0, 0), DocAddress(0, 3)] vec![DocAddress(0, 0), DocAddress(0, 3)]
); );
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "g")])?, get_doc_ids(vec![Term::from_field_text(text_field, "g")]),
vec![DocAddress(0, 4)] vec![DocAddress(0, 4)]
); );
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "b")])?, get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
vec![ vec![
DocAddress(0, 0), DocAddress(0, 0),
DocAddress(0, 1), DocAddress(0, 1),
@@ -823,57 +839,60 @@ mod tests {
] ]
); );
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_date(date_field, &curr_time)])?, get_doc_ids(vec![Term::from_field_date(date_field, &curr_time)]),
vec![DocAddress(0, 0), DocAddress(0, 3)] vec![DocAddress(0, 0), DocAddress(0, 3)]
); );
} }
{ {
let doc = searcher.doc(DocAddress(0, 0))?; let doc = searcher.doc(DocAddress(0, 0)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("af b")); assert_eq!(doc.get_first(text_field).unwrap().text(), Some("af b"));
} }
{ {
let doc = searcher.doc(DocAddress(0, 1))?; let doc = searcher.doc(DocAddress(0, 1)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c")); assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c"));
} }
{ {
let doc = searcher.doc(DocAddress(0, 2))?; let doc = searcher.doc(DocAddress(0, 2)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c d")); assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c d"));
} }
{ {
let doc = searcher.doc(DocAddress(0, 3))?; let doc = searcher.doc(DocAddress(0, 3)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("af b")); assert_eq!(doc.get_first(text_field).unwrap().text(), Some("af b"));
} }
{ {
let doc = searcher.doc(DocAddress(0, 4))?; let doc = searcher.doc(DocAddress(0, 4)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c g")); assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c g"));
} }
{ {
let get_fast_vals = |terms: Vec<Term>| { let get_fast_vals = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); let query = BooleanQuery::new_multiterms_query(terms);
searcher.search(&query, &FastFieldTestCollector::for_field(score_field)) searcher
.search(&query, &FastFieldTestCollector::for_field(score_field))
.unwrap()
}; };
let get_fast_vals_bytes = |terms: Vec<Term>| { let get_fast_vals_bytes = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); let query = BooleanQuery::new_multiterms_query(terms);
searcher.search( searcher
&query, .search(
&BytesFastFieldTestCollector::for_field(bytes_score_field), &query,
) &BytesFastFieldTestCollector::for_field(bytes_score_field),
)
.expect("failed to search")
}; };
assert_eq!( assert_eq!(
get_fast_vals(vec![Term::from_field_text(text_field, "a")])?, get_fast_vals(vec![Term::from_field_text(text_field, "a")]),
vec![5, 7, 13] vec![5, 7, 13]
); );
assert_eq!( assert_eq!(
get_fast_vals_bytes(vec![Term::from_field_text(text_field, "a")])?, get_fast_vals_bytes(vec![Term::from_field_text(text_field, "a")]),
vec![0, 0, 0, 5, 0, 0, 0, 7, 0, 0, 0, 13] vec![0, 0, 0, 5, 0, 0, 0, 7, 0, 0, 0, 13]
); );
} }
} }
Ok(())
} }
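
The rewrite of this test mostly swaps explicit Document building for the doc! macro. A sketch contrasting the two forms, assuming the fields declared at the top of the test (text_field, score_field, bytes_score_field):

// macro form, as on one side of the diff
let d1 = doc!(
    text_field => "af b",
    score_field => 3u64,
    bytes_score_field => 3u32.to_be_bytes().as_ref()
);

// explicit builder form, as on the other side
let mut d2 = Document::default();
d2.add_text(text_field, "af b");
d2.add_u64(score_field, 3);
d2.add_bytes(bytes_score_field, 3u32.to_be_bytes().to_vec());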
#[test] #[test]
fn test_index_merger_with_deletes() -> crate::Result<()> { fn test_index_merger_with_deletes() {
let mut schema_builder = schema::Schema::builder(); let mut schema_builder = schema::Schema::builder();
let text_fieldtype = schema::TextOptions::default() let text_fieldtype = schema::TextOptions::default()
.set_indexing_options( .set_indexing_options(
@@ -883,26 +902,27 @@ mod tests {
let text_field = schema_builder.add_text_field("text", text_fieldtype); let text_field = schema_builder.add_text_field("text", text_fieldtype);
let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue); let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype); let score_field = schema_builder.add_u64_field("score", score_fieldtype);
let bytes_score_field = schema_builder.add_bytes_field("score_bytes", FAST); let bytes_score_field = schema_builder.add_bytes_field("score_bytes");
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
let search_term = |searcher: &Searcher, term: Term| { let search_term = |searcher: &Searcher, term: Term| {
let collector = FastFieldTestCollector::for_field(score_field); let collector = FastFieldTestCollector::for_field(score_field);
let bytes_collector = BytesFastFieldTestCollector::for_field(bytes_score_field); let bytes_collector = BytesFastFieldTestCollector::for_field(bytes_score_field);
let term_query = TermQuery::new(term, IndexRecordOption::Basic); let term_query = TermQuery::new(term, IndexRecordOption::Basic);
searcher let (scores, bytes) = searcher
.search(&term_query, &(collector, bytes_collector)) .search(&term_query, &(collector, bytes_collector))
.map(|(scores, bytes)| { .unwrap();
let mut score_bytes = &bytes[..]; let mut score_bytes = Cursor::new(bytes);
for &score in &scores { for &score in &scores {
assert_eq!(score as u32, score_bytes.read_u32::<BigEndian>().unwrap()); assert_eq!(score as u32, score_bytes.read_u32::<BigEndian>().unwrap());
} }
scores
}) scores
}; };
let empty_vec = Vec::<u64>::new(); let empty_vec = Vec::<u64>::new();
{ {
// a first commit // a first commit
index_writer.add_document(doc!( index_writer.add_document(doc!(
@@ -921,26 +941,26 @@ mod tests {
score_field => 3u64, score_field => 3u64,
bytes_score_field => vec![0u8, 0, 0, 3], bytes_score_field => vec![0u8, 0, 0, 3],
)); ));
index_writer.commit()?; index_writer.commit().expect("committed");
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2); assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2); assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3); assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?, search_term(&searcher, Term::from_field_text(text_field, "a")),
vec![1] vec![1]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?, search_term(&searcher, Term::from_field_text(text_field, "b")),
vec![1] vec![1]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?, search_term(&searcher, Term::from_field_text(text_field, "c")),
vec![3] vec![3]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?, search_term(&searcher, Term::from_field_text(text_field, "d")),
vec![1, 3] vec![1, 3]
); );
} }
@@ -968,8 +988,8 @@ mod tests {
score_field => 7_000u64, score_field => 7_000u64,
bytes_score_field => vec![0u8, 0, 27, 88], bytes_score_field => vec![0u8, 0, 27, 88],
)); ));
index_writer.commit()?; index_writer.commit().expect("committed");
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 2); assert_eq!(searcher.segment_readers().len(), 2);
@@ -979,31 +999,31 @@ mod tests {
assert_eq!(searcher.segment_readers()[1].num_docs(), 1); assert_eq!(searcher.segment_readers()[1].num_docs(), 1);
assert_eq!(searcher.segment_readers()[1].max_doc(), 3); assert_eq!(searcher.segment_readers()[1].max_doc(), 3);
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?, search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?, search_term(&searcher, Term::from_field_text(text_field, "b")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?, search_term(&searcher, Term::from_field_text(text_field, "c")),
vec![3] vec![3]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?, search_term(&searcher, Term::from_field_text(text_field, "d")),
vec![3] vec![3]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "e"))?, search_term(&searcher, Term::from_field_text(text_field, "e")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "f"))?, search_term(&searcher, Term::from_field_text(text_field, "f")),
vec![6_000] vec![6_000]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "g"))?, search_term(&searcher, Term::from_field_text(text_field, "g")),
vec![6_000, 7_000] vec![6_000, 7_000]
); );
@@ -1025,40 +1045,42 @@ mod tests {
} }
{ {
// merging the segments // merging the segments
let segment_ids = index.searchable_segment_ids()?; let segment_ids = index
block_on(index_writer.merge(&segment_ids))?; .searchable_segment_ids()
reader.reload()?; .expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1); assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 3); assert_eq!(searcher.num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].num_docs(), 3); assert_eq!(searcher.segment_readers()[0].num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3); assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?, search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?, search_term(&searcher, Term::from_field_text(text_field, "b")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?, search_term(&searcher, Term::from_field_text(text_field, "c")),
vec![3] vec![3]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?, search_term(&searcher, Term::from_field_text(text_field, "d")),
vec![3] vec![3]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "e"))?, search_term(&searcher, Term::from_field_text(text_field, "e")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "f"))?, search_term(&searcher, Term::from_field_text(text_field, "f")),
vec![6_000] vec![6_000]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "g"))?, search_term(&searcher, Term::from_field_text(text_field, "g")),
vec![6_000, 7_000] vec![6_000, 7_000]
); );
let score_field_reader = searcher let score_field_reader = searcher
@@ -1072,40 +1094,40 @@ mod tests {
{ {
// test a commit with only deletes // test a commit with only deletes
index_writer.delete_term(Term::from_field_text(text_field, "c")); index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.commit()?; index_writer.commit().unwrap();
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1); assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 2); assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2); assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3); assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?, search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?, search_term(&searcher, Term::from_field_text(text_field, "b")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?, search_term(&searcher, Term::from_field_text(text_field, "c")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?, search_term(&searcher, Term::from_field_text(text_field, "d")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "e"))?, search_term(&searcher, Term::from_field_text(text_field, "e")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "f"))?, search_term(&searcher, Term::from_field_text(text_field, "f")),
vec![6_000] vec![6_000]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "g"))?, search_term(&searcher, Term::from_field_text(text_field, "g")),
vec![6_000, 7_000] vec![6_000, 7_000]
); );
let score_field_reader = searcher let score_field_reader = searcher
@@ -1118,9 +1140,11 @@ mod tests {
} }
{ {
// Test merging a single segment in order to remove deletes. // Test merging a single segment in order to remove deletes.
let segment_ids = index.searchable_segment_ids()?; let segment_ids = index
block_on(index_writer.merge(&segment_ids))?; .searchable_segment_ids()
reader.reload()?; .expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1); assert_eq!(searcher.segment_readers().len(), 1);
@@ -1128,31 +1152,31 @@ mod tests {
assert_eq!(searcher.segment_readers()[0].num_docs(), 2); assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 2); assert_eq!(searcher.segment_readers()[0].max_doc(), 2);
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?, search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?, search_term(&searcher, Term::from_field_text(text_field, "b")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?, search_term(&searcher, Term::from_field_text(text_field, "c")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?, search_term(&searcher, Term::from_field_text(text_field, "d")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "e"))?, search_term(&searcher, Term::from_field_text(text_field, "e")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "f"))?, search_term(&searcher, Term::from_field_text(text_field, "f")),
vec![6_000] vec![6_000]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "g"))?, search_term(&searcher, Term::from_field_text(text_field, "g")),
vec![6_000, 7_000] vec![6_000, 7_000]
); );
let score_field_reader = searcher let score_field_reader = searcher
@@ -1167,16 +1191,17 @@ mod tests {
{ {
// Test removing all docs // Test removing all docs
index_writer.delete_term(Term::from_field_text(text_field, "g")); index_writer.delete_term(Term::from_field_text(text_field, "g"));
index_writer.commit()?; index_writer.commit().unwrap();
let segment_ids = index.searchable_segment_ids()?; let segment_ids = index
reader.reload()?; .searchable_segment_ids()
.expect("Searchable segments failed.");
reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert!(segment_ids.is_empty()); assert!(segment_ids.is_empty());
assert!(searcher.segment_readers().is_empty()); assert!(searcher.segment_readers().is_empty());
assert_eq!(searcher.num_docs(), 0); assert_eq!(searcher.num_docs(), 0);
} }
Ok(())
} }
#[test] #[test]
@@ -1186,7 +1211,7 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let index_doc = |index_writer: &mut IndexWriter, doc_facets: &[&str]| { let index_doc = |index_writer: &mut IndexWriter, doc_facets: &[&str]| {
let mut doc = Document::default(); let mut doc = Document::default();
for facet in doc_facets { for facet in doc_facets {
@@ -1251,7 +1276,7 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
block_on(index_writer.merge(&segment_ids)).expect("Merging failed"); block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
index_writer.wait_merging_threads().unwrap(); index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap(); reader.reload().unwrap();
@@ -1270,7 +1295,7 @@ mod tests {
// Deleting one term // Deleting one term
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let facet = Facet::from_path(vec!["top", "a", "firstdoc"]); let facet = Facet::from_path(vec!["top", "a", "firstdoc"]);
let facet_term = Term::from_facet(facet_field, &facet); let facet_term = Term::from_facet(facet_field, &facet);
index_writer.delete_term(facet_term); index_writer.delete_term(facet_term);
@@ -1295,7 +1320,7 @@ mod tests {
let mut schema_builder = schema::Schema::builder(); let mut schema_builder = schema::Schema::builder();
let int_field = schema_builder.add_u64_field("intvals", INDEXED); let int_field = schema_builder.add_u64_field("intvals", INDEXED);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(int_field => 1u64)); index_writer.add_document(doc!(int_field => 1u64));
index_writer.commit().expect("commit failed"); index_writer.commit().expect("commit failed");
index_writer.add_document(doc!(int_field => 1u64)); index_writer.add_document(doc!(int_field => 1u64));
@@ -1324,7 +1349,7 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut doc = Document::default(); let mut doc = Document::default();
doc.add_u64(int_field, 1); doc.add_u64(int_field, 1);
index_writer.add_document(doc.clone()); index_writer.add_document(doc.clone());
@@ -1363,7 +1388,7 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let index_doc = |index_writer: &mut IndexWriter, int_vals: &[u64]| { let index_doc = |index_writer: &mut IndexWriter, int_vals: &[u64]| {
let mut doc = Document::default(); let mut doc = Document::default();
for &val in int_vals { for &val in int_vals {
@@ -1437,7 +1462,7 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
assert!(block_on(index_writer.merge(&segment_ids)).is_ok()); assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
assert!(index_writer.wait_merging_threads().is_ok()); assert!(index_writer.wait_merging_threads().is_ok());
} }
@@ -1491,7 +1516,7 @@ mod tests {
let index = Index::create_in_ram(builder.build()); let index = Index::create_in_ram(builder.build());
let mut writer = index.writer_for_tests()?; let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
// Make sure we'll attempt to merge every created segment // Make sure we'll attempt to merge every created segment
let mut policy = crate::indexer::LogMergePolicy::default(); let mut policy = crate::indexer::LogMergePolicy::default();
@@ -1523,7 +1548,7 @@ mod tests {
let mut builder = schema::SchemaBuilder::new(); let mut builder = schema::SchemaBuilder::new();
let text = builder.add_text_field("text", TEXT); let text = builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(builder.build()); let index = Index::create_in_ram(builder.build());
let mut writer = index.writer_for_tests()?; let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
let happy_term = Term::from_field_text(text, "happy"); let happy_term = Term::from_field_text(text, "happy");
let term_query = TermQuery::new(happy_term, IndexRecordOption::WithFreqs); let term_query = TermQuery::new(happy_term, IndexRecordOption::WithFreqs);
for _ in 0..62 { for _ in 0..62 {
@@ -1533,7 +1558,7 @@ mod tests {
let reader = index.reader()?; let reader = index.reader()?;
let searcher = reader.searcher(); let searcher = reader.searcher();
let mut term_scorer = term_query let mut term_scorer = term_query
.specialized_weight(&searcher, true)? .specialized_weight(&searcher, true)
.specialized_scorer(searcher.segment_reader(0u32), 1.0)?; .specialized_scorer(searcher.segment_reader(0u32), 1.0)?;
assert_eq!(term_scorer.doc(), 0); assert_eq!(term_scorer.doc(), 0);
assert_nearly_equals!(term_scorer.block_max_score(), 0.0079681855); assert_nearly_equals!(term_scorer.block_max_score(), 0.0079681855);
@@ -1548,7 +1573,7 @@ mod tests {
assert_eq!(searcher.segment_readers().len(), 2); assert_eq!(searcher.segment_readers().len(), 2);
for segment_reader in searcher.segment_readers() { for segment_reader in searcher.segment_readers() {
let mut term_scorer = term_query let mut term_scorer = term_query
.specialized_weight(&searcher, true)? .specialized_weight(&searcher, true)
.specialized_scorer(segment_reader, 1.0)?; .specialized_scorer(segment_reader, 1.0)?;
// the difference compared to before is instrinsic to the bm25 formula. no worries there. // the difference compared to before is instrinsic to the bm25 formula. no worries there.
for doc in segment_reader.doc_ids_alive() { for doc in segment_reader.doc_ids_alive() {
@@ -1572,7 +1597,7 @@ mod tests {
let segment_reader = searcher.segment_reader(0u32); let segment_reader = searcher.segment_reader(0u32);
let mut term_scorer = term_query let mut term_scorer = term_query
.specialized_weight(&searcher, true)? .specialized_weight(&searcher, true)
.specialized_scorer(segment_reader, 1.0)?; .specialized_scorer(segment_reader, 1.0)?;
// the difference compared to before is instrinsic to the bm25 formula. no worries there. // the difference compared to before is instrinsic to the bm25 formula. no worries there.
for doc in segment_reader.doc_ids_alive() { for doc in segment_reader.doc_ids_alive() {
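
A condensed sketch of the merge flow these tests exercise repeatedly, built only from calls that appear in this file (searchable_segment_ids, the writer_for_tests test helper, merge driven through futures::executor::block_on, wait_merging_threads):

use futures::executor::block_on;

fn merge_all_segments(index: &Index) -> crate::Result<()> {
    let segment_ids = index.searchable_segment_ids()?;
    if segment_ids.is_empty() {
        return Ok(()); // nothing to merge
    }
    let mut index_writer = index.writer_for_tests()?;
    block_on(index_writer.merge(&segment_ids))?;
    index_writer.wait_merging_threads()?;
    Ok(())
}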

View File

@@ -29,9 +29,8 @@ pub use self::segment_writer::SegmentWriter;
/// Alias for the default merge policy, which is the `LogMergePolicy`. /// Alias for the default merge policy, which is the `LogMergePolicy`.
pub type DefaultMergePolicy = LogMergePolicy; pub type DefaultMergePolicy = LogMergePolicy;
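As context for the alias above, a minimal sketch (not part of this diff) of how a merge policy is typically installed on a writer, written in the same crate-internal style as the tests below; the `index` value is assumed to already exist:

    // `index` is an existing tantivy Index; heap budget is in bytes.
    let mut index_writer = index.writer(50_000_000).unwrap();
    let policy = crate::indexer::LogMergePolicy::default(); // i.e. DefaultMergePolicy
    index_writer.set_merge_policy(Box::new(policy));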
#[cfg(feature = "mmap")]
#[cfg(test)] #[cfg(test)]
mod tests_mmap { mod tests {
use crate::schema::{self, Schema}; use crate::schema::{self, Schema};
use crate::{Index, Term}; use crate::{Index, Term};
@@ -40,7 +39,7 @@ mod tests_mmap {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT); let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_from_tempdir(schema_builder.build()).unwrap(); let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// there must be one deleted document in the segment // there must be one deleted document in the segment
index_writer.add_document(doc!(text_field=>"b")); index_writer.add_document(doc!(text_field=>"b"));
index_writer.delete_term(Term::from_field_text(text_field, "b")); index_writer.delete_term(Term::from_field_text(text_field, "b"));

View File

@@ -43,7 +43,7 @@ const NUM_MERGE_THREADS: usize = 4;
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
pub fn save_new_metas(schema: Schema, directory: &dyn Directory) -> crate::Result<()> { pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::Result<()> {
save_metas( save_metas(
&IndexMeta { &IndexMeta {
segments: Vec::new(), segments: Vec::new(),
@@ -64,7 +64,7 @@ pub fn save_new_metas(schema: Schema, directory: &dyn Directory) -> crate::Resul
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate::Result<()> { fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result<()> {
info!("save metas"); info!("save metas");
let mut buffer = serde_json::to_vec_pretty(metas)?; let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer. // Just adding a new line at the end of the buffer.
@@ -450,8 +450,9 @@ impl SegmentUpdater {
.into_iter() .into_iter()
.map(|merge_candidate: MergeCandidate| { .map(|merge_candidate: MergeCandidate| {
MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0) MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0)
}); })
merge_candidates.extend(committed_merge_candidates); .collect::<Vec<_>>();
merge_candidates.extend(committed_merge_candidates.into_iter());
for merge_operation in merge_candidates { for merge_operation in merge_candidates {
if let Err(err) = self.start_merge(merge_operation) { if let Err(err) = self.start_merge(merge_operation) {
@@ -554,7 +555,7 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(MergeWheneverPossible)); index_writer.set_merge_policy(Box::new(MergeWheneverPossible));
{ {
@@ -607,7 +608,7 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{ {
for _ in 0..100 { for _ in 0..100 {
@@ -678,7 +679,7 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{ {
for _ in 0..100 { for _ in 0..100 {

View File

@@ -16,6 +16,8 @@ use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
use crate::tokenizer::{TokenStreamChain, Tokenizer}; use crate::tokenizer::{TokenStreamChain, Tokenizer};
use crate::Opstamp; use crate::Opstamp;
use crate::{DocId, SegmentComponent}; use crate::{DocId, SegmentComponent};
use std::io;
use std::str;
/// Computes the initial size of the hash table. /// Computes the initial size of the hash table.
/// ///
@@ -46,7 +48,6 @@ pub struct SegmentWriter {
fieldnorms_writer: FieldNormsWriter, fieldnorms_writer: FieldNormsWriter,
doc_opstamps: Vec<Opstamp>, doc_opstamps: Vec<Opstamp>,
tokenizers: Vec<Option<TextAnalyzer>>, tokenizers: Vec<Option<TextAnalyzer>>,
term_buffer: Term,
} }
impl SegmentWriter { impl SegmentWriter {
@@ -90,7 +91,6 @@ impl SegmentWriter {
fast_field_writers: FastFieldsWriter::from_schema(schema), fast_field_writers: FastFieldsWriter::from_schema(schema),
doc_opstamps: Vec::with_capacity(1_000), doc_opstamps: Vec::with_capacity(1_000),
tokenizers, tokenizers,
term_buffer: Term::new(),
}) })
} }
@@ -116,11 +116,7 @@ impl SegmentWriter {
/// Indexes a new document /// Indexes a new document
/// ///
/// As a user, you should rather use `IndexWriter`'s add_document. /// As a user, you should rather use `IndexWriter`'s add_document.
pub fn add_document( pub fn add_document(&mut self, add_operation: AddOperation, schema: &Schema) -> io::Result<()> {
&mut self,
add_operation: AddOperation,
schema: &Schema,
) -> crate::Result<()> {
let doc_id = self.max_doc; let doc_id = self.max_doc;
let mut doc = add_operation.document; let mut doc = add_operation.document;
self.doc_opstamps.push(add_operation.opstamp); self.doc_opstamps.push(add_operation.opstamp);
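The doc comment in this hunk points users at `IndexWriter::add_document` rather than the segment writer. A hedged sketch of that recommended path, mirroring the tests elsewhere in this diff (`index` and `text_field` are assumed to exist):

    let mut index_writer = index.writer(3_000_000).unwrap();
    index_writer.add_document(doc!(text_field => "a b c"));
    index_writer.commit().unwrap();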
@@ -128,45 +124,34 @@ impl SegmentWriter {
self.fast_field_writers.add_document(&doc); self.fast_field_writers.add_document(&doc);
for (field, field_values) in doc.get_sorted_field_values() { for (field, field_values) in doc.get_sorted_field_values() {
let field_entry = schema.get_field_entry(field); let field_options = schema.get_field_entry(field);
let make_schema_error = || { if !field_options.is_indexed() {
crate::TantivyError::SchemaError(format!(
"Expected a {:?} for field {:?}",
field_entry.field_type().value_type(),
field_entry.name()
))
};
if !field_entry.is_indexed() {
continue; continue;
} }
let (term_buffer, multifield_postings) = match *field_options.field_type() {
(&mut self.term_buffer, &mut self.multifield_postings);
match *field_entry.field_type() {
FieldType::HierarchicalFacet => { FieldType::HierarchicalFacet => {
term_buffer.set_field(field); let facets: Vec<&str> = field_values
let facets = .iter()
field_values .flat_map(|field_value| match *field_value.value() {
.iter() Value::Facet(ref facet) => Some(facet.encoded_str()),
.flat_map(|field_value| match *field_value.value() { _ => {
Value::Facet(ref facet) => Some(facet.encoded_str()), panic!("Expected hierarchical facet");
_ => { }
panic!("Expected hierarchical facet"); })
} .collect();
}); let mut term = Term::for_field(field); // we set the Term
for facet_str in facets { for fake_str in facets {
let mut unordered_term_id_opt = None; let mut unordered_term_id_opt = None;
FacetTokenizer FacetTokenizer.token_stream(fake_str).process(&mut |token| {
.token_stream(facet_str) term.set_text(&token.text);
.process(&mut |token| { let unordered_term_id =
term_buffer.set_text(&token.text); self.multifield_postings.subscribe(doc_id, &term);
let unordered_term_id = unordered_term_id_opt = Some(unordered_term_id);
multifield_postings.subscribe(doc_id, &term_buffer); });
unordered_term_id_opt = Some(unordered_term_id);
});
if let Some(unordered_term_id) = unordered_term_id_opt { if let Some(unordered_term_id) = unordered_term_id_opt {
self.fast_field_writers self.fast_field_writers
.get_multivalue_writer(field) .get_multivalue_writer(field)
.expect("writer for facet missing") .expect("multified writer for facet missing")
.add_val(unordered_term_id); .add_val(unordered_term_id);
} }
} }
@@ -183,6 +168,7 @@ impl SegmentWriter {
if let Some(last_token) = tok_str.tokens.last() { if let Some(last_token) = tok_str.tokens.last() {
total_offset += last_token.offset_to; total_offset += last_token.offset_to;
} }
token_streams token_streams
.push(PreTokenizedStream::from(tok_str.clone()).into()); .push(PreTokenizedStream::from(tok_str.clone()).into());
} }
@@ -192,6 +178,7 @@ impl SegmentWriter {
{ {
offsets.push(total_offset); offsets.push(total_offset);
total_offset += text.len(); total_offset += text.len();
token_streams.push(tokenizer.token_stream(text)); token_streams.push(tokenizer.token_stream(text));
} }
} }
@@ -203,12 +190,8 @@ impl SegmentWriter {
0 0
} else { } else {
let mut token_stream = TokenStreamChain::new(offsets, token_streams); let mut token_stream = TokenStreamChain::new(offsets, token_streams);
multifield_postings.index_text( self.multifield_postings
doc_id, .index_text(doc_id, field, &mut token_stream)
field,
&mut token_stream,
term_buffer,
)
}; };
self.fieldnorms_writer.record(doc_id, field, num_tokens); self.fieldnorms_writer.record(doc_id, field, num_tokens);
@@ -216,67 +199,49 @@ impl SegmentWriter {
FieldType::U64(ref int_option) => { FieldType::U64(ref int_option) => {
if int_option.is_indexed() { if int_option.is_indexed() {
for field_value in field_values { for field_value in field_values {
term_buffer.set_field(field_value.field()); let term = Term::from_field_u64(
let u64_val = field_value field_value.field(),
.value() field_value.value().u64_value(),
.u64_value() );
.ok_or_else(make_schema_error)?; self.multifield_postings.subscribe(doc_id, &term);
term_buffer.set_u64(u64_val);
multifield_postings.subscribe(doc_id, &term_buffer);
} }
} }
} }
FieldType::Date(ref int_option) => { FieldType::Date(ref int_option) => {
if int_option.is_indexed() { if int_option.is_indexed() {
for field_value in field_values { for field_value in field_values {
term_buffer.set_field(field_value.field()); let term = Term::from_field_i64(
let date_val = field_value field_value.field(),
.value() field_value.value().date_value().timestamp(),
.date_value() );
.ok_or_else(make_schema_error)?; self.multifield_postings.subscribe(doc_id, &term);
term_buffer.set_i64(date_val.timestamp());
multifield_postings.subscribe(doc_id, &term_buffer);
} }
} }
} }
FieldType::I64(ref int_option) => { FieldType::I64(ref int_option) => {
if int_option.is_indexed() { if int_option.is_indexed() {
for field_value in field_values { for field_value in field_values {
term_buffer.set_field(field_value.field()); let term = Term::from_field_i64(
let i64_val = field_value field_value.field(),
.value() field_value.value().i64_value(),
.i64_value() );
.ok_or_else(make_schema_error)?; self.multifield_postings.subscribe(doc_id, &term);
term_buffer.set_i64(i64_val);
multifield_postings.subscribe(doc_id, &term_buffer);
} }
} }
} }
FieldType::F64(ref int_option) => { FieldType::F64(ref int_option) => {
if int_option.is_indexed() { if int_option.is_indexed() {
for field_value in field_values { for field_value in field_values {
term_buffer.set_field(field_value.field()); let term = Term::from_field_f64(
let f64_val = field_value field_value.field(),
.value() field_value.value().f64_value(),
.f64_value() );
.ok_or_else(make_schema_error)?; self.multifield_postings.subscribe(doc_id, &term);
term_buffer.set_f64(f64_val);
multifield_postings.subscribe(doc_id, &term_buffer);
} }
} }
} }
FieldType::Bytes(ref option) => { FieldType::Bytes => {
if option.is_indexed() { // Do nothing. Bytes only supports fast fields.
for field_value in field_values {
term_buffer.set_field(field_value.field());
let bytes = field_value
.value()
.bytes_value()
.ok_or_else(make_schema_error)?;
term_buffer.set_bytes(bytes);
self.multifield_postings.subscribe(doc_id, &term_buffer);
}
}
} }
} }
} }

View File

@@ -105,7 +105,7 @@ extern crate serde_json;
extern crate log; extern crate log;
#[macro_use] #[macro_use]
extern crate thiserror; extern crate failure;
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
extern crate test; extern crate test;
@@ -134,7 +134,7 @@ mod core;
mod indexer; mod indexer;
#[allow(unused_doc_comments)] #[allow(unused_doc_comments)]
pub mod error; mod error;
pub mod tokenizer; pub mod tokenizer;
pub mod collector; pub mod collector;
@@ -157,7 +157,6 @@ pub use self::snippet::{Snippet, SnippetGenerator};
mod docset; mod docset;
pub use self::docset::{DocSet, TERMINATED}; pub use self::docset::{DocSet, TERMINATED};
pub use crate::common::HasLen;
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64}; pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
pub use crate::core::{Executor, SegmentComponent}; pub use crate::core::{Executor, SegmentComponent};
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta}; pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
@@ -246,10 +245,18 @@ pub type DocId = u32;
/// with opstamp `n+1`. /// with opstamp `n+1`.
pub type Opstamp = u64; pub type Opstamp = u64;
/// A Score that represents the relevance of the document to the query
///
/// This is modelled internally as a `f64`, because tantivy was compiled with the `scoref64`
/// feature. The larger the number, the more relevant the document is to the search query.
#[cfg(feature = "scoref64")]
pub type Score = f64;
/// A Score that represents the relevance of the document to the query /// A Score that represents the relevance of the document to the query
/// ///
/// This is modelled internally as a `f32`. The larger the number, the more relevant /// This is modelled internally as a `f32`. The larger the number, the more relevant
/// the document is to the search query. /// the document is to the search query.
#[cfg(not(feature = "scoref64"))]
pub type Score = f32; pub type Score = f32;
/// A `SegmentLocalId` identifies a segment. /// A `SegmentLocalId` identifies a segment.
@@ -277,7 +284,7 @@ impl DocAddress {
/// ///
/// The id used for the segment is actually an ordinal /// The id used for the segment is actually an ordinal
/// in the list of `Segment`s held by a `Searcher`. /// in the list of `Segment`s held by a `Searcher`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct DocAddress(pub SegmentLocalId, pub DocId); pub struct DocAddress(pub SegmentLocalId, pub DocId);
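A short usage sketch for `DocAddress` (the `searcher` binding is assumed, not part of the diff): the first field is the segment ordinal within the `Searcher`, the second the `DocId` inside that segment:

    let address = tantivy::DocAddress(0 /* segment ordinal */, 1 /* doc id */);
    // Fetch the stored document behind that address.
    let retrieved_doc = searcher.doc(address)?;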
#[cfg(test)] #[cfg(test)]
@@ -289,6 +296,7 @@ mod tests {
use crate::schema::*; use crate::schema::*;
use crate::DocAddress; use crate::DocAddress;
use crate::Index; use crate::Index;
use crate::IndexWriter;
use crate::Postings; use crate::Postings;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use rand::distributions::Bernoulli; use rand::distributions::Bernoulli;
@@ -353,14 +361,14 @@ mod tests {
#[test] #[test]
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
fn test_indexing() -> crate::Result<()> { fn test_indexing() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_from_tempdir(schema).unwrap(); let index = Index::create_from_tempdir(schema).unwrap();
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{ {
let doc = doc!(text_field=>"af b"); let doc = doc!(text_field=>"af b");
index_writer.add_document(doc); index_writer.add_document(doc);
@@ -375,76 +383,100 @@ mod tests {
} }
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
Ok(())
} }
#[test] #[test]
fn test_docfreq1() -> crate::Result<()> { fn test_docfreq1() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b c")); {
index_writer.commit()?; index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.add_document(doc!(text_field=>"a")); index_writer.commit().unwrap();
index_writer.add_document(doc!(text_field=>"a a")); }
index_writer.commit()?; {
index_writer.add_document(doc!(text_field=>"c")); index_writer.add_document(doc!(text_field=>"a"));
index_writer.commit()?; index_writer.add_document(doc!(text_field=>"a a"));
let reader = index.reader()?; index_writer.commit().unwrap();
let searcher = reader.searcher(); }
let term_a = Term::from_field_text(text_field, "a"); {
assert_eq!(searcher.doc_freq(&term_a)?, 3); index_writer.add_document(doc!(text_field=>"c"));
let term_b = Term::from_field_text(text_field, "b"); index_writer.commit().unwrap();
assert_eq!(searcher.doc_freq(&term_b)?, 1); }
let term_c = Term::from_field_text(text_field, "c"); {
assert_eq!(searcher.doc_freq(&term_c)?, 2); let reader = index.reader().unwrap();
let term_d = Term::from_field_text(text_field, "d"); let searcher = reader.searcher();
assert_eq!(searcher.doc_freq(&term_d)?, 0); let term_a = Term::from_field_text(text_field, "a");
Ok(()) assert_eq!(searcher.doc_freq(&term_a), 3);
let term_b = Term::from_field_text(text_field, "b");
assert_eq!(searcher.doc_freq(&term_b), 1);
let term_c = Term::from_field_text(text_field, "c");
assert_eq!(searcher.doc_freq(&term_c), 2);
let term_d = Term::from_field_text(text_field, "d");
assert_eq!(searcher.doc_freq(&term_d), 0);
}
} }
#[test] #[test]
fn test_fieldnorm_no_docs_with_field() -> crate::Result<()> { fn test_fieldnorm_no_docs_with_field() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let title_field = schema_builder.add_text_field("title", TEXT); let title_field = schema_builder.add_text_field("title", TEXT);
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.commit()?;
let index_reader = index.reader()?;
let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0);
{ {
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field)?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
assert_eq!(fieldnorm_reader.fieldnorm(0), 3); {
let doc = doc!(text_field=>"a b c");
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
} }
{ {
let fieldnorm_reader = reader.get_fieldnorms_reader(title_field)?; let index_reader = index.reader().unwrap();
assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0); let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0);
{
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field);
assert_eq!(fieldnorm_reader.fieldnorm(0), 3);
}
{
let fieldnorm_reader = reader.get_fieldnorms_reader(title_field);
assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0);
}
} }
Ok(())
} }
#[test] #[test]
fn test_fieldnorm() -> crate::Result<()> { fn test_fieldnorm() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?; {
index_writer.add_document(doc!(text_field=>"a b c")); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!()); {
index_writer.add_document(doc!(text_field=>"a b")); let doc = doc!(text_field=>"a b c");
index_writer.commit()?; index_writer.add_document(doc);
let reader = index.reader()?; }
let searcher = reader.searcher(); {
let segment_reader: &SegmentReader = searcher.segment_reader(0); let doc = doc!();
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field)?; index_writer.add_document(doc);
assert_eq!(fieldnorms_reader.fieldnorm(0), 3); }
assert_eq!(fieldnorms_reader.fieldnorm(1), 0); {
assert_eq!(fieldnorms_reader.fieldnorm(2), 2); let doc = doc!(text_field=>"a b");
Ok(()) index_writer.add_document(doc);
}
index_writer.commit().unwrap();
}
{
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0);
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field);
assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
assert_eq!(fieldnorms_reader.fieldnorm(1), 0);
assert_eq!(fieldnorms_reader.fieldnorm(2), 2);
}
} }
fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool { fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool {
@@ -459,7 +491,7 @@ mod tests {
} }
#[test] #[test]
fn test_delete_postings1() -> crate::Result<()> { fn test_delete_postings1() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let term_abcd = Term::from_field_text(text_field, "abcd"); let term_abcd = Term::from_field_text(text_field, "abcd");
@@ -475,7 +507,7 @@ mod tests {
.unwrap(); .unwrap();
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// 0 // 0
index_writer.add_document(doc!(text_field=>"a b")); index_writer.add_document(doc!(text_field=>"a b"));
// 1 // 1
@@ -491,19 +523,19 @@ mod tests {
index_writer.add_document(doc!(text_field=>" b c")); index_writer.add_document(doc!(text_field=>" b c"));
// 5 // 5
index_writer.add_document(doc!(text_field=>" a")); index_writer.add_document(doc!(text_field=>" a"));
index_writer.commit()?; index_writer.commit().unwrap();
} }
{ {
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(text_field)?; let inverted_index = segment_reader.inverted_index(text_field);
assert!(inverted_index assert!(inverted_index
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.is_none()); .is_none());
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, segment_reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 5); assert_eq!(postings.doc(), 5);
@@ -511,7 +543,7 @@ mod tests {
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, segment_reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 3); assert_eq!(postings.doc(), 3);
@@ -522,25 +554,25 @@ mod tests {
} }
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// 0 // 0
index_writer.add_document(doc!(text_field=>"a b")); index_writer.add_document(doc!(text_field=>"a b"));
// 1 // 1
index_writer.delete_term(Term::from_field_text(text_field, "c")); index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.rollback()?; index_writer.rollback().unwrap();
} }
{ {
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let seg_reader = searcher.segment_reader(0); let seg_reader = searcher.segment_reader(0);
let inverted_index = seg_reader.inverted_index(term_abcd.field())?; let inverted_index = seg_reader.inverted_index(term_abcd.field());
assert!(inverted_index assert!(inverted_index
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.is_none()); .is_none());
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, seg_reader)); assert!(advance_undeleted(&mut postings, seg_reader));
assert_eq!(postings.doc(), 5); assert_eq!(postings.doc(), 5);
@@ -548,7 +580,7 @@ mod tests {
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, seg_reader)); assert!(advance_undeleted(&mut postings, seg_reader));
assert_eq!(postings.doc(), 3); assert_eq!(postings.doc(), 3);
@@ -559,30 +591,30 @@ mod tests {
} }
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b")); index_writer.add_document(doc!(text_field=>"a b"));
index_writer.delete_term(Term::from_field_text(text_field, "c")); index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.rollback()?; index_writer.rollback().unwrap();
index_writer.delete_term(Term::from_field_text(text_field, "a")); index_writer.delete_term(Term::from_field_text(text_field, "a"));
index_writer.commit()?; index_writer.commit().unwrap();
} }
{ {
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(term_abcd.field())?; let inverted_index = segment_reader.inverted_index(term_abcd.field());
assert!(inverted_index assert!(inverted_index
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.is_none()); .is_none());
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(!advance_undeleted(&mut postings, segment_reader)); assert!(!advance_undeleted(&mut postings, segment_reader));
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, segment_reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 3); assert_eq!(postings.doc(), 3);
@@ -592,107 +624,101 @@ mod tests {
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, segment_reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 4); assert_eq!(postings.doc(), 4);
assert!(!advance_undeleted(&mut postings, segment_reader)); assert!(!advance_undeleted(&mut postings, segment_reader));
} }
} }
Ok(())
} }
#[test] #[test]
fn test_indexed_u64() -> crate::Result<()> { fn test_indexed_u64() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("value", INDEXED); let field = schema_builder.add_u64_field("value", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(field=>1u64)); index_writer.add_document(doc!(field=>1u64));
index_writer.commit()?; index_writer.commit().unwrap();
let reader = index.reader()?; let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let term = Term::from_field_u64(field, 1u64); let term = Term::from_field_u64(field, 1u64);
let mut postings = searcher let mut postings = searcher
.segment_reader(0) .segment_reader(0)
.inverted_index(term.field())? .inverted_index(term.field())
.read_postings(&term, IndexRecordOption::Basic)? .read_postings(&term, IndexRecordOption::Basic)
.unwrap(); .unwrap();
assert_eq!(postings.doc(), 0); assert_eq!(postings.doc(), 0);
assert_eq!(postings.advance(), TERMINATED); assert_eq!(postings.advance(), TERMINATED);
Ok(())
} }
#[test] #[test]
fn test_indexed_i64() -> crate::Result<()> { fn test_indexed_i64() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_i64_field("value", INDEXED); let value_field = schema_builder.add_i64_field("value", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let negative_val = -1i64; let negative_val = -1i64;
index_writer.add_document(doc!(value_field => negative_val)); index_writer.add_document(doc!(value_field => negative_val));
index_writer.commit()?; index_writer.commit().unwrap();
let reader = index.reader()?; let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let term = Term::from_field_i64(value_field, negative_val); let term = Term::from_field_i64(value_field, negative_val);
let mut postings = searcher let mut postings = searcher
.segment_reader(0) .segment_reader(0)
.inverted_index(term.field())? .inverted_index(term.field())
.read_postings(&term, IndexRecordOption::Basic)? .read_postings(&term, IndexRecordOption::Basic)
.unwrap(); .unwrap();
assert_eq!(postings.doc(), 0); assert_eq!(postings.doc(), 0);
assert_eq!(postings.advance(), TERMINATED); assert_eq!(postings.advance(), TERMINATED);
Ok(())
} }
#[test] #[test]
fn test_indexed_f64() -> crate::Result<()> { fn test_indexed_f64() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_f64_field("value", INDEXED); let value_field = schema_builder.add_f64_field("value", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let val = std::f64::consts::PI; let val = std::f64::consts::PI;
index_writer.add_document(doc!(value_field => val)); index_writer.add_document(doc!(value_field => val));
index_writer.commit()?; index_writer.commit().unwrap();
let reader = index.reader()?; let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let term = Term::from_field_f64(value_field, val); let term = Term::from_field_f64(value_field, val);
let mut postings = searcher let mut postings = searcher
.segment_reader(0) .segment_reader(0)
.inverted_index(term.field())? .inverted_index(term.field())
.read_postings(&term, IndexRecordOption::Basic)? .read_postings(&term, IndexRecordOption::Basic)
.unwrap(); .unwrap();
assert_eq!(postings.doc(), 0); assert_eq!(postings.doc(), 0);
assert_eq!(postings.advance(), TERMINATED); assert_eq!(postings.advance(), TERMINATED);
Ok(())
} }
#[test] #[test]
fn test_indexedfield_not_in_documents() -> crate::Result<()> { fn test_indexedfield_not_in_documents() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let absent_field = schema_builder.add_text_field("text", TEXT); let absent_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a")); index_writer.add_document(doc!(text_field=>"a"));
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
let reader = index.reader()?; let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(absent_field)?; segment_reader.inverted_index(absent_field); //< should not panic
assert_eq!(inverted_index.terms().num_terms(), 0);
Ok(())
} }
#[test] #[test]
fn test_delete_postings2() -> crate::Result<()> { fn test_delete_postings2() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -700,112 +726,125 @@ mod tests {
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::Manual) .reload_policy(ReloadPolicy::Manual)
.try_into()?; .try_into()
.unwrap();
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"63"));
index_writer.add_document(doc!(text_field=>"70")); let add_document = |index_writer: &mut IndexWriter, val: &'static str| {
index_writer.add_document(doc!(text_field=>"34")); let doc = doc!(text_field=>val);
index_writer.add_document(doc!(text_field=>"1")); index_writer.add_document(doc);
index_writer.add_document(doc!(text_field=>"38")); };
index_writer.add_document(doc!(text_field=>"33"));
index_writer.add_document(doc!(text_field=>"40")); let remove_document = |index_writer: &mut IndexWriter, val: &'static str| {
index_writer.add_document(doc!(text_field=>"17")); let delterm = Term::from_field_text(text_field, val);
index_writer.delete_term(Term::from_field_text(text_field, "38")); index_writer.delete_term(delterm);
index_writer.delete_term(Term::from_field_text(text_field, "34")); };
index_writer.commit()?;
reader.reload()?; add_document(&mut index_writer, "63");
assert_eq!(reader.searcher().num_docs(), 6); add_document(&mut index_writer, "70");
Ok(()) add_document(&mut index_writer, "34");
add_document(&mut index_writer, "1");
add_document(&mut index_writer, "38");
add_document(&mut index_writer, "33");
add_document(&mut index_writer, "40");
add_document(&mut index_writer, "17");
remove_document(&mut index_writer, "38");
remove_document(&mut index_writer, "34");
index_writer.commit().unwrap();
reader.reload().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 6);
} }
#[test] #[test]
fn test_termfreq() -> crate::Result<()> { fn test_termfreq() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"af af af bc bc")); let doc = doc!(text_field=>"af af af bc bc");
index_writer.commit()?; index_writer.add_document(doc);
index_writer.commit().unwrap();
} }
{ {
let index_reader = index.reader()?; let index_reader = index.reader().unwrap();
let searcher = index_reader.searcher(); let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0); let reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(text_field)?; let inverted_index = reader.inverted_index(text_field);
let term_abcd = Term::from_field_text(text_field, "abcd"); let term_abcd = Term::from_field_text(text_field, "abcd");
assert!(inverted_index assert!(inverted_index
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.is_none()); .is_none());
let term_af = Term::from_field_text(text_field, "af"); let term_af = Term::from_field_text(text_field, "af");
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert_eq!(postings.doc(), 0); assert_eq!(postings.doc(), 0);
assert_eq!(postings.term_freq(), 3); assert_eq!(postings.term_freq(), 3);
assert_eq!(postings.advance(), TERMINATED); assert_eq!(postings.advance(), TERMINATED);
} }
Ok(())
} }
#[test] #[test]
fn test_searcher_1() -> crate::Result<()> { fn test_searcher_1() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let reader = index.reader()?; let reader = index.reader().unwrap();
// writing the segment {
let mut index_writer = index.writer_for_tests()?; // writing the segment
index_writer.add_document(doc!(text_field=>"af af af b")); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b c")); index_writer.add_document(doc!(text_field=>"af af af b"));
index_writer.add_document(doc!(text_field=>"a b c d")); index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.commit()?; index_writer.add_document(doc!(text_field=>"a b c d"));
index_writer.commit().unwrap();
reader.reload()?; }
let searcher = reader.searcher(); {
let get_doc_ids = |terms: Vec<Term>| { reader.reload().unwrap();
let query = BooleanQuery::new_multiterms_query(terms); let searcher = reader.searcher();
searcher let get_doc_ids = |terms: Vec<Term>| {
.search(&query, &TEST_COLLECTOR_WITH_SCORE) let query = BooleanQuery::new_multiterms_query(terms);
.map(|topdocs| topdocs.docs().to_vec()) let topdocs = searcher.search(&query, &TEST_COLLECTOR_WITH_SCORE).unwrap();
}; topdocs.docs().to_vec()
assert_eq!( };
get_doc_ids(vec![Term::from_field_text(text_field, "a")])?, assert_eq!(
vec![DocAddress(0, 1), DocAddress(0, 2)] get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
); vec![DocAddress(0, 1), DocAddress(0, 2)]
assert_eq!( );
get_doc_ids(vec![Term::from_field_text(text_field, "af")])?, assert_eq!(
vec![DocAddress(0, 0)] get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
); vec![DocAddress(0, 0)]
assert_eq!( );
get_doc_ids(vec![Term::from_field_text(text_field, "b")])?, assert_eq!(
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)] get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
); vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
assert_eq!( );
get_doc_ids(vec![Term::from_field_text(text_field, "c")])?, assert_eq!(
vec![DocAddress(0, 1), DocAddress(0, 2)] get_doc_ids(vec![Term::from_field_text(text_field, "c")]),
); vec![DocAddress(0, 1), DocAddress(0, 2)]
assert_eq!( );
get_doc_ids(vec![Term::from_field_text(text_field, "d")])?, assert_eq!(
vec![DocAddress(0, 2)] get_doc_ids(vec![Term::from_field_text(text_field, "d")]),
); vec![DocAddress(0, 2)]
assert_eq!( );
get_doc_ids(vec![ assert_eq!(
Term::from_field_text(text_field, "b"), get_doc_ids(vec![
Term::from_field_text(text_field, "a"), Term::from_field_text(text_field, "b"),
])?, Term::from_field_text(text_field, "a"),
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)] ]),
); vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
Ok(()) );
}
} }
#[test] #[test]
fn test_searcher_2() -> crate::Result<()> { fn test_searcher_2() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -813,17 +852,19 @@ mod tests {
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::Manual) .reload_policy(ReloadPolicy::Manual)
.try_into()?; .try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 0u64); assert_eq!(reader.searcher().num_docs(), 0u64);
// writing the segment {
let mut index_writer = index.writer_for_tests()?; // writing the segment
index_writer.add_document(doc!(text_field=>"af b")); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b c")); index_writer.add_document(doc!(text_field=>"af b"));
index_writer.add_document(doc!(text_field=>"a b c d")); index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.commit()?; index_writer.add_document(doc!(text_field=>"a b c d"));
reader.reload()?; index_writer.commit().unwrap();
}
reader.reload().unwrap();
assert_eq!(reader.searcher().num_docs(), 3u64); assert_eq!(reader.searcher().num_docs(), 3u64);
Ok(())
} }
#[test] #[test]
@@ -835,17 +876,17 @@ mod tests {
text_field => "some other value", text_field => "some other value",
other_text_field => "short"); other_text_field => "short");
assert_eq!(document.len(), 3); assert_eq!(document.len(), 3);
let values: Vec<&Value> = document.get_all(text_field).collect(); let values = document.get_all(text_field);
assert_eq!(values.len(), 2); assert_eq!(values.len(), 2);
assert_eq!(values[0].text(), Some("tantivy")); assert_eq!(values[0].text(), Some("tantivy"));
assert_eq!(values[1].text(), Some("some other value")); assert_eq!(values[1].text(), Some("some other value"));
let values: Vec<&Value> = document.get_all(other_text_field).collect(); let values = document.get_all(other_text_field);
assert_eq!(values.len(), 1); assert_eq!(values.len(), 1);
assert_eq!(values[0].text(), Some("short")); assert_eq!(values[0].text(), Some("short"));
} }
#[test] #[test]
fn test_wrong_fast_field_type() -> crate::Result<()> { fn test_wrong_fast_field_type() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST); let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
let fast_field_signed = schema_builder.add_i64_field("signed", FAST); let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
@@ -855,14 +896,14 @@ mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 50_000_000).unwrap();
{ {
let document = let document =
doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64, fast_field_float=>4f64); doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64, fast_field_float=>4f64);
index_writer.add_document(document); index_writer.add_document(document);
index_writer.commit()?; index_writer.commit().unwrap();
} }
let reader = index.reader()?; let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0); let segment_reader: &SegmentReader = searcher.segment_reader(0);
{ {
@@ -901,12 +942,11 @@ mod tests {
let fast_field_reader = fast_field_reader_opt.unwrap(); let fast_field_reader = fast_field_reader_opt.unwrap();
assert_eq!(fast_field_reader.get(0), 4f64) assert_eq!(fast_field_reader.get(0), 4f64)
} }
Ok(())
} }
// motivated by #729 // motivated by #729
#[test] #[test]
fn test_update_via_delete_insert() -> crate::Result<()> { fn test_update_via_delete_insert() {
use crate::collector::Count; use crate::collector::Count;
use crate::indexer::NoMergePolicy; use crate::indexer::NoMergePolicy;
use crate::query::AllQuery; use crate::query::AllQuery;
@@ -920,17 +960,17 @@ mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone()); let index = Index::create_in_ram(schema.clone());
let index_reader = index.reader()?; let index_reader = index.reader().unwrap();
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer(3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy)); index_writer.set_merge_policy(Box::new(NoMergePolicy));
for doc_id in 0u64..DOC_COUNT { for doc_id in 0u64..DOC_COUNT {
index_writer.add_document(doc!(id => doc_id)); index_writer.add_document(doc!(id => doc_id));
} }
index_writer.commit()?; index_writer.commit().unwrap();
index_reader.reload()?; index_reader.reload().unwrap();
let searcher = index_reader.searcher(); let searcher = index_reader.searcher();
assert_eq!( assert_eq!(
@@ -941,11 +981,12 @@ mod tests {
// update the 10 elements by deleting and re-adding // update the 10 elements by deleting and re-adding
for doc_id in 0u64..DOC_COUNT { for doc_id in 0u64..DOC_COUNT {
index_writer.delete_term(Term::from_field_u64(id, doc_id)); index_writer.delete_term(Term::from_field_u64(id, doc_id));
index_writer.commit()?; index_writer.commit().unwrap();
index_reader.reload()?; index_reader.reload().unwrap();
index_writer.add_document(doc!(id => doc_id)); let doc = doc!(id => doc_id);
index_writer.commit()?; index_writer.add_document(doc);
index_reader.reload()?; index_writer.commit().unwrap();
index_reader.reload().unwrap();
let searcher = index_reader.searcher(); let searcher = index_reader.searcher();
// The number of document should be stable. // The number of document should be stable.
assert_eq!( assert_eq!(
@@ -954,7 +995,7 @@ mod tests {
); );
} }
index_reader.reload()?; index_reader.reload().unwrap();
let searcher = index_reader.searcher(); let searcher = index_reader.searcher();
let segment_ids: Vec<SegmentId> = searcher let segment_ids: Vec<SegmentId> = searcher
.segment_readers() .segment_readers()
@@ -963,18 +1004,12 @@ mod tests {
.collect(); .collect();
block_on(index_writer.merge(&segment_ids)).unwrap(); block_on(index_writer.merge(&segment_ids)).unwrap();
index_reader.reload()?; index_reader.reload().unwrap();
let searcher = index_reader.searcher(); let searcher = index_reader.searcher();
assert_eq!(searcher.search(&AllQuery, &Count)?, DOC_COUNT as usize);
Ok(())
}
#[test] assert_eq!(
fn test_validate_checksum() -> crate::Result<()> { searcher.search(&AllQuery, &Count).unwrap(),
let index_path = tempfile::tempdir().expect("dir"); DOC_COUNT as usize
let schema = Schema::builder().build(); );
let index = Index::create_in_dir(&index_path, schema)?;
assert!(index.validate_checksum()?.is_empty());
Ok(())
} }
} }

View File

@@ -38,11 +38,11 @@ const LONG_SKIP_INTERVAL: u64 = (LONG_SKIP_IN_BLOCKS * COMPRESSION_BLOCK_SIZE) a
pub mod tests { pub mod tests {
use super::PositionSerializer; use super::PositionSerializer;
use crate::directory::ReadOnlySource;
use crate::positions::reader::PositionReader; use crate::positions::reader::PositionReader;
use crate::{common::HasLen, directory::FileSlice};
use std::iter; use std::iter;
fn create_stream_buffer(vals: &[u32]) -> (FileSlice, FileSlice) { fn create_stream_buffer(vals: &[u32]) -> (ReadOnlySource, ReadOnlySource) {
let mut skip_buffer = vec![]; let mut skip_buffer = vec![];
let mut stream_buffer = vec![]; let mut stream_buffer = vec![];
{ {
@@ -53,7 +53,10 @@ pub mod tests {
} }
serializer.close().unwrap(); serializer.close().unwrap();
} }
(FileSlice::from(stream_buffer), FileSlice::from(skip_buffer)) (
ReadOnlySource::from(stream_buffer),
ReadOnlySource::from(skip_buffer),
)
} }
#[test] #[test]
@@ -62,7 +65,7 @@ pub mod tests {
let (stream, skip) = create_stream_buffer(&v[..]); let (stream, skip) = create_stream_buffer(&v[..]);
assert_eq!(skip.len(), 12); assert_eq!(skip.len(), 12);
assert_eq!(stream.len(), 1168); assert_eq!(stream.len(), 1168);
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap(); let mut position_reader = PositionReader::new(stream, skip, 0u64);
for &n in &[1, 10, 127, 128, 130, 312] { for &n in &[1, 10, 127, 128, 130, 312] {
let mut v = vec![0u32; n]; let mut v = vec![0u32; n];
position_reader.read(0, &mut v[..]); position_reader.read(0, &mut v[..]);
@@ -78,7 +81,7 @@ pub mod tests {
let (stream, skip) = create_stream_buffer(&v[..]); let (stream, skip) = create_stream_buffer(&v[..]);
assert_eq!(skip.len(), 12); assert_eq!(skip.len(), 12);
assert_eq!(stream.len(), 1168); assert_eq!(stream.len(), 1168);
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap(); let mut position_reader = PositionReader::new(stream, skip, 0u64);
for &offset in &[1u64, 10u64, 127u64, 128u64, 130u64, 312u64] { for &offset in &[1u64, 10u64, 127u64, 128u64, 130u64, 312u64] {
for &len in &[1, 10, 130, 500] { for &len in &[1, 10, 130, 500] {
let mut v = vec![0u32; len]; let mut v = vec![0u32; len];
@@ -97,7 +100,7 @@ pub mod tests {
assert_eq!(skip.len(), 12); assert_eq!(skip.len(), 12);
assert_eq!(stream.len(), 1168); assert_eq!(stream.len(), 1168);
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap(); let mut position_reader = PositionReader::new(stream, skip, 0u64);
let mut buf = [0u32; 7]; let mut buf = [0u32; 7];
let mut c = 0; let mut c = 0;
@@ -119,7 +122,7 @@ pub mod tests {
let (stream, skip) = create_stream_buffer(&v[..]); let (stream, skip) = create_stream_buffer(&v[..]);
assert_eq!(skip.len(), 15_749); assert_eq!(skip.len(), 15_749);
assert_eq!(stream.len(), 4_987_872); assert_eq!(stream.len(), 4_987_872);
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0).unwrap(); let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0);
let mut buf = [0u32; 256]; let mut buf = [0u32; 256];
position_reader.read(128, &mut buf); position_reader.read(128, &mut buf);
for i in 0..256 { for i in 0..256 {
@@ -139,8 +142,7 @@ pub mod tests {
assert_eq!(skip.len(), 15_749); assert_eq!(skip.len(), 15_749);
assert_eq!(stream.len(), 4_987_872); assert_eq!(stream.len(), 4_987_872);
let mut buf = [0u32; 1]; let mut buf = [0u32; 1];
let mut position_reader = let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 200_000);
PositionReader::new(stream.clone(), skip.clone(), 200_000).unwrap();
position_reader.read(230, &mut buf); position_reader.read(230, &mut buf);
position_reader.read(9, &mut buf); position_reader.read(9, &mut buf);
} }
@@ -155,7 +157,7 @@ pub mod tests {
} }
let (stream, skip) = create_stream_buffer(&v[..]); let (stream, skip) = create_stream_buffer(&v[..]);
let mut buf = Vec::new(); let mut buf = Vec::new();
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0).unwrap(); let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0);
let mut offset = 0; let mut offset = 0;
for i in 1..24 { for i in 1..24 {
buf.resize(i, 0); buf.resize(i, 0);
@@ -173,7 +175,7 @@ pub mod tests {
let (stream, skip) = create_stream_buffer(&v[..]); let (stream, skip) = create_stream_buffer(&v[..]);
assert_eq!(skip.len(), 15_749); assert_eq!(skip.len(), 15_749);
assert_eq!(stream.len(), 1_000_000); assert_eq!(stream.len(), 1_000_000);
let mut position_reader = PositionReader::new(stream, skip, 128 * 1024).unwrap(); let mut position_reader = PositionReader::new(stream, skip, 128 * 1024);
let mut buf = [0u32; 1]; let mut buf = [0u32; 1];
position_reader.read(0, &mut buf); position_reader.read(0, &mut buf);
assert_eq!(buf[0], CONST_VAL); assert_eq!(buf[0], CONST_VAL);
@@ -192,8 +194,7 @@ pub mod tests {
128 * 1024 + 7, 128 * 1024 + 7,
128 * 10 * 1024 + 10, 128 * 10 * 1024 + 10,
] { ] {
let mut position_reader = let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), offset);
PositionReader::new(stream.clone(), skip.clone(), offset).unwrap();
let mut buf = [0u32; 1]; let mut buf = [0u32; 1];
position_reader.read(0, &mut buf); position_reader.read(0, &mut buf);
assert_eq!(buf[0], offset as u32); assert_eq!(buf[0], offset as u32);

View File

@@ -1,13 +1,8 @@
use std::io;
use crate::common::{BinarySerializable, FixedSize}; use crate::common::{BinarySerializable, FixedSize};
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::directory::OwnedBytes;
use crate::positions::COMPRESSION_BLOCK_SIZE; use crate::positions::COMPRESSION_BLOCK_SIZE;
use crate::positions::LONG_SKIP_INTERVAL; use crate::positions::LONG_SKIP_INTERVAL;
use crate::positions::LONG_SKIP_IN_BLOCKS; use crate::positions::LONG_SKIP_IN_BLOCKS;
use bitpacking::{BitPacker, BitPacker4x};
/// Positions works as a long sequence of compressed block. /// Positions works as a long sequence of compressed block.
/// All terms are chained one after the other. /// All terms are chained one after the other.
/// ///
@@ -28,28 +23,28 @@ use bitpacking::{BitPacker, BitPacker4x};
/// A given block obviously takes `(128 x num_bit_for_the_block / num_bits_in_a_byte)`, /// A given block obviously takes `(128 x num_bit_for_the_block / num_bits_in_a_byte)`,
/// so skipping a block without decompressing it is just a matter of advancing that many /// so skipping a block without decompressing it is just a matter of advancing that many
/// bytes. /// bytes.
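As a quick sanity check of the formula in the comment above: with 128 positions per block, one block occupies `128 * num_bits / 8` bytes on disk, so skipping it is just a pointer bump. Illustrative arithmetic with a hypothetical bit width:

    let num_bits: usize = 17;                  // hypothetical per-block bit width
    let block_bytes = 128 * num_bits / 8;      // = 272 bytes to skip without decompressing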
use bitpacking::{BitPacker, BitPacker4x};
use owned_read::OwnedRead;
struct Positions { struct Positions {
bit_packer: BitPacker4x, bit_packer: BitPacker4x,
skip_file: FileSlice, skip_source: ReadOnlySource,
position_file: FileSlice, position_source: ReadOnlySource,
long_skip_data: OwnedBytes, long_skip_source: ReadOnlySource,
} }
impl Positions { impl Positions {
pub fn new(position_file: FileSlice, skip_file: FileSlice) -> io::Result<Positions> { pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions {
let (body, footer) = skip_file.split_from_end(u32::SIZE_IN_BYTES); let (body, footer) = skip_source.split_from_end(u32::SIZE_IN_BYTES);
let footer_data = footer.read_bytes()?; let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted");
let num_long_skips = u32::deserialize(&mut footer_data.as_slice())?; let (skip_source, long_skip_source) =
let (skip_file, long_skip_file) =
body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize)); body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize));
let long_skip_data = long_skip_file.read_bytes()?; Positions {
Ok(Positions {
bit_packer: BitPacker4x::new(), bit_packer: BitPacker4x::new(),
skip_file, skip_source,
long_skip_data, long_skip_source,
position_file, position_source,
}) }
} }
/// Returns the offset of the block associated to the given `long_skip_id`. /// Returns the offset of the block associated to the given `long_skip_id`.
@@ -59,23 +54,19 @@ impl Positions {
if long_skip_id == 0 { if long_skip_id == 0 {
return 0; return 0;
} }
let long_skip_slice = self.long_skip_data.as_slice(); let long_skip_slice = self.long_skip_source.as_slice();
let mut long_skip_blocks: &[u8] = &long_skip_slice[(long_skip_id - 1) * 8..][..8]; let mut long_skip_blocks: &[u8] = &long_skip_slice[(long_skip_id - 1) * 8..][..8];
u64::deserialize(&mut long_skip_blocks).expect("Index corrupted") u64::deserialize(&mut long_skip_blocks).expect("Index corrupted")
} }
fn reader(&self, offset: u64) -> io::Result<PositionReader> { fn reader(&self, offset: u64) -> PositionReader {
let long_skip_id = (offset / LONG_SKIP_INTERVAL) as usize; let long_skip_id = (offset / LONG_SKIP_INTERVAL) as usize;
let offset_num_bytes: u64 = self.long_skip(long_skip_id); let offset_num_bytes: u64 = self.long_skip(long_skip_id);
let position_read = self let mut position_read = OwnedRead::new(self.position_source.clone());
.position_file position_read.advance(offset_num_bytes as usize);
.slice_from(offset_num_bytes as usize) let mut skip_read = OwnedRead::new(self.skip_source.clone());
.read_bytes()?; skip_read.advance(long_skip_id * LONG_SKIP_IN_BLOCKS);
let skip_read = self PositionReader {
.skip_file
.slice_from(long_skip_id * LONG_SKIP_IN_BLOCKS)
.read_bytes()?;
Ok(PositionReader {
bit_packer: self.bit_packer, bit_packer: self.bit_packer,
skip_read, skip_read,
position_read, position_read,
@@ -83,14 +74,14 @@ impl Positions {
block_offset: std::i64::MAX as u64, block_offset: std::i64::MAX as u64,
anchor_offset: (long_skip_id as u64) * LONG_SKIP_INTERVAL, anchor_offset: (long_skip_id as u64) * LONG_SKIP_INTERVAL,
abs_offset: offset, abs_offset: offset,
}) }
} }
} }
#[derive(Clone)] #[derive(Clone)]
pub struct PositionReader { pub struct PositionReader {
skip_read: OwnedBytes, skip_read: OwnedRead,
position_read: OwnedBytes, position_read: OwnedRead,
bit_packer: BitPacker4x, bit_packer: BitPacker4x,
buffer: Box<[u32; COMPRESSION_BLOCK_SIZE]>, buffer: Box<[u32; COMPRESSION_BLOCK_SIZE]>,
@@ -102,12 +93,11 @@ pub struct PositionReader {
impl PositionReader { impl PositionReader {
pub fn new( pub fn new(
position_file: FileSlice, position_source: ReadOnlySource,
skip_file: FileSlice, skip_source: ReadOnlySource,
offset: u64, offset: u64,
) -> io::Result<PositionReader> { ) -> PositionReader {
let positions = Positions::new(position_file, skip_file)?; Positions::new(position_source, skip_source).reader(offset)
positions.reader(offset)
} }
fn advance_num_blocks(&mut self, num_blocks: usize) { fn advance_num_blocks(&mut self, num_blocks: usize) {
@@ -141,7 +131,7 @@ impl PositionReader {
self.advance_num_blocks(num_blocks_to_skip); self.advance_num_blocks(num_blocks_to_skip);
self.anchor_offset = offset - (offset % COMPRESSION_BLOCK_SIZE as u64); self.anchor_offset = offset - (offset % COMPRESSION_BLOCK_SIZE as u64);
self.block_offset = self.anchor_offset; self.block_offset = self.anchor_offset;
let num_bits = self.skip_read.as_slice()[0]; let num_bits = self.skip_read.get(0);
self.bit_packer self.bit_packer
.decompress(self.position_read.as_ref(), self.buffer.as_mut(), num_bits); .decompress(self.position_read.as_ref(), self.buffer.as_mut(), num_bits);
} else { } else {
@@ -151,7 +141,7 @@ impl PositionReader {
self.anchor_offset = self.block_offset; self.anchor_offset = self.block_offset;
} }
let mut num_bits = self.skip_read.as_slice()[0]; let mut num_bits = self.skip_read.get(0);
let mut position_data = self.position_read.as_ref(); let mut position_data = self.position_read.as_ref();
for i in 1.. { for i in 1.. {
@@ -165,7 +155,7 @@ impl PositionReader {
output = &mut output[remaining_in_block..]; output = &mut output[remaining_in_block..];
offset += remaining_in_block as u64; offset += remaining_in_block as u64;
position_data = &position_data[(num_bits as usize * COMPRESSION_BLOCK_SIZE / 8)..]; position_data = &position_data[(num_bits as usize * COMPRESSION_BLOCK_SIZE / 8)..];
num_bits = self.skip_read.as_slice()[i]; num_bits = self.skip_read.get(i);
self.bit_packer self.bit_packer
.decompress(position_data, self.buffer.as_mut(), num_bits); .decompress(position_data, self.buffer.as_mut(), num_bits);
self.block_offset += COMPRESSION_BLOCK_SIZE as u64; self.block_offset += COMPRESSION_BLOCK_SIZE as u64;

View File

@@ -1,8 +1,5 @@
use std::io;
use crate::common::{BinarySerializable, VInt}; use crate::common::{BinarySerializable, VInt};
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::directory::OwnedBytes;
use crate::fieldnorm::FieldNormReader; use crate::fieldnorm::FieldNormReader;
use crate::postings::compression::{ use crate::postings::compression::{
AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE, AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE,
@@ -37,7 +34,7 @@ pub struct BlockSegmentPostings {
doc_freq: u32, doc_freq: u32,
data: OwnedBytes, data: ReadOnlySource,
pub(crate) skip_reader: SkipReader, pub(crate) skip_reader: SkipReader,
} }
@@ -75,34 +72,37 @@ fn decode_vint_block(
fn split_into_skips_and_postings( fn split_into_skips_and_postings(
doc_freq: u32, doc_freq: u32,
mut bytes: OwnedBytes, data: ReadOnlySource,
) -> (Option<OwnedBytes>, OwnedBytes) { ) -> (Option<ReadOnlySource>, ReadOnlySource) {
if doc_freq < COMPRESSION_BLOCK_SIZE as u32 { if doc_freq < COMPRESSION_BLOCK_SIZE as u32 {
return (None, bytes); return (None, data);
} }
let skip_len = VInt::deserialize(&mut bytes).expect("Data corrupted").0 as usize; let mut data_byte_arr = data.as_slice();
let (skip_data, postings_data) = bytes.split(skip_len); let skip_len = VInt::deserialize(&mut data_byte_arr)
.expect("Data corrupted")
.0 as usize;
let vint_len = data.len() - data_byte_arr.len();
let (skip_data, postings_data) = data.slice_from(vint_len).split(skip_len);
(Some(skip_data), postings_data) (Some(skip_data), postings_data)
} }
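A tiny standalone sketch of the layout `split_into_skips_and_postings` assumes, with the VInt length simplified to a single byte (hypothetical helper, not the crate's API):

```rust
// Assumed layout when doc_freq >= COMPRESSION_BLOCK_SIZE:
// [skip_len encoded as a VInt][skip bytes...][postings bytes...]
// The VInt is simplified to a single length byte for this sketch.
fn split_skips_and_postings(bytes: &[u8]) -> (&[u8], &[u8]) {
    let skip_len = bytes[0] as usize; // stand-in for VInt::deserialize
    bytes[1..].split_at(skip_len)
}

fn main() {
    let data = [3u8, 0xAA, 0xBB, 0xCC, 0x01, 0x02];
    let (skip, postings) = split_skips_and_postings(&data);
    assert_eq!(skip, &[0xAA, 0xBB, 0xCC]);
    assert_eq!(postings, &[0x01, 0x02]);
}
```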
impl BlockSegmentPostings { impl BlockSegmentPostings {
pub(crate) fn open( pub(crate) fn from_data(
doc_freq: u32, doc_freq: u32,
data: FileSlice, data: ReadOnlySource,
record_option: IndexRecordOption, record_option: IndexRecordOption,
requested_option: IndexRecordOption, requested_option: IndexRecordOption,
) -> io::Result<BlockSegmentPostings> { ) -> BlockSegmentPostings {
let freq_reading_option = match (record_option, requested_option) { let freq_reading_option = match (record_option, requested_option) {
(IndexRecordOption::Basic, _) => FreqReadingOption::NoFreq, (IndexRecordOption::Basic, _) => FreqReadingOption::NoFreq,
(_, IndexRecordOption::Basic) => FreqReadingOption::SkipFreq, (_, IndexRecordOption::Basic) => FreqReadingOption::SkipFreq,
(_, _) => FreqReadingOption::ReadFreq, (_, _) => FreqReadingOption::ReadFreq,
}; };
let (skip_data_opt, postings_data) = let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, data);
split_into_skips_and_postings(doc_freq, data.read_bytes()?);
let skip_reader = match skip_data_opt { let skip_reader = match skip_data_opt {
Some(skip_data) => SkipReader::new(skip_data, doc_freq, record_option), Some(skip_data) => SkipReader::new(skip_data, doc_freq, record_option),
None => SkipReader::new(OwnedBytes::empty(), doc_freq, record_option), None => SkipReader::new(ReadOnlySource::empty(), doc_freq, record_option),
}; };
let mut block_segment_postings = BlockSegmentPostings { let mut block_segment_postings = BlockSegmentPostings {
@@ -116,7 +116,7 @@ impl BlockSegmentPostings {
skip_reader, skip_reader,
}; };
block_segment_postings.load_block(); block_segment_postings.load_block();
Ok(block_segment_postings) block_segment_postings
} }
/// Returns the block_max_score for the current block. /// Returns the block_max_score for the current block.
@@ -172,15 +172,15 @@ impl BlockSegmentPostings {
// # Warning // # Warning
// //
// This does not reset the positions list. // This does not reset the positions list.
pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: OwnedBytes) { pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: ReadOnlySource) {
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data); let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
self.data = postings_data; self.data = ReadOnlySource::new(postings_data);
self.block_max_score_cache = None; self.block_max_score_cache = None;
self.loaded_offset = std::usize::MAX; self.loaded_offset = std::usize::MAX;
if let Some(skip_data) = skip_data_opt { if let Some(skip_data) = skip_data_opt {
self.skip_reader.reset(skip_data, doc_freq); self.skip_reader.reset(skip_data, doc_freq);
} else { } else {
self.skip_reader.reset(OwnedBytes::empty(), doc_freq); self.skip_reader.reset(ReadOnlySource::empty(), doc_freq);
} }
self.doc_freq = doc_freq; self.doc_freq = doc_freq;
self.load_block(); self.load_block();
@@ -344,8 +344,8 @@ impl BlockSegmentPostings {
freq_reading_option: FreqReadingOption::NoFreq, freq_reading_option: FreqReadingOption::NoFreq,
block_max_score_cache: None, block_max_score_cache: None,
doc_freq: 0, doc_freq: 0,
data: OwnedBytes::empty(), data: ReadOnlySource::new(vec![]),
skip_reader: SkipReader::new(OwnedBytes::empty(), 0, IndexRecordOption::Basic), skip_reader: SkipReader::new(ReadOnlySource::new(vec![]), 0, IndexRecordOption::Basic),
} }
} }
} }
@@ -455,7 +455,7 @@ mod tests {
let int_field = schema_builder.add_u64_field("id", INDEXED); let int_field = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut last_doc = 0u32; let mut last_doc = 0u32;
for &doc in docs { for &doc in docs {
for _ in last_doc..doc { for _ in last_doc..doc {
@@ -467,12 +467,10 @@ mod tests {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(int_field).unwrap(); let inverted_index = segment_reader.inverted_index(int_field);
let term = Term::from_field_u64(int_field, 0u64); let term = Term::from_field_u64(int_field, 0u64);
let term_info = inverted_index.get_term_info(&term).unwrap(); let term_info = inverted_index.get_term_info(&term).unwrap();
inverted_index inverted_index.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)
.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)
.unwrap()
} }
#[test] #[test]
@@ -493,38 +491,37 @@ mod tests {
} }
#[test] #[test]
fn test_reset_block_segment_postings() -> crate::Result<()> { fn test_reset_block_segment_postings() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let int_field = schema_builder.add_u64_field("id", INDEXED); let int_field = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// create two postings lists, one containing even numbers, // create two postings lists, one containing even numbers,
// the other containing odd numbers. // the other containing odd numbers.
for i in 0..6 { for i in 0..6 {
let doc = doc!(int_field=> (i % 2) as u64); let doc = doc!(int_field=> (i % 2) as u64);
index_writer.add_document(doc); index_writer.add_document(doc);
} }
index_writer.commit()?; index_writer.commit().unwrap();
let searcher = index.reader()?.searcher(); let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let mut block_segments; let mut block_segments;
{ {
let term = Term::from_field_u64(int_field, 0u64); let term = Term::from_field_u64(int_field, 0u64);
let inverted_index = segment_reader.inverted_index(int_field)?; let inverted_index = segment_reader.inverted_index(int_field);
let term_info = inverted_index.get_term_info(&term).unwrap(); let term_info = inverted_index.get_term_info(&term).unwrap();
block_segments = inverted_index block_segments = inverted_index
.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)?; .read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic);
} }
assert_eq!(block_segments.docs(), &[0, 2, 4]); assert_eq!(block_segments.docs(), &[0, 2, 4]);
{ {
let term = Term::from_field_u64(int_field, 1u64); let term = Term::from_field_u64(int_field, 1u64);
let inverted_index = segment_reader.inverted_index(int_field)?; let inverted_index = segment_reader.inverted_index(int_field);
let term_info = inverted_index.get_term_info(&term).unwrap(); let term_info = inverted_index.get_term_info(&term).unwrap();
inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments)?; inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments);
} }
assert_eq!(block_segments.docs(), &[1, 3, 5]); assert_eq!(block_segments.docs(), &[1, 3, 5]);
Ok(())
} }
} }

View File

@@ -310,7 +310,6 @@ pub mod tests {
mod bench { mod bench {
use super::*; use super::*;
use crate::TERMINATED;
use rand::rngs::StdRng; use rand::rngs::StdRng;
use rand::Rng; use rand::Rng;
use rand::SeedableRng; use rand::SeedableRng;
@@ -341,7 +340,7 @@ mod bench {
let mut encoder = BlockEncoder::new(); let mut encoder = BlockEncoder::new();
let data = generate_array(COMPRESSION_BLOCK_SIZE, 0.1); let data = generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
let (num_bits, compressed) = encoder.compress_block_sorted(&data, 0u32); let (num_bits, compressed) = encoder.compress_block_sorted(&data, 0u32);
let mut decoder = BlockDecoder::default(); let mut decoder = BlockDecoder::new();
b.iter(|| { b.iter(|| {
decoder.uncompress_block_sorted(compressed, 0u32, num_bits); decoder.uncompress_block_sorted(compressed, 0u32, num_bits);
}); });
@@ -376,9 +375,9 @@ mod bench {
let mut encoder = BlockEncoder::new(); let mut encoder = BlockEncoder::new();
let data = generate_array(NUM_INTS_BENCH_VINT, 0.001); let data = generate_array(NUM_INTS_BENCH_VINT, 0.001);
let compressed = encoder.compress_vint_sorted(&data, 0u32); let compressed = encoder.compress_vint_sorted(&data, 0u32);
let mut decoder = BlockDecoder::default(); let mut decoder = BlockDecoder::new();
b.iter(|| { b.iter(|| {
decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT, TERMINATED); decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT);
}); });
} }
} }

View File

@@ -28,6 +28,8 @@ pub use self::segment_postings::SegmentPostings;
pub(crate) use self::stacker::compute_table_size; pub(crate) use self::stacker::compute_table_size;
pub use crate::common::HasLen;
pub(crate) type UnorderedTermId = u64; pub(crate) type UnorderedTermId = u64;
#[cfg_attr(feature = "cargo-clippy", allow(clippy::enum_variant_names))] #[cfg_attr(feature = "cargo-clippy", allow(clippy::enum_variant_names))]
@@ -40,8 +42,8 @@ pub(crate) enum FreqReadingOption {
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use super::InvertedIndexSerializer;
use super::Postings; use super::*;
use crate::core::Index; use crate::core::Index;
use crate::core::SegmentComponent; use crate::core::SegmentComponent;
use crate::core::SegmentReader; use crate::core::SegmentReader;
@@ -56,7 +58,6 @@ pub mod tests {
use crate::schema::{IndexRecordOption, TextFieldIndexing}; use crate::schema::{IndexRecordOption, TextFieldIndexing};
use crate::tokenizer::{SimpleTokenizer, MAX_TOKEN_LEN}; use crate::tokenizer::{SimpleTokenizer, MAX_TOKEN_LEN};
use crate::DocId; use crate::DocId;
use crate::HasLen;
use crate::Score; use crate::Score;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use rand::rngs::StdRng; use rand::rngs::StdRng;
@@ -90,7 +91,7 @@ pub mod tests {
let title = schema_builder.add_text_field("title", TEXT); let title = schema_builder.add_text_field("title", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 30_000_000)?;
index_writer.add_document(doc!(title => r#"abc abc abc"#)); index_writer.add_document(doc!(title => r#"abc abc abc"#));
index_writer.add_document(doc!(title => r#"abc be be be be abc"#)); index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
for _ in 0..1_000 { for _ in 0..1_000 {
@@ -100,12 +101,12 @@ pub mod tests {
index_writer.commit()?; index_writer.commit()?;
let searcher = index.reader()?.searcher(); let searcher = index.reader()?.searcher();
let inverted_index = searcher.segment_reader(0u32).inverted_index(title)?; let inverted_index = searcher.segment_reader(0u32).inverted_index(title);
let term = Term::from_field_text(title, "abc"); let term = Term::from_field_text(title, "abc");
let mut positions = Vec::new(); let mut positions = Vec::new();
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert_eq!(postings.doc(), 0); assert_eq!(postings.doc(), 0);
postings.positions(&mut positions); postings.positions(&mut positions);
@@ -119,7 +120,7 @@ pub mod tests {
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert_eq!(postings.doc(), 0); assert_eq!(postings.doc(), 0);
assert_eq!(postings.advance(), 1); assert_eq!(postings.advance(), 1);
@@ -128,7 +129,7 @@ pub mod tests {
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert_eq!(postings.seek(1), 1); assert_eq!(postings.seek(1), 1);
assert_eq!(postings.doc(), 1); assert_eq!(postings.doc(), 1);
@@ -137,7 +138,7 @@ pub mod tests {
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert_eq!(postings.seek(1002), 1002); assert_eq!(postings.seek(1002), 1002);
assert_eq!(postings.doc(), 1002); assert_eq!(postings.doc(), 1002);
@@ -146,7 +147,7 @@ pub mod tests {
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert_eq!(postings.seek(100), 100); assert_eq!(postings.seek(100), 100);
assert_eq!(postings.seek(1002), 1002); assert_eq!(postings.seek(1002), 1002);
@@ -158,7 +159,7 @@ pub mod tests {
} }
#[test] #[test]
pub fn test_drop_token_that_are_too_long() -> crate::Result<()> { pub fn test_drop_token_that_are_too_long() {
let ok_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN).collect(); let ok_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN).collect();
let mut exceeding_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN + 1).collect(); let mut exceeding_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN + 1).collect();
exceeding_token_text.push_str(" hello"); exceeding_token_text.push_str(" hello");
@@ -175,7 +176,7 @@ pub mod tests {
.tokenizers() .tokenizers()
.register("simple_no_truncation", SimpleTokenizer); .register("simple_no_truncation", SimpleTokenizer);
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy)); index_writer.set_merge_policy(Box::new(NoMergePolicy));
{ {
index_writer.add_document(doc!(text_field=>exceeding_token_text)); index_writer.add_document(doc!(text_field=>exceeding_token_text));
@@ -183,7 +184,7 @@ pub mod tests {
reader.reload().unwrap(); reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0u32); let segment_reader = searcher.segment_reader(0u32);
let inverted_index = segment_reader.inverted_index(text_field)?; let inverted_index = segment_reader.inverted_index(text_field);
assert_eq!(inverted_index.terms().num_terms(), 1); assert_eq!(inverted_index.terms().num_terms(), 1);
let mut bytes = vec![]; let mut bytes = vec![];
assert!(inverted_index.terms().ord_to_term(0, &mut bytes)); assert!(inverted_index.terms().ord_to_term(0, &mut bytes));
@@ -195,17 +196,16 @@ pub mod tests {
reader.reload().unwrap(); reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(1u32); let segment_reader = searcher.segment_reader(1u32);
let inverted_index = segment_reader.inverted_index(text_field)?; let inverted_index = segment_reader.inverted_index(text_field);
assert_eq!(inverted_index.terms().num_terms(), 1); assert_eq!(inverted_index.terms().num_terms(), 1);
let mut bytes = vec![]; let mut bytes = vec![];
assert!(inverted_index.terms().ord_to_term(0, &mut bytes)); assert!(inverted_index.terms().ord_to_term(0, &mut bytes));
assert_eq!(&bytes[..], ok_token_text.as_bytes()); assert_eq!(&bytes[..], ok_token_text.as_bytes());
} }
Ok(())
} }
#[test] #[test]
pub fn test_position_and_fieldnorm1() -> crate::Result<()> { pub fn test_position_and_fieldnorm1() {
let mut positions = Vec::new(); let mut positions = Vec::new();
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
@@ -217,38 +217,42 @@ pub mod tests {
let mut segment_writer = let mut segment_writer =
SegmentWriter::for_segment(3_000_000, segment.clone(), &schema).unwrap(); SegmentWriter::for_segment(3_000_000, segment.clone(), &schema).unwrap();
{ {
let mut doc = Document::default();
// checking that positions work if the field has two values // checking that positions work if the field has two values
doc.add_text(text_field, "a b a c a d a a.");
doc.add_text(text_field, "d d d d a");
let op = AddOperation { let op = AddOperation {
opstamp: 0u64, opstamp: 0u64,
document: doc!( document: doc,
text_field => "a b a c a d a a.",
text_field => "d d d d a"
),
}; };
segment_writer.add_document(op, &schema)?; segment_writer.add_document(op, &schema).unwrap();
} }
{ {
let mut doc = Document::default();
doc.add_text(text_field, "b a");
let op = AddOperation { let op = AddOperation {
opstamp: 1u64, opstamp: 1u64,
document: doc!(text_field => "b a"), document: doc,
}; };
segment_writer.add_document(op, &schema).unwrap(); segment_writer.add_document(op, &schema).unwrap();
} }
for i in 2..1000 { for i in 2..1000 {
let mut text: String = iter::repeat("e ").take(i).collect(); let mut doc = Document::default();
let mut text = iter::repeat("e ").take(i).collect::<String>();
text.push_str(" a"); text.push_str(" a");
doc.add_text(text_field, &text);
let op = AddOperation { let op = AddOperation {
opstamp: 2u64, opstamp: 2u64,
document: doc!(text_field => text), document: doc,
}; };
segment_writer.add_document(op, &schema).unwrap(); segment_writer.add_document(op, &schema).unwrap();
} }
segment_writer.finalize()?; segment_writer.finalize().unwrap();
} }
{ {
let segment_reader = SegmentReader::open(&segment)?; let segment_reader = SegmentReader::open(&segment).unwrap();
{ {
let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field)?; let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field);
assert_eq!(fieldnorm_reader.fieldnorm(0), 8 + 5); assert_eq!(fieldnorm_reader.fieldnorm(0), 8 + 5);
assert_eq!(fieldnorm_reader.fieldnorm(1), 2); assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
for i in 2..1000 { for i in 2..1000 {
@@ -261,15 +265,15 @@ pub mod tests {
{ {
let term_a = Term::from_field_text(text_field, "abcdef"); let term_a = Term::from_field_text(text_field, "abcdef");
assert!(segment_reader assert!(segment_reader
.inverted_index(term_a.field())? .inverted_index(term_a.field())
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.is_none()); .is_none());
} }
{ {
let term_a = Term::from_field_text(text_field, "a"); let term_a = Term::from_field_text(text_field, "a");
let mut postings_a = segment_reader let mut postings_a = segment_reader
.inverted_index(term_a.field())? .inverted_index(term_a.field())
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert_eq!(postings_a.len(), 1000); assert_eq!(postings_a.len(), 1000);
assert_eq!(postings_a.doc(), 0); assert_eq!(postings_a.doc(), 0);
@@ -291,8 +295,8 @@ pub mod tests {
{ {
let term_e = Term::from_field_text(text_field, "e"); let term_e = Term::from_field_text(text_field, "e");
let mut postings_e = segment_reader let mut postings_e = segment_reader
.inverted_index(term_e.field())? .inverted_index(term_e.field())
.read_postings(&term_e, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_e, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert_eq!(postings_e.len(), 1000 - 2); assert_eq!(postings_e.len(), 1000 - 2);
for i in 2u32..1000u32 { for i in 2u32..1000u32 {
@@ -308,18 +312,17 @@ pub mod tests {
assert_eq!(postings_e.doc(), TERMINATED); assert_eq!(postings_e.doc(), TERMINATED);
} }
} }
Ok(())
} }
#[test] #[test]
pub fn test_position_and_fieldnorm2() -> crate::Result<()> { pub fn test_position_and_fieldnorm2() {
let mut positions: Vec<u32> = Vec::new(); let mut positions: Vec<u32> = Vec::new();
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field => "g b b d c g c")); index_writer.add_document(doc!(text_field => "g b b d c g c"));
index_writer.add_document(doc!(text_field => "g a b b a d c g c")); index_writer.add_document(doc!(text_field => "g a b b a d c g c"));
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
@@ -328,17 +331,16 @@ pub mod tests {
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let mut postings = segment_reader let mut postings = segment_reader
.inverted_index(text_field)? .inverted_index(text_field)
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert_eq!(postings.doc(), 1u32); assert_eq!(postings.doc(), 1u32);
postings.positions(&mut positions); postings.positions(&mut positions);
assert_eq!(&positions[..], &[1u32, 4]); assert_eq!(&positions[..], &[1u32, 4]);
Ok(())
} }
#[test] #[test]
fn test_skip_next() -> crate::Result<()> { fn test_skip_next() {
let term_0 = Term::from_field_u64(Field::from_field_id(0), 0); let term_0 = Term::from_field_u64(Field::from_field_id(0), 0);
let term_1 = Term::from_field_u64(Field::from_field_id(0), 1); let term_1 = Term::from_field_u64(Field::from_field_id(0), 1);
let term_2 = Term::from_field_u64(Field::from_field_id(0), 2); let term_2 = Term::from_field_u64(Field::from_field_id(0), 2);
@@ -349,9 +351,10 @@ pub mod tests {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_u64_field("value", INDEXED); let value_field = schema_builder.add_u64_field("value", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for i in 0u64..num_docs as u64 { for i in 0u64..num_docs as u64 {
let doc = doc!(value_field => 2u64, value_field => i % 2u64); let doc = doc!(value_field => 2u64, value_field => i % 2u64);
index_writer.add_document(doc); index_writer.add_document(doc);
@@ -360,15 +363,15 @@ pub mod tests {
} }
index index
}; };
let searcher = index.reader()?.searcher(); let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
// check that the basic usage works // check that the basic usage works
for i in 0..num_docs - 1 { for i in 0..num_docs - 1 {
for j in i + 1..num_docs { for j in i + 1..num_docs {
let mut segment_postings = segment_reader let mut segment_postings = segment_reader
.inverted_index(term_2.field())? .inverted_index(term_2.field())
.read_postings(&term_2, IndexRecordOption::Basic)? .read_postings(&term_2, IndexRecordOption::Basic)
.unwrap(); .unwrap();
assert_eq!(segment_postings.seek(i), i); assert_eq!(segment_postings.seek(i), i);
assert_eq!(segment_postings.doc(), i); assert_eq!(segment_postings.doc(), i);
@@ -380,8 +383,8 @@ pub mod tests {
{ {
let mut segment_postings = segment_reader let mut segment_postings = segment_reader
.inverted_index(term_2.field())? .inverted_index(term_2.field())
.read_postings(&term_2, IndexRecordOption::Basic)? .read_postings(&term_2, IndexRecordOption::Basic)
.unwrap(); .unwrap();
// check that `skip_next` advances the iterator // check that `skip_next` advances the iterator
@@ -400,8 +403,8 @@ pub mod tests {
// check that filtering works // check that filtering works
{ {
let mut segment_postings = segment_reader let mut segment_postings = segment_reader
.inverted_index(term_0.field())? .inverted_index(term_0.field())
.read_postings(&term_0, IndexRecordOption::Basic)? .read_postings(&term_0, IndexRecordOption::Basic)
.unwrap(); .unwrap();
for i in 0..num_docs / 2 { for i in 0..num_docs / 2 {
@@ -410,8 +413,8 @@ pub mod tests {
} }
let mut segment_postings = segment_reader let mut segment_postings = segment_reader
.inverted_index(term_0.field())? .inverted_index(term_0.field())
.read_postings(&term_0, IndexRecordOption::Basic)? .read_postings(&term_0, IndexRecordOption::Basic)
.unwrap(); .unwrap();
for i in 0..num_docs / 2 - 1 { for i in 0..num_docs / 2 - 1 {
@@ -422,19 +425,19 @@ pub mod tests {
// delete some of the documents // delete some of the documents
{ {
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.delete_term(term_0); index_writer.delete_term(term_0);
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
let searcher = index.reader()?.searcher(); let searcher = index.reader().unwrap().searcher();
assert_eq!(searcher.segment_readers().len(), 1); assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
// make sure seeking still works // make sure seeking still works
for i in 0..num_docs { for i in 0..num_docs {
let mut segment_postings = segment_reader let mut segment_postings = segment_reader
.inverted_index(term_2.field())? .inverted_index(term_2.field())
.read_postings(&term_2, IndexRecordOption::Basic)? .read_postings(&term_2, IndexRecordOption::Basic)
.unwrap(); .unwrap();
if i % 2 == 0 { if i % 2 == 0 {
@@ -450,8 +453,8 @@ pub mod tests {
// now try with a longer sequence // now try with a longer sequence
{ {
let mut segment_postings = segment_reader let mut segment_postings = segment_reader
.inverted_index(term_2.field())? .inverted_index(term_2.field())
.read_postings(&term_2, IndexRecordOption::Basic)? .read_postings(&term_2, IndexRecordOption::Basic)
.unwrap(); .unwrap();
let mut last = 2; // start from 5 to avoid seeking to 3 twice let mut last = 2; // start from 5 to avoid seeking to 3 twice
@@ -476,19 +479,20 @@ pub mod tests {
// delete everything else // delete everything else
{ {
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.delete_term(term_1); index_writer.delete_term(term_1);
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
let searcher = index.reader()?.searcher(); let searcher = index.reader().unwrap().searcher();
// finally, check that it's empty // finally, check that it's empty
{ {
let searchable_segment_ids = index.searchable_segment_ids()?; let searchable_segment_ids = index
.searchable_segment_ids()
.expect("could not get index segment ids");
assert!(searchable_segment_ids.is_empty()); assert!(searchable_segment_ids.is_empty());
assert_eq!(searcher.num_docs(), 0); assert_eq!(searcher.num_docs(), 0);
} }
Ok(())
} }
pub static TERM_A: Lazy<Term> = Lazy::new(|| { pub static TERM_A: Lazy<Term> = Lazy::new(|| {
@@ -518,7 +522,7 @@ pub mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let posting_list_size = 1_000_000; let posting_list_size = 1_000_000;
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for _ in 0..posting_list_size { for _ in 0..posting_list_size {
let mut doc = Document::default(); let mut doc = Document::default();
if rng.gen_bool(1f64 / 15f64) { if rng.gen_bool(1f64 / 15f64) {
@@ -620,7 +624,7 @@ mod bench {
b.iter(|| { b.iter(|| {
let mut segment_postings = segment_reader let mut segment_postings = segment_reader
.inverted_index(TERM_A.field()) .inverted_index(TERM_A.field())
.read_postings(&*TERM_A, IndexRecordOption::Basic)? .read_postings(&*TERM_A, IndexRecordOption::Basic)
.unwrap(); .unwrap();
while segment_postings.advance() != TERMINATED {} while segment_postings.advance() != TERMINATED {}
}); });
@@ -635,22 +639,18 @@ mod bench {
let segment_postings_a = segment_reader let segment_postings_a = segment_reader
.inverted_index(TERM_A.field()) .inverted_index(TERM_A.field())
.read_postings(&*TERM_A, IndexRecordOption::Basic) .read_postings(&*TERM_A, IndexRecordOption::Basic)
.unwrap()
.unwrap(); .unwrap();
let segment_postings_b = segment_reader let segment_postings_b = segment_reader
.inverted_index(TERM_B.field()) .inverted_index(TERM_B.field())
.read_postings(&*TERM_B, IndexRecordOption::Basic) .read_postings(&*TERM_B, IndexRecordOption::Basic)
.unwrap()
.unwrap(); .unwrap();
let segment_postings_c = segment_reader let segment_postings_c = segment_reader
.inverted_index(TERM_C.field()) .inverted_index(TERM_C.field())
.read_postings(&*TERM_C, IndexRecordOption::Basic) .read_postings(&*TERM_C, IndexRecordOption::Basic)
.unwrap()
.unwrap(); .unwrap();
let segment_postings_d = segment_reader let segment_postings_d = segment_reader
.inverted_index(TERM_D.field()) .inverted_index(TERM_D.field())
.read_postings(&*TERM_D, IndexRecordOption::Basic) .read_postings(&*TERM_D, IndexRecordOption::Basic)
.unwrap()
.unwrap(); .unwrap();
let mut intersection = Intersection::new(vec![ let mut intersection = Intersection::new(vec![
segment_postings_a, segment_postings_a,
@@ -671,7 +671,6 @@ mod bench {
let mut segment_postings = segment_reader let mut segment_postings = segment_reader
.inverted_index(TERM_A.field()) .inverted_index(TERM_A.field())
.read_postings(&*TERM_A, IndexRecordOption::Basic) .read_postings(&*TERM_A, IndexRecordOption::Basic)
.unwrap()
.unwrap(); .unwrap();
let mut existing_docs = Vec::new(); let mut existing_docs = Vec::new();
@@ -731,7 +730,7 @@ mod bench {
let mut s = 0u32; let mut s = 0u32;
while segment_postings.doc() != TERMINATED { while segment_postings.doc() != TERMINATED {
s += (segment_postings.doc() & n) % 1024; s += (segment_postings.doc() & n) % 1024;
segment_postings.advance(); segment_postings.advance()
} }
s s
}); });

View File

@@ -38,8 +38,12 @@ fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<dyn PostingsWriter>
| FieldType::I64(_) | FieldType::I64(_)
| FieldType::F64(_) | FieldType::F64(_)
| FieldType::Date(_) | FieldType::Date(_)
| FieldType::Bytes(_)
| FieldType::HierarchicalFacet => SpecializedPostingsWriter::<NothingRecorder>::new_boxed(), | FieldType::HierarchicalFacet => SpecializedPostingsWriter::<NothingRecorder>::new_boxed(),
FieldType::Bytes => {
// FieldType::Bytes cannot actually be indexed.
// TODO fix during the indexer refactoring described in #276
SpecializedPostingsWriter::<NothingRecorder>::new_boxed()
}
} }
} }
@@ -101,7 +105,6 @@ impl MultiFieldPostingsWriter {
doc: DocId, doc: DocId,
field: Field, field: Field,
token_stream: &mut dyn TokenStream, token_stream: &mut dyn TokenStream,
term_buffer: &mut Term,
) -> u32 { ) -> u32 {
let postings_writer = let postings_writer =
self.per_field_postings_writers[field.field_id() as usize].deref_mut(); self.per_field_postings_writers[field.field_id() as usize].deref_mut();
@@ -111,7 +114,6 @@ impl MultiFieldPostingsWriter {
field, field,
token_stream, token_stream,
&mut self.heap, &mut self.heap,
term_buffer,
) )
} }
@@ -157,12 +159,11 @@ impl MultiFieldPostingsWriter {
unordered_term_mappings.insert(field, mapping); unordered_term_mappings.insert(field, mapping);
} }
FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => {} FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => {}
FieldType::Bytes(_) => {} FieldType::Bytes => {}
} }
let postings_writer = let postings_writer = &self.per_field_postings_writers[field.field_id() as usize];
self.per_field_postings_writers[field.field_id() as usize].as_ref(); let fieldnorm_reader = fieldnorm_readers.get_field(field);
let fieldnorm_reader = fieldnorm_readers.get_field(field)?;
let mut field_serializer = serializer.new_field( let mut field_serializer = serializer.new_field(
field, field,
postings_writer.total_num_tokens(), postings_writer.total_num_tokens(),
@@ -219,20 +220,13 @@ pub trait PostingsWriter {
field: Field, field: Field,
token_stream: &mut dyn TokenStream, token_stream: &mut dyn TokenStream,
heap: &mut MemoryArena, heap: &mut MemoryArena,
term_buffer: &mut Term,
) -> u32 { ) -> u32 {
term_buffer.set_field(field); let mut term = Term::for_field(field);
let mut sink = |token: &Token| { let mut sink = |token: &Token| {
// We skip all tokens whose length exceeds MAX_TOKEN_LEN (a length that must fit in a u16). // We skip all tokens whose length exceeds MAX_TOKEN_LEN (a length that must fit in a u16).
if token.text.len() <= MAX_TOKEN_LEN { if token.text.len() <= MAX_TOKEN_LEN {
term_buffer.set_text(token.text.as_str()); term.set_text(token.text.as_str());
self.subscribe( self.subscribe(term_index, doc_id, token.position as u32, &term, heap);
term_index,
doc_id,
token.position as u32,
&term_buffer,
heap,
);
} else { } else {
info!( info!(
"A token exceeding MAX_TOKEN_LEN ({}>{}) was dropped. Search for \ "A token exceeding MAX_TOKEN_LEN ({}>{}) was dropped. Search for \

View File

@@ -1,16 +1,21 @@
use crate::common::HasLen; use crate::common::HasLen;
use crate::directory::FileSlice;
use crate::docset::DocSet; use crate::docset::DocSet;
use crate::fastfield::DeleteBitSet;
use crate::positions::PositionReader; use crate::positions::PositionReader;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE; use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::postings::serializer::PostingsSerializer; use crate::postings::serializer::PostingsSerializer;
use crate::postings::BlockSearcher; use crate::postings::BlockSearcher;
use crate::postings::BlockSegmentPostings;
use crate::postings::Postings; use crate::postings::Postings;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::{DocId, TERMINATED}; use crate::{DocId, TERMINATED};
use crate::directory::ReadOnlySource;
use crate::fastfield::DeleteBitSet;
use crate::postings::BlockSegmentPostings;
/// `SegmentPostings` represents the inverted list or postings associated to /// `SegmentPostings` represents the inverted list or postings associated to
/// a term in a `Segment`. /// a term in a `Segment`.
/// ///
@@ -72,7 +77,7 @@ impl SegmentPostings {
let mut buffer = Vec::new(); let mut buffer = Vec::new();
{ {
let mut postings_serializer = let mut postings_serializer =
PostingsSerializer::new(&mut buffer, 0.0, IndexRecordOption::Basic, None); PostingsSerializer::new(&mut buffer, 0.0, false, false, None);
postings_serializer.new_term(docs.len() as u32); postings_serializer.new_term(docs.len() as u32);
for &doc in docs { for &doc in docs {
postings_serializer.write_doc(doc, 1u32); postings_serializer.write_doc(doc, 1u32);
@@ -81,13 +86,12 @@ impl SegmentPostings {
.close_term(docs.len() as u32) .close_term(docs.len() as u32)
.expect("In memory Serialization should never fail."); .expect("In memory Serialization should never fail.");
} }
let block_segment_postings = BlockSegmentPostings::open( let block_segment_postings = BlockSegmentPostings::from_data(
docs.len() as u32, docs.len() as u32,
FileSlice::from(buffer), ReadOnlySource::from(buffer),
IndexRecordOption::Basic, IndexRecordOption::Basic,
IndexRecordOption::Basic, IndexRecordOption::Basic,
) );
.unwrap();
SegmentPostings::from_block_postings(block_segment_postings, None) SegmentPostings::from_block_postings(block_segment_postings, None)
} }
@@ -110,13 +114,14 @@ impl SegmentPostings {
.iter() .iter()
.map(|&fieldnorm| fieldnorm as u64) .map(|&fieldnorm| fieldnorm as u64)
.sum::<u64>(); .sum::<u64>();
total_num_tokens as Score / fieldnorms.len() as Score total_num_tokens as Score / fieldnorms.len() as f32
}) })
.unwrap_or(0.0); .unwrap_or(0.0);
let mut postings_serializer = PostingsSerializer::new( let mut postings_serializer = PostingsSerializer::new(
&mut buffer, &mut buffer,
average_field_norm, average_field_norm,
IndexRecordOption::WithFreqs, true,
false,
fieldnorm_reader, fieldnorm_reader,
); );
postings_serializer.new_term(doc_and_tfs.len() as u32); postings_serializer.new_term(doc_and_tfs.len() as u32);
@@ -126,13 +131,12 @@ impl SegmentPostings {
postings_serializer postings_serializer
.close_term(doc_and_tfs.len() as u32) .close_term(doc_and_tfs.len() as u32)
.unwrap(); .unwrap();
let block_segment_postings = BlockSegmentPostings::open( let block_segment_postings = BlockSegmentPostings::from_data(
doc_and_tfs.len() as u32, doc_and_tfs.len() as u32,
FileSlice::from(buffer), ReadOnlySource::from(buffer),
IndexRecordOption::WithFreqs, IndexRecordOption::WithFreqs,
IndexRecordOption::WithFreqs, IndexRecordOption::WithFreqs,
) );
.unwrap();
SegmentPostings::from_block_postings(block_segment_postings, None) SegmentPostings::from_block_postings(block_segment_postings, None)
} }
@@ -200,7 +204,7 @@ impl DocSet for SegmentPostings {
} }
/// Return the current document's `DocId`. /// Return the current document's `DocId`.
#[inline(always)] #[inline]
fn doc(&self) -> DocId { fn doc(&self) -> DocId {
self.block_cursor.doc(self.cur) self.block_cursor.doc(self.cur)
} }

View File

@@ -8,8 +8,8 @@ use crate::positions::PositionSerializer;
use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE}; use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
use crate::postings::skip::SkipSerializer; use crate::postings::skip::SkipSerializer;
use crate::query::BM25Weight; use crate::query::BM25Weight;
use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType}; use crate::schema::{Field, FieldEntry, FieldType};
use crate::schema::{IndexRecordOption, Schema};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal}; use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::{DocId, Score}; use crate::{DocId, Score};
use std::cmp::Ordering; use std::cmp::Ordering;
@@ -143,24 +143,30 @@ impl<'a> FieldSerializer<'a> {
fieldnorm_reader: Option<FieldNormReader>, fieldnorm_reader: Option<FieldNormReader>,
) -> io::Result<FieldSerializer<'a>> { ) -> io::Result<FieldSerializer<'a>> {
total_num_tokens.serialize(postings_write)?; total_num_tokens.serialize(postings_write)?;
let mode = match field_type { let (term_freq_enabled, position_enabled): (bool, bool) = match field_type {
FieldType::Str(ref text_options) => { FieldType::Str(ref text_options) => {
if let Some(text_indexing_options) = text_options.get_indexing_options() { if let Some(text_indexing_options) = text_options.get_indexing_options() {
text_indexing_options.index_option() let index_option = text_indexing_options.index_option();
(index_option.has_freq(), index_option.has_positions())
} else { } else {
IndexRecordOption::Basic (false, false)
} }
} }
_ => IndexRecordOption::Basic, _ => (false, false),
}; };
let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?; let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
let average_fieldnorm = fieldnorm_reader let average_fieldnorm = fieldnorm_reader
.as_ref() .as_ref()
.map(|ff_reader| (total_num_tokens as Score / ff_reader.num_docs() as Score)) .map(|ff_reader| (total_num_tokens as Score / ff_reader.num_docs() as Score))
.unwrap_or(0.0); .unwrap_or(0.0);
let postings_serializer = let postings_serializer = PostingsSerializer::new(
PostingsSerializer::new(postings_write, average_fieldnorm, mode, fieldnorm_reader); postings_write,
let positions_serializer_opt = if mode.has_positions() { average_fieldnorm,
term_freq_enabled,
position_enabled,
fieldnorm_reader,
);
let positions_serializer_opt = if position_enabled {
Some(PositionSerializer::new(positions_write, positionsidx_write)) Some(PositionSerializer::new(positions_write, positionsidx_write))
} else { } else {
None None
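The left-hand side folds the two booleans into a single `IndexRecordOption`; a minimal stand-in for that mapping, assuming the three variants used throughout this diff (illustrative, not the crate's actual implementation):

```rust
// Stand-in for the mapping used above: which record options imply
// term-frequency and position serialization.
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum IndexRecordOption {
    Basic,
    WithFreqs,
    WithFreqsAndPositions,
}

impl IndexRecordOption {
    fn has_freq(self) -> bool {
        !matches!(self, IndexRecordOption::Basic)
    }
    fn has_positions(self) -> bool {
        matches!(self, IndexRecordOption::WithFreqsAndPositions)
    }
}

fn main() {
    // WithFreqs serializes term frequencies but opens no positions serializer.
    assert!(IndexRecordOption::WithFreqs.has_freq());
    assert!(!IndexRecordOption::WithFreqs.has_positions());
}
```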
@@ -317,22 +323,29 @@ pub struct PostingsSerializer<W: Write> {
postings_write: Vec<u8>, postings_write: Vec<u8>,
skip_write: SkipSerializer, skip_write: SkipSerializer,
mode: IndexRecordOption, termfreq_enabled: bool,
termfreq_sum_enabled: bool,
fieldnorm_reader: Option<FieldNormReader>, fieldnorm_reader: Option<FieldNormReader>,
bm25_weight: Option<BM25Weight>, bm25_weight: Option<BM25Weight>,
num_docs: u32, // Number of docs in the segment
avg_fieldnorm: Score, // Average number of terms in the field for that segment. avg_fieldnorm: Score, // Average number of terms in the field for that segment.
// this value is used to compute the block wand information. // this value is used to compute the block wand information.
} }
impl<W: Write> PostingsSerializer<W> { impl<W: Write> PostingsSerializer<W> {
pub fn new( pub fn new(
write: W, write: W,
avg_fieldnorm: Score, avg_fieldnorm: Score,
mode: IndexRecordOption, termfreq_enabled: bool,
termfreq_sum_enabled: bool,
fieldnorm_reader: Option<FieldNormReader>, fieldnorm_reader: Option<FieldNormReader>,
) -> PostingsSerializer<W> { ) -> PostingsSerializer<W> {
let num_docs = fieldnorm_reader
.as_ref()
.map(|fieldnorm_reader| fieldnorm_reader.num_docs())
.unwrap_or(0u32);
PostingsSerializer { PostingsSerializer {
output_write: CountingWriter::wrap(write), output_write: CountingWriter::wrap(write),
@@ -343,30 +356,26 @@ impl<W: Write> PostingsSerializer<W> {
skip_write: SkipSerializer::new(), skip_write: SkipSerializer::new(),
last_doc_id_encoded: 0u32, last_doc_id_encoded: 0u32,
mode, termfreq_enabled,
termfreq_sum_enabled,
fieldnorm_reader, fieldnorm_reader,
bm25_weight: None, bm25_weight: None,
num_docs,
avg_fieldnorm, avg_fieldnorm,
} }
} }
/// Returns the number of documents in the segment currently being serialized.
/// This function may return `None` if there are no fieldnorms for that field.
fn num_docs_in_segment(&self) -> Option<u32> {
self.fieldnorm_reader
.as_ref()
.map(|reader| reader.num_docs())
}
pub fn new_term(&mut self, term_doc_freq: u32) { pub fn new_term(&mut self, term_doc_freq: u32) {
if !self.mode.has_freq() { if self.termfreq_enabled && self.num_docs > 0 {
return; let bm25_weight = BM25Weight::for_one_term(
term_doc_freq as u64,
self.num_docs as u64,
self.avg_fieldnorm,
);
self.bm25_weight = Some(bm25_weight);
} }
self.bm25_weight = self.num_docs_in_segment().map(|num_docs| {
BM25Weight::for_one_term(term_doc_freq as u64, num_docs as u64, self.avg_fieldnorm)
});
} }
fn write_block(&mut self) { fn write_block(&mut self) {
@@ -381,15 +390,13 @@ impl<W: Write> PostingsSerializer<W> {
// last el block 0, offset block 1, // last el block 0, offset block 1,
self.postings_write.extend(block_encoded); self.postings_write.extend(block_encoded);
} }
if self.mode.has_freq() { if self.termfreq_enabled {
let (num_bits, block_encoded): (u8, &[u8]) = self let (num_bits, block_encoded): (u8, &[u8]) = self
.block_encoder .block_encoder
.compress_block_unsorted(&self.block.term_freqs()); .compress_block_unsorted(&self.block.term_freqs());
self.postings_write.extend(block_encoded); self.postings_write.extend(block_encoded);
self.skip_write.write_term_freq(num_bits); self.skip_write.write_term_freq(num_bits);
if self.mode.has_positions() { if self.termfreq_sum_enabled {
// We serialize the sum of term freqs within the skip information
// in order to navigate through positions.
let sum_freq = self.block.term_freqs().iter().cloned().sum(); let sum_freq = self.block.term_freqs().iter().cloned().sum();
self.skip_write.write_total_term_freq(sum_freq); self.skip_write.write_total_term_freq(sum_freq);
} }
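A small hypothetical sketch of why the per-block sum of term frequencies is written to the skip data: it is exactly the number of positions encoded for that block, so whole blocks can be skipped in the positions stream without decoding them.

```rust
// Hypothetical: given the stored per-block term-frequency sums, the number of
// positions to skip over when jumping past the first `n` blocks is their sum.
fn positions_to_skip(block_tf_sums: &[u32], blocks_to_skip: usize) -> u32 {
    block_tf_sums[..blocks_to_skip].iter().sum()
}

fn main() {
    // Two blocks whose term frequencies summed to 6 and 2 positions respectively.
    let tf_sums = [6u32, 2u32];
    assert_eq!(positions_to_skip(&tf_sums, 1), 6);
    assert_eq!(positions_to_skip(&tf_sums, 2), 8);
}
```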
@@ -448,7 +455,7 @@ impl<W: Write> PostingsSerializer<W> {
self.postings_write.write_all(block_encoded)?; self.postings_write.write_all(block_encoded)?;
} }
// ... Idem for term frequencies // ... Idem for term frequencies
if self.mode.has_freq() { if self.termfreq_enabled {
let block_encoded = self let block_encoded = self
.block_encoder .block_encoder
.compress_vint_unsorted(self.block.term_freqs()); .compress_vint_unsorted(self.block.term_freqs());

View File

@@ -1,9 +1,10 @@
use crate::common::{read_u32_vint_no_advance, serialize_vint_u32, BinarySerializable}; use crate::common::{read_u32_vint_no_advance, serialize_vint_u32, BinarySerializable, VInt};
use crate::directory::OwnedBytes; use crate::directory::ReadOnlySource;
use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE}; use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
use crate::query::BM25Weight; use crate::query::BM25Weight;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TERMINATED}; use crate::{DocId, Score, TERMINATED};
use owned_read::OwnedRead;
pub struct SkipSerializer { pub struct SkipSerializer {
buffer: Vec<u8>, buffer: Vec<u8>,
@@ -61,7 +62,7 @@ impl SkipSerializer {
pub(crate) struct SkipReader { pub(crate) struct SkipReader {
last_doc_in_block: DocId, last_doc_in_block: DocId,
pub(crate) last_doc_in_previous_block: DocId, pub(crate) last_doc_in_previous_block: DocId,
owned_read: OwnedBytes, owned_read: OwnedRead,
skip_info: IndexRecordOption, skip_info: IndexRecordOption,
byte_offset: usize, byte_offset: usize,
remaining_docs: u32, // number of docs remaining, including the remaining_docs: u32, // number of docs remaining, including the
@@ -92,7 +93,7 @@ impl Default for BlockInfo {
} }
impl SkipReader { impl SkipReader {
pub fn new(data: OwnedBytes, doc_freq: u32, skip_info: IndexRecordOption) -> SkipReader { pub fn new(data: ReadOnlySource, doc_freq: u32, skip_info: IndexRecordOption) -> SkipReader {
let mut skip_reader = SkipReader { let mut skip_reader = SkipReader {
last_doc_in_block: if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 { last_doc_in_block: if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
0 0
@@ -100,7 +101,7 @@ impl SkipReader {
TERMINATED TERMINATED
}, },
last_doc_in_previous_block: 0u32, last_doc_in_previous_block: 0u32,
owned_read: data, owned_read: OwnedRead::new(data),
skip_info, skip_info,
block_info: BlockInfo::VInt { num_docs: doc_freq }, block_info: BlockInfo::VInt { num_docs: doc_freq },
byte_offset: 0, byte_offset: 0,
@@ -113,14 +114,14 @@ impl SkipReader {
skip_reader skip_reader
} }
pub fn reset(&mut self, data: OwnedBytes, doc_freq: u32) { pub fn reset(&mut self, data: ReadOnlySource, doc_freq: u32) {
self.last_doc_in_block = if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 { self.last_doc_in_block = if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
0 0
} else { } else {
TERMINATED TERMINATED
}; };
self.last_doc_in_previous_block = 0u32; self.last_doc_in_previous_block = 0u32;
self.owned_read = data; self.owned_read = OwnedRead::new(data);
self.block_info = BlockInfo::VInt { num_docs: doc_freq }; self.block_info = BlockInfo::VInt { num_docs: doc_freq };
self.byte_offset = 0; self.byte_offset = 0;
self.remaining_docs = doc_freq; self.remaining_docs = doc_freq;
@@ -153,24 +154,17 @@ impl SkipReader {
self.position_offset self.position_offset
} }
#[inline(always)]
pub fn byte_offset(&self) -> usize { pub fn byte_offset(&self) -> usize {
self.byte_offset self.byte_offset
} }
fn read_block_info(&mut self) { fn read_block_info(&mut self) {
let doc_delta = { let doc_delta = u32::deserialize(&mut self.owned_read).expect("Skip data corrupted");
let bytes = self.owned_read.as_slice();
let mut buf = [0; 4];
buf.copy_from_slice(&bytes[..4]);
u32::from_le_bytes(buf)
};
self.last_doc_in_block += doc_delta as DocId; self.last_doc_in_block += doc_delta as DocId;
let doc_num_bits = self.owned_read.as_slice()[4]; let doc_num_bits = self.owned_read.get(0);
match self.skip_info { match self.skip_info {
IndexRecordOption::Basic => { IndexRecordOption::Basic => {
self.owned_read.advance(5); self.owned_read.advance(1);
self.block_info = BlockInfo::BitPacked { self.block_info = BlockInfo::BitPacked {
doc_num_bits, doc_num_bits,
tf_num_bits: 0, tf_num_bits: 0,
@@ -180,11 +174,11 @@ impl SkipReader {
}; };
} }
IndexRecordOption::WithFreqs => { IndexRecordOption::WithFreqs => {
let bytes = self.owned_read.as_slice(); let tf_num_bits = self.owned_read.get(1);
let tf_num_bits = bytes[5]; let block_wand_fieldnorm_id = self.owned_read.get(2);
let block_wand_fieldnorm_id = bytes[6]; let data = &self.owned_read.as_ref()[3..];
let (block_wand_term_freq, num_bytes) = read_u32_vint_no_advance(&bytes[7..]); let (block_wand_term_freq, num_bytes) = read_u32_vint_no_advance(data);
self.owned_read.advance(7 + num_bytes); self.owned_read.advance(3 + num_bytes);
self.block_info = BlockInfo::BitPacked { self.block_info = BlockInfo::BitPacked {
doc_num_bits, doc_num_bits,
tf_num_bits, tf_num_bits,
@@ -194,16 +188,13 @@ impl SkipReader {
}; };
} }
IndexRecordOption::WithFreqsAndPositions => { IndexRecordOption::WithFreqsAndPositions => {
let bytes = self.owned_read.as_slice(); let tf_num_bits = self.owned_read.get(1);
let tf_num_bits = bytes[5]; self.owned_read.advance(2);
let tf_sum = { let tf_sum = u32::deserialize(&mut self.owned_read).expect("Failed reading tf_sum");
let mut buf = [0; 4]; let block_wand_fieldnorm_id = self.owned_read.get(0);
buf.copy_from_slice(&bytes[6..10]); self.owned_read.advance(1);
u32::from_le_bytes(buf) let block_wand_term_freq =
}; VInt::deserialize_u64(&mut self.owned_read).unwrap() as u32;
let block_wand_fieldnorm_id = bytes[10];
let (block_wand_term_freq, num_bytes) = read_u32_vint_no_advance(&bytes[11..]);
self.owned_read.advance(11 + num_bytes);
self.block_info = BlockInfo::BitPacked { self.block_info = BlockInfo::BitPacked {
doc_num_bits, doc_num_bits,
tf_num_bits, tf_num_bits,
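Putting the byte reads on the left-hand side together, a `WithFreqsAndPositions` skip entry appears to be laid out as sketched below; this is a hypothetical standalone decoder inferred from those reads, not the crate's API:

```rust
// Inferred layout of one WithFreqsAndPositions skip entry:
// [doc_delta: u32 LE][doc_num_bits: u8][tf_num_bits: u8][tf_sum: u32 LE]
// [block_wand_fieldnorm_id: u8][block_wand_term_freq: VInt]
struct SkipEntry {
    doc_delta: u32,
    doc_num_bits: u8,
    tf_num_bits: u8,
    tf_sum: u32,
    block_wand_fieldnorm_id: u8,
}

fn read_u32_le(bytes: &[u8]) -> u32 {
    let mut buf = [0u8; 4];
    buf.copy_from_slice(&bytes[..4]);
    u32::from_le_bytes(buf)
}

fn read_skip_entry(bytes: &[u8]) -> SkipEntry {
    SkipEntry {
        doc_delta: read_u32_le(&bytes[0..]),
        doc_num_bits: bytes[4],
        tf_num_bits: bytes[5],
        tf_sum: read_u32_le(&bytes[6..]),
        block_wand_fieldnorm_id: bytes[10],
        // the VInt-encoded block_wand_term_freq follows at bytes[11..]
    }
}

fn main() {
    let mut buf = Vec::new();
    buf.extend_from_slice(&7u32.to_le_bytes()); // doc_delta
    buf.push(5); // doc_num_bits
    buf.push(3); // tf_num_bits
    buf.extend_from_slice(&42u32.to_le_bytes()); // tf_sum
    buf.push(9); // block_wand_fieldnorm_id
    let entry = read_skip_entry(&buf);
    assert_eq!(entry.doc_delta, 7);
    assert_eq!(entry.tf_sum, 42);
    assert_eq!(entry.doc_num_bits, 5);
    assert_eq!(entry.tf_num_bits, 3);
    assert_eq!(entry.block_wand_fieldnorm_id, 9);
}
```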
@@ -271,7 +262,7 @@ mod tests {
use super::BlockInfo; use super::BlockInfo;
use super::IndexRecordOption; use super::IndexRecordOption;
use super::{SkipReader, SkipSerializer}; use super::{SkipReader, SkipSerializer};
use crate::directory::OwnedBytes; use crate::directory::ReadOnlySource;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE; use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
#[test] #[test]
@@ -287,8 +278,11 @@ mod tests {
skip_serializer.data().to_owned() skip_serializer.data().to_owned()
}; };
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32; let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
let mut skip_reader = let mut skip_reader = SkipReader::new(
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::WithFreqs); ReadOnlySource::new(buf),
doc_freq,
IndexRecordOption::WithFreqs,
);
assert_eq!(skip_reader.last_doc_in_block(), 1u32); assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!( assert_eq!(
skip_reader.block_info, skip_reader.block_info,
@@ -329,8 +323,11 @@ mod tests {
skip_serializer.data().to_owned() skip_serializer.data().to_owned()
}; };
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32; let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
let mut skip_reader = let mut skip_reader = SkipReader::new(
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::Basic); ReadOnlySource::from(buf),
doc_freq,
IndexRecordOption::Basic,
);
assert_eq!(skip_reader.last_doc_in_block(), 1u32); assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!( assert_eq!(
skip_reader.block_info(), skip_reader.block_info(),
@@ -370,8 +367,11 @@ mod tests {
skip_serializer.data().to_owned() skip_serializer.data().to_owned()
}; };
let doc_freq = COMPRESSION_BLOCK_SIZE as u32; let doc_freq = COMPRESSION_BLOCK_SIZE as u32;
let mut skip_reader = let mut skip_reader = SkipReader::new(
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::Basic); ReadOnlySource::from(buf),
doc_freq,
IndexRecordOption::Basic,
);
assert_eq!(skip_reader.last_doc_in_block(), 1u32); assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!( assert_eq!(
skip_reader.block_info(), skip_reader.block_info(),
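The `read_block_info` hunk earlier in this file reads a small fixed header in the `WithFreqs` case: a little-endian u32 doc delta, one byte each for `doc_num_bits`, `tf_num_bits` and the block-wand fieldnorm id, then a vint-encoded block-wand term frequency. The standalone sketch below decodes that layout; the offsets follow the left-hand column of the diff, and the vint decoder is a generic LEB128-style placeholder rather than tantivy's exact `read_u32_vint_no_advance`.

fn read_vint_u32(bytes: &[u8]) -> (u32, usize) {
    // Placeholder varint decoder: 7 payload bits per byte, high bit as continuation flag.
    let mut result = 0u32;
    let mut shift = 0;
    for (i, &byte) in bytes.iter().enumerate() {
        result |= u32::from(byte & 0x7f) << shift;
        if byte & 0x80 == 0 {
            return (result, i + 1);
        }
        shift += 7;
    }
    (result, bytes.len())
}

fn decode_with_freqs_block_info(bytes: &[u8]) -> (u32, u8, u8, u8, u32, usize) {
    // 4-byte little-endian delta to the last doc of the block.
    let mut buf = [0u8; 4];
    buf.copy_from_slice(&bytes[..4]);
    let doc_delta = u32::from_le_bytes(buf);
    // One byte each: bit widths and the block-wand fieldnorm id.
    let doc_num_bits = bytes[4];
    let tf_num_bits = bytes[5];
    let block_wand_fieldnorm_id = bytes[6];
    // Variable-length block-wand term frequency.
    let (block_wand_term_freq, vint_len) = read_vint_u32(&bytes[7..]);
    (
        doc_delta,
        doc_num_bits,
        tf_num_bits,
        block_wand_fieldnorm_id,
        block_wand_term_freq,
        7 + vint_len,
    )
}

fn main() {
    // doc_delta = 300, doc_num_bits = 9, tf_num_bits = 3, fieldnorm id = 17, term freq = 5.
    let header = [44u8, 1, 0, 0, 9, 3, 17, 5];
    assert_eq!(decode_with_freqs_block_info(&header), (300, 9, 3, 17, 5, 8));
}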

View File

@@ -206,8 +206,8 @@ mod tests {
fn test_stack_long() { fn test_stack_long() {
let mut heap = MemoryArena::new(); let mut heap = MemoryArena::new();
let mut stack = ExpUnrolledLinkedList::new(); let mut stack = ExpUnrolledLinkedList::new();
let data: Vec<u32> = (0..100).collect(); let source: Vec<u32> = (0..100).collect();
for &el in &data { for &el in &source {
assert!(stack assert!(stack
.writer(&mut heap) .writer(&mut heap)
.write_u32::<LittleEndian>(el) .write_u32::<LittleEndian>(el)
@@ -221,7 +221,7 @@ mod tests {
result.push(LittleEndian::read_u32(&remaining[..4])); result.push(LittleEndian::read_u32(&remaining[..4]));
remaining = &remaining[4..]; remaining = &remaining[4..];
} }
assert_eq!(&result[..], &data[..]); assert_eq!(&result[..], &source[..]);
} }
#[test] #[test]

View File

@@ -83,7 +83,7 @@ mod tests {
let field = schema_builder.add_text_field("text", TEXT); let field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
index_writer.add_document(doc!(field=>"aaa")); index_writer.add_document(doc!(field=>"aaa"));
index_writer.add_document(doc!(field=>"bbb")); index_writer.add_document(doc!(field=>"bbb"));
index_writer.commit().unwrap(); index_writer.commit().unwrap();

View File

@@ -5,6 +5,7 @@ use crate::query::{BitSetDocSet, Explanation};
use crate::query::{Scorer, Weight}; use crate::query::{Scorer, Weight};
use crate::schema::{Field, IndexRecordOption}; use crate::schema::{Field, IndexRecordOption};
use crate::termdict::{TermDictionary, TermStreamer}; use crate::termdict::{TermDictionary, TermStreamer};
use crate::Result;
use crate::TantivyError; use crate::TantivyError;
use crate::{DocId, Score}; use crate::{DocId, Score};
use std::sync::Arc; use std::sync::Arc;
@@ -39,16 +40,16 @@ impl<A> Weight for AutomatonWeight<A>
where where
A: Automaton + Send + Sync + 'static, A: Automaton + Send + Sync + 'static,
{ {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> { fn scorer(&self, reader: &SegmentReader, boost: Score) -> Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc(); let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc); let mut doc_bitset = BitSet::with_max_value(max_doc);
let inverted_index = reader.inverted_index(self.field)?; let inverted_index = reader.inverted_index(self.field);
let term_dict = inverted_index.terms(); let term_dict = inverted_index.terms();
let mut term_stream = self.automaton_stream(term_dict); let mut term_stream = self.automaton_stream(term_dict);
while term_stream.advance() { while term_stream.advance() {
let term_info = term_stream.value(); let term_info = term_stream.value();
let mut block_segment_postings = inverted_index let mut block_segment_postings = inverted_index
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?; .read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic);
loop { loop {
let docs = block_segment_postings.docs(); let docs = block_segment_postings.docs();
if docs.is_empty() { if docs.is_empty() {
@@ -65,7 +66,7 @@ where
Ok(Box::new(const_scorer)) Ok(Box::new(const_scorer))
} }
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> { fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?; let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) == doc { if scorer.seek(doc) == doc {
Ok(Explanation::new("AutomatonScorer", 1.0)) Ok(Explanation::new("AutomatonScorer", 1.0))
@@ -90,7 +91,7 @@ mod tests {
let mut schema = Schema::builder(); let mut schema = Schema::builder();
let title = schema.add_text_field("title", STRING); let title = schema.add_text_field("title", STRING);
let index = Index::create_in_ram(schema.build()); let index = Index::create_in_ram(schema.build());
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(title=>"abc")); index_writer.add_document(doc!(title=>"abc"));
index_writer.add_document(doc!(title=>"bcd")); index_writer.add_document(doc!(title=>"bcd"));
index_writer.add_document(doc!(title=>"abcd")); index_writer.add_document(doc!(title=>"abcd"));

View File

@@ -52,7 +52,7 @@ impl BM25Weight {
} }
} }
pub fn for_terms(searcher: &Searcher, terms: &[Term]) -> crate::Result<BM25Weight> { pub fn for_terms(searcher: &Searcher, terms: &[Term]) -> BM25Weight {
assert!(!terms.is_empty(), "BM25 requires at least one term"); assert!(!terms.is_empty(), "BM25 requires at least one term");
let field = terms[0].field(); let field = terms[0].field();
for term in &terms[1..] { for term in &terms[1..] {
@@ -66,27 +66,25 @@ impl BM25Weight {
let mut total_num_tokens = 0u64; let mut total_num_tokens = 0u64;
let mut total_num_docs = 0u64; let mut total_num_docs = 0u64;
for segment_reader in searcher.segment_readers() { for segment_reader in searcher.segment_readers() {
let inverted_index = segment_reader.inverted_index(field)?; let inverted_index = segment_reader.inverted_index(field);
total_num_tokens += inverted_index.total_num_tokens(); total_num_tokens += inverted_index.total_num_tokens();
total_num_docs += u64::from(segment_reader.max_doc()); total_num_docs += u64::from(segment_reader.max_doc());
} }
let average_fieldnorm = total_num_tokens as Score / total_num_docs as Score; let average_fieldnorm = total_num_tokens as Score / total_num_docs as Score;
if terms.len() == 1 { if terms.len() == 1 {
let term_doc_freq = searcher.doc_freq(&terms[0])?; let term_doc_freq = searcher.doc_freq(&terms[0]);
Ok(BM25Weight::for_one_term( BM25Weight::for_one_term(term_doc_freq, total_num_docs, average_fieldnorm)
term_doc_freq,
total_num_docs,
average_fieldnorm,
))
} else { } else {
let mut idf_sum: Score = 0.0; let idf = terms
for term in terms { .iter()
let term_doc_freq = searcher.doc_freq(term)?; .map(|term| {
idf_sum += idf(term_doc_freq, total_num_docs); let term_doc_freq = searcher.doc_freq(term);
} idf(term_doc_freq, total_num_docs)
let idf_explain = Explanation::new("idf", idf_sum); })
Ok(BM25Weight::new(idf_explain, average_fieldnorm)) .sum::<Score>();
let idf_explain = Explanation::new("idf", idf);
BM25Weight::new(idf_explain, average_fieldnorm)
} }
} }
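In the multi-term branch above, one side accumulates per-term idf values in a loop while the other folds them with `.sum::<Score>()`; the resulting weight uses the same summed idf either way. A standalone sketch of that computation, assuming the classic BM25 idf formula for the `idf` helper (which is defined elsewhere in the crate) and purely hypothetical document frequencies:

// Assumed idf: the Lucene/BM25 formula ln(1 + (N - df + 0.5) / (df + 0.5)).
fn idf(doc_freq: u64, doc_count: u64) -> f32 {
    let x = (doc_count as f32 - doc_freq as f32 + 0.5) / (doc_freq as f32 + 0.5);
    (1.0 + x).ln()
}

fn main() {
    let total_num_docs = 1_000u64;
    // Hypothetical per-term document frequencies for a three-term query.
    let doc_freqs = [120u64, 45, 3];
    let idf_sum: f32 = doc_freqs.iter().map(|&df| idf(df, total_num_docs)).sum();
    println!("summed idf fed into the BM25 weight: {}", idf_sum);
}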

View File

@@ -4,6 +4,19 @@ use crate::{DocId, DocSet, Score, TERMINATED};
use std::ops::Deref; use std::ops::Deref;
use std::ops::DerefMut; use std::ops::DerefMut;
fn is_sorted<I: Iterator<Item = DocId>>(mut it: I) -> bool {
if let Some(first) = it.next() {
let mut prev = first;
for doc in it {
if doc < prev {
return false;
}
prev = doc;
}
}
true
}
/// Takes term_scorers sorted by their current doc() and a threshold. /// Takes term_scorers sorted by their current doc() and a threshold.
/// Returns (pivot_len, pivot_ord) defined as follows: /// Returns (pivot_len, pivot_ord) defined as follows:
/// - `pivot_doc` lowest document that has a chance of exceeding (>) the threshold score. /// - `pivot_doc` lowest document that has a chance of exceeding (>) the threshold score.
@@ -42,12 +55,37 @@ fn find_pivot_doc(
Some((before_pivot_len, pivot_len, pivot_doc)) Some((before_pivot_len, pivot_len, pivot_doc))
} }
struct TermScorerWithMaxScore<'a> {
scorer: &'a mut TermScorer,
max_score: Score,
}
impl<'a> From<&'a mut TermScorer> for TermScorerWithMaxScore<'a> {
fn from(scorer: &'a mut TermScorer) -> Self {
let max_score = scorer.max_score();
TermScorerWithMaxScore { scorer, max_score }
}
}
impl<'a> Deref for TermScorerWithMaxScore<'a> {
type Target = TermScorer;
fn deref(&self) -> &Self::Target {
self.scorer
}
}
impl<'a> DerefMut for TermScorerWithMaxScore<'a> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.scorer
}
}
// Before and after calling this method, scorers need to be sorted by their `.doc()`. // Before and after calling this method, scorers need to be sorted by their `.doc()`.
fn block_max_was_too_low_advance_one_scorer( fn block_max_was_too_low_advance_one_scorer(
scorers: &mut Vec<TermScorerWithMaxScore>, scorers: &mut Vec<TermScorerWithMaxScore>,
pivot_len: usize, pivot_len: usize,
) { ) {
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
let mut scorer_to_seek = pivot_len - 1; let mut scorer_to_seek = pivot_len - 1;
let mut doc_to_seek_after = scorers[scorer_to_seek].doc(); let mut doc_to_seek_after = scorers[scorer_to_seek].doc();
for scorer_ord in (0..pivot_len - 1).rev() { for scorer_ord in (0..pivot_len - 1).rev() {
@@ -64,7 +102,6 @@ fn block_max_was_too_low_advance_one_scorer(
} }
scorers[scorer_to_seek].seek(doc_to_seek_after + 1); scorers[scorer_to_seek].seek(doc_to_seek_after + 1);
restore_ordering(scorers, scorer_to_seek); restore_ordering(scorers, scorer_to_seek);
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
} }
// Given a list of term_scorers and a `ord` and assuming that `term_scorers[ord]` is sorted // Given a list of term_scorers and a `ord` and assuming that `term_scorers[ord]` is sorted
@@ -140,99 +177,64 @@ pub fn block_wand(
.map(TermScorerWithMaxScore::from) .map(TermScorerWithMaxScore::from)
.collect(); .collect();
scorers.sort_by_key(|scorer| scorer.doc()); scorers.sort_by_key(|scorer| scorer.doc());
// At this point we need to ensure that the scorers are sorted! loop {
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc()))); // At this point we need to ensure that the scorers are sorted!
while let Some((before_pivot_len, pivot_len, pivot_doc)) =
find_pivot_doc(&scorers[..], threshold)
{
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc()))); debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
debug_assert_ne!(pivot_doc, TERMINATED); if let Some((before_pivot_len, pivot_len, pivot_doc)) =
debug_assert!(before_pivot_len < pivot_len); find_pivot_doc(&scorers[..], threshold)
{
debug_assert_ne!(pivot_doc, TERMINATED);
debug_assert!(before_pivot_len < pivot_len);
let block_max_score_upperbound: Score = scorers[..pivot_len] let block_max_score_upperbound: Score = scorers[..pivot_len]
.iter_mut() .iter_mut()
.map(|scorer| { .map(|scorer| {
scorer.shallow_seek(pivot_doc); scorer.shallow_seek(pivot_doc);
scorer.block_max_score() scorer.block_max_score()
}) })
.sum(); .sum();
// Beware after shallow advance, skip readers can be in advance compared to // Beware after shallow advance, skip readers can be in advance compared to
// the segment posting lists. // the segment posting lists.
//
// `block_segment_postings.load_block()` need to be called separately.
if block_max_score_upperbound <= threshold {
// Block max condition was not reached
// We could get away by simply advancing the scorers to DocId + 1 but it would
// be inefficient. The optimization requires proper explanation and was
// isolated in a different function.
block_max_was_too_low_advance_one_scorer(&mut scorers, pivot_len);
continue;
}
// Block max condition is observed.
//
// Let's try and advance all scorers before the pivot to the pivot.
if !align_scorers(&mut scorers, pivot_doc, before_pivot_len) {
// At least one of the scorers does not contain the pivot.
// //
// Let's stop scoring this pivot and go through the pivot selection again. // `block_segment_postings.load_block()` need to be called separately.
// Note that the current pivot is not necessarily a bad candidate and it if block_max_score_upperbound <= threshold {
// may be picked again. // Block max condition was not reached
continue; // We could get away by simply advancing the scorers to DocId + 1 but it would
} // be inefficient. The optimization requires proper explanation and was
// isolated in a different function.
// At this point, all scorers are positioned on the doc. block_max_was_too_low_advance_one_scorer(&mut scorers, pivot_len);
let score = scorers[..pivot_len] continue;
.iter_mut()
.map(|scorer| scorer.score())
.sum();
if score > threshold {
threshold = callback(pivot_doc, score);
}
// let's advance all of the scorers that are currently positioned on the pivot.
advance_all_scorers_on_pivot(&mut scorers, pivot_len);
}
}
struct TermScorerWithMaxScore<'a> {
scorer: &'a mut TermScorer,
max_score: Score,
}
impl<'a> From<&'a mut TermScorer> for TermScorerWithMaxScore<'a> {
fn from(scorer: &'a mut TermScorer) -> Self {
let max_score = scorer.max_score();
TermScorerWithMaxScore { scorer, max_score }
}
}
impl<'a> Deref for TermScorerWithMaxScore<'a> {
type Target = TermScorer;
fn deref(&self) -> &Self::Target {
self.scorer
}
}
impl<'a> DerefMut for TermScorerWithMaxScore<'a> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.scorer
}
}
fn is_sorted<I: Iterator<Item = DocId>>(mut it: I) -> bool {
if let Some(first) = it.next() {
let mut prev = first;
for doc in it {
if doc < prev {
return false;
} }
prev = doc;
// Block max condition is observed.
//
// Let's try and advance all scorers before the pivot to the pivot.
if !align_scorers(&mut scorers, pivot_doc, before_pivot_len) {
// At least one of the scorers does not contain the pivot.
//
// Let's stop scoring this pivot and go through the pivot selection again.
// Note that the current pivot is not necessarily a bad candidate and it
// may be picked again.
continue;
}
// At this point, all scorers are positioned on the doc.
let score = scorers[..pivot_len]
.iter_mut()
.map(|scorer| scorer.score())
.sum();
if score > threshold {
threshold = callback(pivot_doc, score);
}
// let's advance all of the scorers that are currently positioned on the pivot.
advance_all_scorers_on_pivot(&mut scorers, pivot_len);
} else {
return;
} }
} }
true
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::query::score_combiner::SumCombiner; use crate::query::score_combiner::SumCombiner;
@@ -246,21 +248,17 @@ mod tests {
use std::iter; use std::iter;
struct Float(Score); struct Float(Score);
impl Eq for Float {} impl Eq for Float {}
impl PartialEq for Float { impl PartialEq for Float {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.cmp(&other) == Ordering::Equal self.cmp(&other) == Ordering::Equal
} }
} }
impl PartialOrd for Float { impl PartialOrd for Float {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other)) Some(self.cmp(other))
} }
} }
impl Ord for Float { impl Ord for Float {
fn cmp(&self, other: &Self) -> Ordering { fn cmp(&self, other: &Self) -> Ordering {
other.0.partial_cmp(&self.0).unwrap_or(Ordering::Equal) other.0.partial_cmp(&self.0).unwrap_or(Ordering::Equal)
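The `Float` wrapper in the test module above exists because `f32` is only `PartialOrd` (NaN breaks total ordering), so scores cannot be used directly where `Ord` is required, for example as keys of a `BinaryHeap`. A self-contained sketch of the same trick, with the reversed comparison turning the standard max-heap into a min-heap of scores:

use std::cmp::Ordering;
use std::collections::BinaryHeap;

struct Float(f32);

impl PartialEq for Float {
    fn eq(&self, other: &Self) -> bool {
        self.cmp(other) == Ordering::Equal
    }
}

impl Eq for Float {}

impl PartialOrd for Float {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Float {
    fn cmp(&self, other: &Self) -> Ordering {
        // Reversed: the "greatest" element according to Ord is the smallest score.
        other.0.partial_cmp(&self.0).unwrap_or(Ordering::Equal)
    }
}

fn main() {
    let mut heap = BinaryHeap::new();
    for score in [0.3f32, 1.2, 0.7] {
        heap.push(Float(score));
    }
    // BinaryHeap is a max-heap, so the reversed ordering pops the lowest score first.
    assert_eq!(heap.pop().map(|f| f.0), Some(0.3));
}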

View File

@@ -83,7 +83,7 @@ use std::collections::BTreeSet;
/// ]; /// ];
/// // Make a BooleanQuery equivalent to /// // Make a BooleanQuery equivalent to
/// // title:+diary title:-girl /// // title:+diary title:-girl
/// let diary_must_and_girl_mustnot = BooleanQuery::new(queries_with_occurs1); /// let diary_must_and_girl_mustnot = BooleanQuery::from(queries_with_occurs1);
/// let count1 = searcher.search(&diary_must_and_girl_mustnot, &Count)?; /// let count1 = searcher.search(&diary_must_and_girl_mustnot, &Count)?;
/// assert_eq!(count1, 1); /// assert_eq!(count1, 1);
/// ///
@@ -93,7 +93,7 @@ use std::collections::BTreeSet;
/// IndexRecordOption::Basic, /// IndexRecordOption::Basic,
/// )); /// ));
/// // "title:diary OR title:cow" /// // "title:diary OR title:cow"
/// let title_diary_or_cow = BooleanQuery::new(vec![ /// let title_diary_or_cow = BooleanQuery::from(vec![
/// (Occur::Should, diary_term_query.box_clone()), /// (Occur::Should, diary_term_query.box_clone()),
/// (Occur::Should, cow_term_query), /// (Occur::Should, cow_term_query),
/// ]); /// ]);
@@ -108,7 +108,7 @@ use std::collections::BTreeSet;
/// // You can combine subqueries of different types into 1 BooleanQuery: /// // You can combine subqueries of different types into 1 BooleanQuery:
/// // `TermQuery` and `PhraseQuery` /// // `TermQuery` and `PhraseQuery`
/// // "title:diary OR "dairy cow" /// // "title:diary OR "dairy cow"
/// let term_of_phrase_query = BooleanQuery::new(vec![ /// let term_of_phrase_query = BooleanQuery::from(vec![
/// (Occur::Should, diary_term_query.box_clone()), /// (Occur::Should, diary_term_query.box_clone()),
/// (Occur::Should, phrase_query.box_clone()), /// (Occur::Should, phrase_query.box_clone()),
/// ]); /// ]);
@@ -117,7 +117,7 @@ use std::collections::BTreeSet;
/// ///
/// // You can nest one BooleanQuery inside another /// // You can nest one BooleanQuery inside another
/// // body:found AND ("title:diary OR "dairy cow") /// // body:found AND ("title:diary OR "dairy cow")
/// let nested_query = BooleanQuery::new(vec![ /// let nested_query = BooleanQuery::from(vec![
/// (Occur::Must, body_term_query), /// (Occur::Must, body_term_query),
/// (Occur::Must, Box::new(term_of_phrase_query)) /// (Occur::Must, Box::new(term_of_phrase_query))
/// ]); /// ]);
@@ -143,7 +143,7 @@ impl Clone for BooleanQuery {
impl From<Vec<(Occur, Box<dyn Query>)>> for BooleanQuery { impl From<Vec<(Occur, Box<dyn Query>)>> for BooleanQuery {
fn from(subqueries: Vec<(Occur, Box<dyn Query>)>) -> BooleanQuery { fn from(subqueries: Vec<(Occur, Box<dyn Query>)>) -> BooleanQuery {
BooleanQuery::new(subqueries) BooleanQuery { subqueries }
} }
} }
@@ -167,23 +167,6 @@ impl Query for BooleanQuery {
} }
impl BooleanQuery { impl BooleanQuery {
/// Creates a new boolean query.
pub fn new(subqueries: Vec<(Occur, Box<dyn Query>)>) -> BooleanQuery {
BooleanQuery { subqueries }
}
/// Returns the intersection of the queries.
pub fn intersection(queries: Vec<Box<dyn Query>>) -> BooleanQuery {
let subqueries = queries.into_iter().map(|s| (Occur::Must, s)).collect();
BooleanQuery::new(subqueries)
}
/// Returns the union of the queries.
pub fn union(queries: Vec<Box<dyn Query>>) -> BooleanQuery {
let subqueries = queries.into_iter().map(|s| (Occur::Should, s)).collect();
BooleanQuery::new(subqueries)
}
/// Helper method to create a boolean query matching a given list of terms. /// Helper method to create a boolean query matching a given list of terms.
/// The resulting query is a disjunction of the terms. /// The resulting query is a disjunction of the terms.
pub fn new_multiterms_query(terms: Vec<Term>) -> BooleanQuery { pub fn new_multiterms_query(terms: Vec<Term>) -> BooleanQuery {
@@ -195,7 +178,7 @@ impl BooleanQuery {
(Occur::Should, term_query) (Occur::Should, term_query)
}) })
.collect(); .collect();
BooleanQuery::new(occur_term_queries) BooleanQuery::from(occur_term_queries)
} }
/// Deconstructed view of the clauses making up this query. /// Deconstructed view of the clauses making up this query.
@@ -203,77 +186,3 @@ impl BooleanQuery {
&self.subqueries[..] &self.subqueries[..]
} }
} }
#[cfg(test)]
mod tests {
use super::BooleanQuery;
use crate::collector::DocSetCollector;
use crate::query::{QueryClone, TermQuery};
use crate::schema::{IndexRecordOption, Schema, TEXT};
use crate::{DocAddress, Index, Term};
fn create_test_index() -> crate::Result<Index> {
let mut schema_builder = Schema::builder();
let text = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut writer = index.writer_for_tests().unwrap();
writer.add_document(doc!(text=>"b c"));
writer.add_document(doc!(text=>"a c"));
writer.add_document(doc!(text=>"a b"));
writer.add_document(doc!(text=>"a d"));
writer.commit()?;
Ok(index)
}
#[test]
fn test_union() -> crate::Result<()> {
let index = create_test_index()?;
let searcher = index.reader()?.searcher();
let text = index.schema().get_field("text").unwrap();
let term_a = TermQuery::new(Term::from_field_text(text, "a"), IndexRecordOption::Basic);
let term_d = TermQuery::new(Term::from_field_text(text, "d"), IndexRecordOption::Basic);
let union_ad = BooleanQuery::union(vec![term_a.box_clone(), term_d.box_clone()]);
let docs = searcher.search(&union_ad, &DocSetCollector)?;
assert_eq!(
docs,
vec![
DocAddress(0u32, 1u32),
DocAddress(0u32, 2u32),
DocAddress(0u32, 3u32)
]
.into_iter()
.collect()
);
Ok(())
}
#[test]
fn test_intersection() -> crate::Result<()> {
let index = create_test_index()?;
let searcher = index.reader()?.searcher();
let text = index.schema().get_field("text").unwrap();
let term_a = TermQuery::new(Term::from_field_text(text, "a"), IndexRecordOption::Basic);
let term_b = TermQuery::new(Term::from_field_text(text, "b"), IndexRecordOption::Basic);
let term_c = TermQuery::new(Term::from_field_text(text, "c"), IndexRecordOption::Basic);
let intersection_ab =
BooleanQuery::intersection(vec![term_a.box_clone(), term_b.box_clone()]);
let intersection_ac =
BooleanQuery::intersection(vec![term_a.box_clone(), term_c.box_clone()]);
let intersection_bc =
BooleanQuery::intersection(vec![term_b.box_clone(), term_c.box_clone()]);
{
let docs = searcher.search(&intersection_ab, &DocSetCollector)?;
assert_eq!(docs, vec![DocAddress(0u32, 2u32)].into_iter().collect());
}
{
let docs = searcher.search(&intersection_ac, &DocSetCollector)?;
assert_eq!(docs, vec![DocAddress(0u32, 1u32)].into_iter().collect());
}
{
let docs = searcher.search(&intersection_bc, &DocSetCollector)?;
assert_eq!(docs, vec![DocAddress(0u32, 0u32)].into_iter().collect());
}
Ok(())
}
}
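Both sides of this file keep the `From<Vec<(Occur, Box<dyn Query>)>>` conversion, so a disjunction can still be assembled by hand, much like `new_multiterms_query` above. A minimal sketch, assuming the tantivy crate from this tree as a dependency and a `Field` taken from an existing schema:

use tantivy::query::{BooleanQuery, Occur, Query, TermQuery};
use tantivy::schema::{Field, IndexRecordOption};
use tantivy::Term;

// Builds "word1 OR word2 OR ..." over a single text field.
fn any_of_terms(field: Field, words: &[&str]) -> BooleanQuery {
    let subqueries: Vec<(Occur, Box<dyn Query>)> = words
        .iter()
        .map(|&word| {
            let term = Term::from_field_text(field, word);
            let query: Box<dyn Query> = Box::new(TermQuery::new(term, IndexRecordOption::Basic));
            (Occur::Should, query)
        })
        .collect();
    BooleanQuery::from(subqueries)
}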

View File

@@ -32,12 +32,14 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field => "a b c")); {
index_writer.add_document(doc!(text_field => "a c")); index_writer.add_document(doc!(text_field => "a b c"));
index_writer.add_document(doc!(text_field => "b c")); index_writer.add_document(doc!(text_field => "a c"));
index_writer.add_document(doc!(text_field => "a b c d")); index_writer.add_document(doc!(text_field => "b c"));
index_writer.add_document(doc!(text_field => "d")); index_writer.add_document(doc!(text_field => "a b c d"));
index_writer.add_document(doc!(text_field => "d"));
}
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
(index, text_field) (index, text_field)
@@ -132,29 +134,29 @@ mod tests {
.collect::<Vec<DocId>>() .collect::<Vec<DocId>>()
}; };
{ {
let boolean_query = BooleanQuery::new(vec![(Occur::Must, make_term_query("a"))]); let boolean_query = BooleanQuery::from(vec![(Occur::Must, make_term_query("a"))]);
assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]); assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]);
} }
{ {
let boolean_query = BooleanQuery::new(vec![(Occur::Should, make_term_query("a"))]); let boolean_query = BooleanQuery::from(vec![(Occur::Should, make_term_query("a"))]);
assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]); assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]);
} }
{ {
let boolean_query = BooleanQuery::new(vec![ let boolean_query = BooleanQuery::from(vec![
(Occur::Should, make_term_query("a")), (Occur::Should, make_term_query("a")),
(Occur::Should, make_term_query("b")), (Occur::Should, make_term_query("b")),
]); ]);
assert_eq!(matching_docs(&boolean_query), vec![0, 1, 2, 3]); assert_eq!(matching_docs(&boolean_query), vec![0, 1, 2, 3]);
} }
{ {
let boolean_query = BooleanQuery::new(vec![ let boolean_query = BooleanQuery::from(vec![
(Occur::Must, make_term_query("a")), (Occur::Must, make_term_query("a")),
(Occur::Should, make_term_query("b")), (Occur::Should, make_term_query("b")),
]); ]);
assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]); assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]);
} }
{ {
let boolean_query = BooleanQuery::new(vec![ let boolean_query = BooleanQuery::from(vec![
(Occur::Must, make_term_query("a")), (Occur::Must, make_term_query("a")),
(Occur::Should, make_term_query("b")), (Occur::Should, make_term_query("b")),
(Occur::MustNot, make_term_query("d")), (Occur::MustNot, make_term_query("d")),
@@ -162,7 +164,7 @@ mod tests {
assert_eq!(matching_docs(&boolean_query), vec![0, 1]); assert_eq!(matching_docs(&boolean_query), vec![0, 1]);
} }
{ {
let boolean_query = BooleanQuery::new(vec![(Occur::MustNot, make_term_query("d"))]); let boolean_query = BooleanQuery::from(vec![(Occur::MustNot, make_term_query("d"))]);
assert_eq!(matching_docs(&boolean_query), Vec::<u32>::new()); assert_eq!(matching_docs(&boolean_query), Vec::<u32>::new());
} }
} }
@@ -192,7 +194,7 @@ mod tests {
let score_doc_4: Score; // score of doc 4 should not be influenced by exclusion let score_doc_4: Score; // score of doc 4 should not be influenced by exclusion
{ {
let boolean_query_no_excluded = let boolean_query_no_excluded =
BooleanQuery::new(vec![(Occur::Must, make_term_query("d"))]); BooleanQuery::from(vec![(Occur::Must, make_term_query("d"))]);
let topdocs_no_excluded = matching_topdocs(&boolean_query_no_excluded); let topdocs_no_excluded = matching_topdocs(&boolean_query_no_excluded);
assert_eq!(topdocs_no_excluded.len(), 2); assert_eq!(topdocs_no_excluded.len(), 2);
let (top_score, top_doc) = topdocs_no_excluded[0]; let (top_score, top_doc) = topdocs_no_excluded[0];
@@ -202,7 +204,7 @@ mod tests {
} }
{ {
let boolean_query_two_excluded = BooleanQuery::new(vec![ let boolean_query_two_excluded = BooleanQuery::from(vec![
(Occur::Must, make_term_query("d")), (Occur::Must, make_term_query("d")),
(Occur::MustNot, make_term_query("a")), (Occur::MustNot, make_term_query("a")),
(Occur::MustNot, make_term_query("b")), (Occur::MustNot, make_term_query("b")),
@@ -222,7 +224,7 @@ mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field => "a b c")); index_writer.add_document(doc!(text_field => "a b c"));
index_writer.add_document(doc!(text_field => "a c")); index_writer.add_document(doc!(text_field => "a c"));
index_writer.add_document(doc!(text_field => "b c")); index_writer.add_document(doc!(text_field => "b c"));
@@ -239,7 +241,7 @@ mod tests {
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let boolean_query = let boolean_query =
BooleanQuery::new(vec![(Occur::Should, term_a), (Occur::Should, term_b)]); BooleanQuery::from(vec![(Occur::Should, term_a), (Occur::Should, term_b)]);
let boolean_weight = boolean_query.weight(&searcher, true).unwrap(); let boolean_weight = boolean_query.weight(&searcher, true).unwrap();
{ {
let mut boolean_scorer = boolean_weight let mut boolean_scorer = boolean_weight
@@ -279,7 +281,7 @@ mod tests {
}; };
{ {
let boolean_query = BooleanQuery::new(vec![ let boolean_query = BooleanQuery::from(vec![
(Occur::Must, make_term_query("a")), (Occur::Must, make_term_query("a")),
(Occur::Must, make_term_query("b")), (Occur::Must, make_term_query("b")),
]); ]);
@@ -288,29 +290,4 @@ mod tests {
assert_nearly_equals!(scores[1], 0.84699446); assert_nearly_equals!(scores[1], 0.84699446);
} }
} }
#[test]
pub fn test_explain() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 5_000_000)?;
index_writer.add_document(doc!(text=>"a"));
index_writer.add_document(doc!(text=>"b"));
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let term_a: Box<dyn Query> = Box::new(TermQuery::new(
Term::from_field_text(text, "a"),
IndexRecordOption::Basic,
));
let term_b: Box<dyn Query> = Box::new(TermQuery::new(
Term::from_field_text(text, "b"),
IndexRecordOption::Basic,
));
let query = BooleanQuery::from(vec![(Occur::Should, term_a), (Occur::Should, term_b)]);
let explanation = query.explain(&searcher, DocAddress(0, 0u32))?;
assert_nearly_equals!(explanation.value(), 0.6931472f32);
Ok(())
}
} }

View File

@@ -144,7 +144,7 @@ mod tests {
fn test_boost_query_explain() { fn test_boost_query_explain() {
let schema = Schema::builder().build(); let schema = Schema::builder().build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(Document::new()); index_writer.add_document(Document::new());
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
@@ -153,7 +153,7 @@ mod tests {
let explanation = query.explain(&searcher, DocAddress(0, 0u32)).unwrap(); let explanation = query.explain(&searcher, DocAddress(0, 0u32)).unwrap();
assert_eq!( assert_eq!(
explanation.to_pretty_json(), explanation.to_pretty_json(),
"{\n \"value\": 0.2,\n \"description\": \"Boost x0.2 of ...\",\n \"details\": [\n {\n \"value\": 1.0,\n \"description\": \"AllQuery\",\n \"context\": []\n }\n ],\n \"context\": []\n}" "{\n \"value\": 0.2,\n \"description\": \"Boost x0.2 of ...\",\n \"details\": [\n {\n \"value\": 1.0,\n \"description\": \"AllQuery\"\n }\n ]\n}"
) )
} }
} }

View File

@@ -17,7 +17,6 @@ pub struct Explanation {
description: String, description: String,
#[serde(skip_serializing_if = "Vec::is_empty")] #[serde(skip_serializing_if = "Vec::is_empty")]
details: Vec<Explanation>, details: Vec<Explanation>,
context: Vec<String>,
} }
impl fmt::Debug for Explanation { impl fmt::Debug for Explanation {
@@ -33,7 +32,6 @@ impl Explanation {
value, value,
description: description.to_string(), description: description.to_string(),
details: vec![], details: vec![],
context: vec![],
} }
} }
@@ -49,11 +47,6 @@ impl Explanation {
self.details.push(child_explanation); self.details.push(child_explanation);
} }
/// Adds some extra context to the explanation.
pub fn add_context(&mut self, context: String) {
self.context.push(context);
}
/// Shortcut for `self.details.push(Explanation::new(name, value));` /// Shortcut for `self.details.push(Explanation::new(name, value));`
pub fn add_const<T: ToString>(&mut self, name: T, value: Score) { pub fn add_const<T: ToString>(&mut self, name: T, value: Score) {
self.details.push(Explanation::new(name, value)); self.details.push(Explanation::new(name, value));
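Dropping the `context` field is what shrinks the expected JSON in `test_boost_query_explain` above: with `#[serde(skip_serializing_if = "Vec::is_empty")]`, an empty vector simply vanishes from the output, whereas a plain field is always emitted, even when empty. A standalone illustration, assuming `serde` (with the derive feature) and `serde_json` as dependencies:

use serde::Serialize;

#[derive(Serialize)]
struct Explanation {
    value: f32,
    description: String,
    // Empty details are pruned from the serialized JSON.
    #[serde(skip_serializing_if = "Vec::is_empty")]
    details: Vec<Explanation>,
}

fn main() {
    let leaf = Explanation {
        value: 1.0,
        description: "AllQuery".to_string(),
        details: vec![],
    };
    // Prints {"value":1.0,"description":"AllQuery"} with no empty "details" array.
    println!("{}", serde_json::to_string(&leaf).unwrap());
}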

View File

@@ -177,7 +177,7 @@ mod test {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
index_writer.add_document(doc!( index_writer.add_document(doc!(
country_field => "japan", country_field => "japan",
)); ));

View File

@@ -46,7 +46,7 @@ pub(crate) use self::fuzzy_query::DFAWrapper;
pub use self::fuzzy_query::FuzzyTermQuery; pub use self::fuzzy_query::FuzzyTermQuery;
pub use self::intersection::intersect_scorers; pub use self::intersection::intersect_scorers;
pub use self::phrase_query::PhraseQuery; pub use self::phrase_query::PhraseQuery;
pub use self::query::{Query, QueryClone}; pub use self::query::Query;
pub use self::query_parser::QueryParser; pub use self::query_parser::QueryParser;
pub use self::query_parser::QueryParserError; pub use self::query_parser::QueryParserError;
pub use self::range_query::RangeQuery; pub use self::range_query::RangeQuery;

View File

@@ -24,7 +24,7 @@ pub mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for &text in texts { for &text in texts {
let doc = doc!(text_field=>text); let doc = doc!(text_field=>text);
index_writer.add_document(doc); index_writer.add_document(doc);
@@ -135,7 +135,7 @@ pub mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b c")); index_writer.add_document(doc!(text_field=>"a b c"));
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
@@ -186,7 +186,7 @@ pub mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"b")); index_writer.add_document(doc!(text_field=>"b"));
index_writer.add_document(doc!(text_field=>"a b")); index_writer.add_document(doc!(text_field=>"a b"));
index_writer.add_document(doc!(text_field=>"b a")); index_writer.add_document(doc!(text_field=>"b a"));
@@ -217,7 +217,7 @@ pub mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b c d e f g h")); index_writer.add_document(doc!(text_field=>"a b c d e f g h"));
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }

View File

@@ -95,7 +95,7 @@ impl PhraseQuery {
))); )));
} }
let terms = self.phrase_terms(); let terms = self.phrase_terms();
let bm25_weight = BM25Weight::for_terms(searcher, &terms)?; let bm25_weight = BM25Weight::for_terms(searcher, &terms);
Ok(PhraseWeight::new( Ok(PhraseWeight::new(
self.phrase_terms.clone(), self.phrase_terms.clone(),
bm25_weight, bm25_weight,

View File

@@ -9,8 +9,8 @@ use crate::query::Weight;
use crate::query::{EmptyScorer, Explanation}; use crate::query::{EmptyScorer, Explanation};
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::schema::Term; use crate::schema::Term;
use crate::Score;
use crate::{DocId, DocSet}; use crate::{DocId, DocSet};
use crate::{Result, Score};
pub struct PhraseWeight { pub struct PhraseWeight {
phrase_terms: Vec<(usize, Term)>, phrase_terms: Vec<(usize, Term)>,
@@ -32,7 +32,7 @@ impl PhraseWeight {
} }
} }
fn fieldnorm_reader(&self, reader: &SegmentReader) -> crate::Result<FieldNormReader> { fn fieldnorm_reader(&self, reader: &SegmentReader) -> FieldNormReader {
let field = self.phrase_terms[0].1.field(); let field = self.phrase_terms[0].1.field();
reader.get_fieldnorms_reader(field) reader.get_fieldnorms_reader(field)
} }
@@ -41,15 +41,15 @@ impl PhraseWeight {
&self, &self,
reader: &SegmentReader, reader: &SegmentReader,
boost: Score, boost: Score,
) -> crate::Result<Option<PhraseScorer<SegmentPostings>>> { ) -> Result<Option<PhraseScorer<SegmentPostings>>> {
let similarity_weight = self.similarity_weight.boost_by(boost); let similarity_weight = self.similarity_weight.boost_by(boost);
let fieldnorm_reader = self.fieldnorm_reader(reader)?; let fieldnorm_reader = self.fieldnorm_reader(reader);
if reader.has_deletes() { if reader.has_deletes() {
let mut term_postings_list = Vec::new(); let mut term_postings_list = Vec::new();
for &(offset, ref term) in &self.phrase_terms { for &(offset, ref term) in &self.phrase_terms {
if let Some(postings) = reader if let Some(postings) = reader
.inverted_index(term.field())? .inverted_index(term.field())
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
{ {
term_postings_list.push((offset, postings)); term_postings_list.push((offset, postings));
} else { } else {
@@ -66,8 +66,8 @@ impl PhraseWeight {
let mut term_postings_list = Vec::new(); let mut term_postings_list = Vec::new();
for &(offset, ref term) in &self.phrase_terms { for &(offset, ref term) in &self.phrase_terms {
if let Some(postings) = reader if let Some(postings) = reader
.inverted_index(term.field())? .inverted_index(term.field())
.read_postings_no_deletes(&term, IndexRecordOption::WithFreqsAndPositions)? .read_postings_no_deletes(&term, IndexRecordOption::WithFreqsAndPositions)
{ {
term_postings_list.push((offset, postings)); term_postings_list.push((offset, postings));
} else { } else {
@@ -85,7 +85,7 @@ impl PhraseWeight {
} }
impl Weight for PhraseWeight { impl Weight for PhraseWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> { fn scorer(&self, reader: &SegmentReader, boost: Score) -> Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? { if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(Box::new(scorer)) Ok(Box::new(scorer))
} else { } else {
@@ -93,7 +93,7 @@ impl Weight for PhraseWeight {
} }
} }
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> { fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let scorer_opt = self.phrase_scorer(reader, 1.0)?; let scorer_opt = self.phrase_scorer(reader, 1.0)?;
if scorer_opt.is_none() { if scorer_opt.is_none() {
return Err(does_not_match(doc)); return Err(does_not_match(doc));
@@ -102,7 +102,7 @@ impl Weight for PhraseWeight {
if scorer.seek(doc) != doc { if scorer.seek(doc) != doc {
return Err(does_not_match(doc)); return Err(does_not_match(doc));
} }
let fieldnorm_reader = self.fieldnorm_reader(reader)?; let fieldnorm_reader = self.fieldnorm_reader(reader);
let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc); let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
let phrase_count = scorer.phrase_count(); let phrase_count = scorer.phrase_count();
let mut explanation = Explanation::new("Phrase Scorer", scorer.score()); let mut explanation = Explanation::new("Phrase Scorer", scorer.score());

View File

@@ -40,7 +40,7 @@ use std::fmt;
/// ///
/// When implementing a new type of `Query`, it is normal to implement a /// When implementing a new type of `Query`, it is normal to implement a
/// dedicated `Query`, `Weight` and `Scorer`. /// dedicated `Query`, `Weight` and `Scorer`.
pub trait Query: QueryClone + Send + Sync + downcast_rs::Downcast + fmt::Debug { pub trait Query: QueryClone + downcast_rs::Downcast + fmt::Debug {
/// Create the weight associated to a query. /// Create the weight associated to a query.
/// ///
/// If scoring is not required, setting `scoring_enabled` to `false` /// If scoring is not required, setting `scoring_enabled` to `false`
@@ -71,9 +71,7 @@ pub trait Query: QueryClone + Send + Sync + downcast_rs::Downcast + fmt::Debug {
fn query_terms(&self, _term_set: &mut BTreeSet<Term>) {} fn query_terms(&self, _term_set: &mut BTreeSet<Term>) {}
} }
/// Implements `box_clone`.
pub trait QueryClone { pub trait QueryClone {
/// Returns a boxed clone of `self`.
fn box_clone(&self) -> Box<dyn Query>; fn box_clone(&self) -> Box<dyn Query>;
} }
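`box_clone` is the usual workaround for `Clone` not being object-safe: a trait object cannot be cloned directly, so each implementor hands back a boxed copy of itself. In tantivy this is typically provided by a blanket impl for every `Query` that is also `Clone`; that impl is not shown in this diff, so the toy, self-contained version below is only a sketch of the pattern:

trait Shape: ShapeClone {
    fn area(&self) -> f64;
}

trait ShapeClone {
    // Returns a boxed clone of the trait object.
    fn box_clone(&self) -> Box<dyn Shape>;
}

// Every concrete `Shape` that is `Clone` gets `box_clone` for free.
impl<T> ShapeClone for T
where
    T: Shape + Clone + 'static,
{
    fn box_clone(&self) -> Box<dyn Shape> {
        Box::new(self.clone())
    }
}

#[derive(Clone)]
struct Square {
    side: f64,
}

impl Shape for Square {
    fn area(&self) -> f64 {
        self.side * self.side
    }
}

fn main() {
    let shape: Box<dyn Shape> = Box::new(Square { side: 2.0 });
    let copy = shape.box_clone();
    assert_eq!(copy.area(), 4.0);
}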

View File

@@ -21,52 +21,51 @@ use std::str::FromStr;
use tantivy_query_grammar::{UserInputAST, UserInputBound, UserInputLeaf}; use tantivy_query_grammar::{UserInputAST, UserInputBound, UserInputLeaf};
/// Possible error that may happen when parsing a query. /// Possible error that may happen when parsing a query.
#[derive(Debug, PartialEq, Eq, Error)] #[derive(Debug, PartialEq, Eq, Fail)]
pub enum QueryParserError { pub enum QueryParserError {
/// Error in the query syntax /// Error in the query syntax
#[error("Syntax Error")] #[fail(display = "Syntax Error")]
SyntaxError, SyntaxError,
/// `FieldDoesNotExist(field_name: String)` /// `FieldDoesNotExist(field_name: String)`
/// The query references a field that is not in the schema /// The query references a field that is not in the schema
#[error("File does not exists: '{0:?}'")] #[fail(display = "File does not exists: '{:?}'", _0)]
FieldDoesNotExist(String), FieldDoesNotExist(String),
/// The query contains a term for a `u64` or `i64`-field, but the value /// The query contains a term for a `u64` or `i64`-field, but the value
/// is neither. /// is neither.
#[error("Expected a valid integer: '{0:?}'")] #[fail(display = "Expected a valid integer: '{:?}'", _0)]
ExpectedInt(ParseIntError), ExpectedInt(ParseIntError),
/// The query contains a term for a bytes field, but the value is not valid
/// base64.
#[error("Expected base64: '{0:?}'")]
ExpectedBase64(base64::DecodeError),
/// The query contains a term for a `f64`-field, but the value /// The query contains a term for a `f64`-field, but the value
/// is not a f64. /// is not a f64.
#[error("Invalid query: Only excluding terms given")] #[fail(display = "Invalid query: Only excluding terms given")]
ExpectedFloat(ParseFloatError), ExpectedFloat(ParseFloatError),
/// It is forbidden to run queries that are only "excluding" (e.g. -title:pop). /// It is forbidden to run queries that are only "excluding" (e.g. -title:pop).
#[error("Invalid query: Only excluding terms given")] #[fail(display = "Invalid query: Only excluding terms given")]
AllButQueryForbidden, AllButQueryForbidden,
/// If no default field is declared, running a query without any /// If no default field is declared, running a query without any
/// field specified is forbidden. /// field specified is forbidden.
#[error("No default field declared and no field specified in query")] #[fail(display = "No default field declared and no field specified in query")]
NoDefaultFieldDeclared, NoDefaultFieldDeclared,
/// The field searched for is not declared /// The field searched for is not declared
/// as indexed in the schema. /// as indexed in the schema.
#[error("The field '{0:?}' is not declared as indexed")] #[fail(display = "The field '{:?}' is not declared as indexed", _0)]
FieldNotIndexed(String), FieldNotIndexed(String),
/// A phrase query was requested for a field that does not /// A phrase query was requested for a field that does not
/// have any positions indexed. /// have any positions indexed.
#[error("The field '{0:?}' does not have positions indexed")] #[fail(display = "The field '{:?}' does not have positions indexed", _0)]
FieldDoesNotHavePositionsIndexed(String), FieldDoesNotHavePositionsIndexed(String),
/// The tokenizer for the given field is unknown /// The tokenizer for the given field is unknown
/// The two argument strings are the name of the field, the name of the tokenizer /// The two argument strings are the name of the field, the name of the tokenizer
#[error("The tokenizer '{0:?}' for the field '{1:?}' is unknown")] #[fail(
display = "The tokenizer '{:?}' for the field '{:?}' is unknown",
_0, _1
)]
UnknownTokenizer(String, String), UnknownTokenizer(String, String),
/// The query contains a range query with a phrase as one of the bounds. /// The query contains a range query with a phrase as one of the bounds.
/// Only terms can be used as bounds. /// Only terms can be used as bounds.
#[error("A range query cannot have a phrase as one of the bounds")] #[fail(display = "A range query cannot have a phrase as one of the bounds")]
RangeMustNotHavePhrase, RangeMustNotHavePhrase,
/// The format for the date field is not RFC 3339 compliant. /// The format for the date field is not RFC 3339 compliant.
#[error("The date field has an invalid format")] #[fail(display = "The date field has an invalid format")]
DateFormatError(chrono::ParseError), DateFormatError(chrono::ParseError),
} }
@@ -361,10 +360,9 @@ impl QueryParser {
let facet = Facet::from_text(phrase); let facet = Facet::from_text(phrase);
Ok(vec![(0, Term::from_field_text(field, facet.encoded_str()))]) Ok(vec![(0, Term::from_field_text(field, facet.encoded_str()))])
} }
FieldType::Bytes(_) => { FieldType::Bytes => {
let bytes = base64::decode(phrase).map_err(QueryParserError::ExpectedBase64)?; let field_name = self.schema.get_field_name(field).to_string();
let term = Term::from_field_bytes(field, &bytes); Err(QueryParserError::FieldNotIndexed(field_name))
Ok(vec![(0, term)])
} }
} }
} }
@@ -557,7 +555,7 @@ fn convert_to_query(logical_ast: LogicalAST) -> Box<dyn Query> {
!occur_subqueries.is_empty(), !occur_subqueries.is_empty(),
"Should not be empty after trimming" "Should not be empty after trimming"
); );
Box::new(BooleanQuery::new(occur_subqueries)) Box::new(BooleanQuery::from(occur_subqueries))
} }
Some(LogicalAST::Leaf(trimmed_logical_literal)) => { Some(LogicalAST::Leaf(trimmed_logical_literal)) => {
convert_literal_to_query(*trimmed_logical_literal) convert_literal_to_query(*trimmed_logical_literal)
@@ -606,8 +604,6 @@ mod test {
schema_builder.add_date_field("date", INDEXED); schema_builder.add_date_field("date", INDEXED);
schema_builder.add_f64_field("float", INDEXED); schema_builder.add_f64_field("float", INDEXED);
schema_builder.add_facet_field("facet"); schema_builder.add_facet_field("facet");
schema_builder.add_bytes_field("bytes", INDEXED);
schema_builder.add_bytes_field("bytes_not_indexed", STORED);
schema_builder.build() schema_builder.build()
} }
@@ -795,37 +791,6 @@ mod test {
); );
} }
#[test]
fn test_parse_bytes() {
test_parse_query_to_logical_ast_helper(
"bytes:YnVidQ==",
"Term(field=12,bytes=[98, 117, 98, 117])",
false,
);
}
#[test]
fn test_parse_bytes_not_indexed() {
let error = parse_query_to_logical_ast("bytes_not_indexed:aaa", false).unwrap_err();
assert!(matches!(error, QueryParserError::FieldNotIndexed(_)));
}
#[test]
fn test_parse_bytes_phrase() {
test_parse_query_to_logical_ast_helper(
"bytes:\"YnVidQ==\"",
"Term(field=12,bytes=[98, 117, 98, 117])",
false,
);
}
#[test]
fn test_parse_bytes_invalid_base64() {
let base64_err: QueryParserError =
parse_query_to_logical_ast("bytes:aa", false).unwrap_err();
assert!(matches!(base64_err, QueryParserError::ExpectedBase64(_)));
}
#[test] #[test]
fn test_parse_query_to_ast_ab_c() { fn test_parse_query_to_ast_ab_c() {
test_parse_query_to_logical_ast_helper( test_parse_query_to_logical_ast_helper(
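On the left-hand side, the `FieldType::Bytes(_)` branch decodes the query phrase with `base64::decode` before building the term, which matches the `test_parse_bytes` expectation above: "YnVidQ==" is the base64 form of the bytes [98, 117, 98, 117]. A minimal sketch, assuming a pre-0.21 `base64` crate that still exposes a top-level `decode` function:

fn main() {
    let phrase = "YnVidQ==";
    // Maps a decode failure the same way the parser maps it to `ExpectedBase64`.
    let bytes = base64::decode(phrase).expect("valid base64");
    // "YnVidQ==" decodes to the ASCII bytes of "bubu".
    assert_eq!(bytes, vec![98, 117, 98, 117]);
}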

View File

@@ -9,6 +9,7 @@ use crate::query::{Query, Scorer, Weight};
use crate::schema::Type; use crate::schema::Type;
use crate::schema::{Field, IndexRecordOption, Term}; use crate::schema::{Field, IndexRecordOption, Term};
use crate::termdict::{TermDictionary, TermStreamer}; use crate::termdict::{TermDictionary, TermStreamer};
use crate::Result;
use crate::{DocId, Score}; use crate::{DocId, Score};
use std::collections::Bound; use std::collections::Bound;
use std::ops::Range; use std::ops::Range;
@@ -47,7 +48,7 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// ///
/// let index = Index::create_in_ram(schema); /// let index = Index::create_in_ram(schema);
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?; /// let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
/// for year in 1950u64..2017u64 { /// for year in 1950u64..2017u64 {
/// let num_docs_within_year = 10 + (year - 1950) * (year - 1950); /// let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
/// for _ in 0..num_docs_within_year { /// for _ in 0..num_docs_within_year {
@@ -245,11 +246,7 @@ impl RangeQuery {
} }
impl Query for RangeQuery { impl Query for RangeQuery {
fn weight( fn weight(&self, searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<dyn Weight>> {
&self,
searcher: &Searcher,
_scoring_enabled: bool,
) -> crate::Result<Box<dyn Weight>> {
let schema = searcher.schema(); let schema = searcher.schema();
let value_type = schema.get_field_entry(self.field).field_type().value_type(); let value_type = schema.get_field_entry(self.field).field_type().value_type();
if value_type != self.value_type { if value_type != self.value_type {
@@ -292,17 +289,17 @@ impl RangeWeight {
} }
impl Weight for RangeWeight { impl Weight for RangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> { fn scorer(&self, reader: &SegmentReader, boost: Score) -> Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc(); let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc); let mut doc_bitset = BitSet::with_max_value(max_doc);
let inverted_index = reader.inverted_index(self.field)?; let inverted_index = reader.inverted_index(self.field);
let term_dict = inverted_index.terms(); let term_dict = inverted_index.terms();
let mut term_range = self.term_range(term_dict); let mut term_range = self.term_range(term_dict);
while term_range.advance() { while term_range.advance() {
let term_info = term_range.value(); let term_info = term_range.value();
let mut block_segment_postings = inverted_index let mut block_segment_postings = inverted_index
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?; .read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic);
loop { loop {
let docs = block_segment_postings.docs(); let docs = block_segment_postings.docs();
if docs.is_empty() { if docs.is_empty() {
@@ -318,7 +315,7 @@ impl Weight for RangeWeight {
Ok(Box::new(ConstScorer::new(doc_bitset, boost))) Ok(Box::new(ConstScorer::new(doc_bitset, boost)))
} }
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> { fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?; let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc { if scorer.seek(doc) != doc {
return Err(does_not_match(doc)); return Err(does_not_match(doc));
@@ -345,7 +342,7 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
for year in 1950u64..2017u64 { for year in 1950u64..2017u64 {
let num_docs_within_year = 10 + (year - 1950) * (year - 1950); let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
for _ in 0..num_docs_within_year { for _ in 0..num_docs_within_year {
@@ -488,7 +485,7 @@ mod tests {
schema_builder.add_i64_field("year", INDEXED); schema_builder.add_i64_field("year", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone()); let index = Index::create_in_ram(schema.clone());
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
let title = schema.get_field("title").unwrap(); let title = schema.get_field("title").unwrap();
let year = schema.get_field("year").unwrap(); let year = schema.get_field("year").unwrap();
index_writer.add_document(doc!( index_writer.add_document(doc!(
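The hunk header above names `map_bound`, a small helper that applies a transform to the value carried by a `Bound` while preserving `Included`/`Excluded`/`Unbounded`. Its exact signature is not shown in this diff, so the following standalone version is only an assumed reconstruction:

use std::ops::Bound;

// Applies `transform` to the bound's value, keeping the bound kind unchanged.
fn map_bound<TFrom, TTo>(bound: &Bound<TFrom>, transform: impl Fn(&TFrom) -> TTo) -> Bound<TTo> {
    match bound {
        Bound::Included(from) => Bound::Included(transform(from)),
        Bound::Excluded(from) => Bound::Excluded(transform(from)),
        Bound::Unbounded => Bound::Unbounded,
    }
}

fn main() {
    let lower: Bound<i64> = Bound::Included(1980);
    // Toy transform: convert an i64 bound into its u64 term representation.
    let mapped = map_bound(&lower, |&year| year as u64);
    assert!(matches!(mapped, Bound::Included(1980u64)));
}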

View File

@@ -103,7 +103,7 @@ mod test {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
index_writer.add_document(doc!( index_writer.add_document(doc!(
country_field => "japan", country_field => "japan",
)); ));

View File

@@ -9,12 +9,12 @@ pub use self::term_weight::TermWeight;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::assert_nearly_equals;
use crate::collector::TopDocs; use crate::collector::TopDocs;
use crate::docset::DocSet; use crate::docset::DocSet;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::query::{Query, QueryParser, Scorer, TermQuery};
use crate::schema::{Field, IndexRecordOption, Schema, STRING, TEXT};
-use crate::{assert_nearly_equals, DocAddress};
use crate::{Index, Term, TERMINATED};
#[test]
@@ -25,7 +25,7 @@ mod tests {
let index = Index::create_in_ram(schema);
{
// writing the segment
-let mut index_writer = index.writer_for_tests().unwrap();
+let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let doc = doc!(text_field => "a");
index_writer.add_document(doc);
assert!(index_writer.commit().is_ok());
@@ -50,7 +50,7 @@ mod tests {
let index = Index::create_in_ram(schema);
{
// writing the segment
-let mut index_writer = index.writer_for_tests()?;
+let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
for _ in 0..COMPRESSION_BLOCK_SIZE {
let doc = doc!(text_field => "a");
index_writer.add_document(doc);
@@ -86,7 +86,7 @@ mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
-let mut index_writer = index.writer_for_tests().unwrap();
+let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
index_writer.add_document(doc!(
left_field => "left1 left2 left2 left2f2 left2f2 left3 abcde abcde abcde abcde abcde abcde abcde abcde abcde abcewde abcde abcde",
right_field => "right1 right2",
@@ -136,7 +136,7 @@ mod tests {
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
-let mut index_writer = index.writer_for_tests().unwrap();
+let mut index_writer = index.writer_with_num_threads(1, 5_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b"));
index_writer.add_document(doc!(text_field=>"a c"));
index_writer.delete_term(Term::from_field_text(text_field, "b"));
@@ -153,7 +153,7 @@ mod tests {
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
-let mut index_writer = index.writer_for_tests().unwrap();
+let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a"));
index_writer.add_document(doc!(text_field=>"a"));
index_writer.commit()?;
@@ -179,40 +179,4 @@ mod tests {
"TermQuery(Term(field=1,bytes=[104, 101, 108, 108, 111]))"
);
}
-#[test]
-fn test_term_query_explain() -> crate::Result<()> {
-let mut schema_builder = Schema::builder();
-let text_field = schema_builder.add_text_field("text", TEXT);
-let schema = schema_builder.build();
-let index = Index::create_in_ram(schema);
-let mut index_writer = index.writer_for_tests().unwrap();
-index_writer.add_document(doc!(text_field=>"b"));
-index_writer.add_document(doc!(text_field=>"a"));
-index_writer.add_document(doc!(text_field=>"a"));
-index_writer.add_document(doc!(text_field=>"b"));
-index_writer.commit()?;
-let term_a = Term::from_field_text(text_field, "a");
-let term_query = TermQuery::new(term_a, IndexRecordOption::Basic);
-let searcher = index.reader()?.searcher();
-{
-let explanation = term_query.explain(&searcher, DocAddress(0u32, 1u32))?;
-assert_nearly_equals!(explanation.value(), 0.6931472f32);
-}
-{
-let explanation_err = term_query.explain(&searcher, DocAddress(0u32, 0u32));
-assert!(matches!(
-explanation_err,
-Err(crate::TantivyError::InvalidArgument(_msg))
-));
-}
-{
-let explanation_err = term_query.explain(&searcher, DocAddress(0u32, 3u32));
-assert!(matches!(
-explanation_err,
-Err(crate::TantivyError::InvalidArgument(_msg))
-));
-}
-Ok(())
-}
}


@@ -87,43 +87,21 @@ impl TermQuery {
/// While `.weight(...)` returns a boxed trait object,
/// this method return a specific implementation.
/// This is useful for optimization purpose.
-pub fn specialized_weight(
-&self,
-searcher: &Searcher,
-scoring_enabled: bool,
-) -> crate::Result<TermWeight> {
-let field_entry = searcher
-.schema()
-.get_field_entry(self.term.field());
-if !field_entry.is_indexed() {
-let error_msg = format!("Field {:?} is not indexed.", field_entry.name());
-return Err(crate::TantivyError::SchemaError(error_msg));
-}
-let has_fieldnorms = searcher
-.schema()
-.get_field_entry(self.term.field())
-.has_fieldnorms();
+pub fn specialized_weight(&self, searcher: &Searcher, scoring_enabled: bool) -> TermWeight {
let term = self.term.clone();
-let bm25_weight = BM25Weight::for_terms(searcher, &[term])?;
+let bm25_weight = BM25Weight::for_terms(searcher, &[term]);
let index_record_option = if scoring_enabled {
self.index_record_option
} else {
IndexRecordOption::Basic
};
-Ok(TermWeight::new(
-self.term.clone(),
-index_record_option,
-bm25_weight,
-has_fieldnorms,
-))
+TermWeight::new(self.term.clone(), index_record_option, bm25_weight)
}
}
impl Query for TermQuery {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> {
-Ok(Box::new(
-self.specialized_weight(searcher, scoring_enabled)?,
-))
+Ok(Box::new(self.specialized_weight(searcher, scoring_enabled)))
}
fn query_terms(&self, term_set: &mut BTreeSet<Term>) {
term_set.insert(self.term.clone());
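The doc comment above is the reason this method exists: it hands back a concrete TermWeight instead of the Box<dyn Weight> produced by Query::weight. A minimal sketch of calling it from a test, assuming the left-hand, Result-returning signature and the test helpers used elsewhere in this diff (writer_for_tests and the doc! macro):

    fn specialized_weight_sketch() -> crate::Result<()> {
        use crate::query::TermQuery;
        use crate::schema::{IndexRecordOption, Schema, TEXT};
        use crate::{Index, Term};
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let mut index_writer = index.writer_for_tests()?;
        index_writer.add_document(doc!(text_field => "hello"));
        index_writer.commit()?;
        let searcher = index.reader()?.searcher();
        let term_query = TermQuery::new(
            Term::from_field_text(text_field, "hello"),
            IndexRecordOption::Basic,
        );
        // A concrete TermWeight, without the `Box<dyn Weight>` indirection of `Query::weight`.
        let _term_weight = term_query.specialized_weight(&searcher, true)?;
        Ok(())
    }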


@@ -253,7 +253,7 @@ mod tests {
}
fn test_block_wand_aux(term_query: &TermQuery, searcher: &Searcher) -> crate::Result<()> {
-let term_weight = term_query.specialized_weight(&searcher, true)?;
+let term_weight = term_query.specialized_weight(&searcher, true);
for reader in searcher.segment_readers() {
let mut block_max_scores = vec![];
let mut block_max_scores_b = vec![];


@@ -1,14 +1,14 @@
use super::term_scorer::TermScorer;
use crate::core::SegmentReader;
use crate::docset::DocSet;
-use crate::fieldnorm::FieldNormReader;
use crate::postings::SegmentPostings;
use crate::query::bm25::BM25Weight;
use crate::query::explanation::does_not_match;
-use crate::query::weight::for_each_scorer;
+use crate::query::weight::{for_each_pruning_scorer, for_each_scorer};
use crate::query::Weight;
use crate::query::{Explanation, Scorer};
use crate::schema::IndexRecordOption;
+use crate::Result;
use crate::Term;
use crate::{DocId, Score};
@@ -16,37 +16,32 @@ pub struct TermWeight {
term: Term,
index_record_option: IndexRecordOption,
similarity_weight: BM25Weight,
-has_fieldnorms: bool,
}
impl Weight for TermWeight {
-fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
+fn scorer(&self, reader: &SegmentReader, boost: Score) -> Result<Box<dyn Scorer>> {
let term_scorer = self.specialized_scorer(reader, boost)?;
Ok(Box::new(term_scorer))
}
-fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
+fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.specialized_scorer(reader, 1.0)?;
-if scorer.doc() > doc || scorer.seek(doc) != doc {
+if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
-let mut explanation = scorer.explain();
-explanation.add_context(format!(
-"Term ={:?}:{:?}",
-self.term.field(),
-self.term.value_bytes()
-));
-Ok(explanation)
+Ok(scorer.explain())
}
-fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
+fn count(&self, reader: &SegmentReader) -> Result<u32> {
if let Some(delete_bitset) = reader.delete_bitset() {
Ok(self.scorer(reader, 1.0)?.count(delete_bitset))
} else {
let field = self.term.field();
-let inv_index = reader.inverted_index(field)?;
-let term_info = inv_index.get_term_info(&self.term);
-Ok(term_info.map(|term_info| term_info.doc_freq).unwrap_or(0))
+Ok(reader
+.inverted_index(field)
+.get_term_info(&self.term)
+.map(|term_info| term_info.doc_freq)
+.unwrap_or(0))
}
}
@@ -78,8 +73,8 @@ impl Weight for TermWeight {
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
-let scorer = self.specialized_scorer(reader, 1.0)?;
-crate::query::boolean_query::block_wand(vec![scorer], threshold, callback);
+let mut scorer = self.scorer(reader, 1.0)?;
+for_each_pruning_scorer(&mut scorer, threshold, callback);
Ok(())
}
}
@@ -89,13 +84,11 @@ impl TermWeight {
term: Term,
index_record_option: IndexRecordOption,
similarity_weight: BM25Weight,
-has_fieldnorms: bool,
) -> TermWeight {
TermWeight {
term,
index_record_option,
similarity_weight,
-has_fieldnorms,
}
}
@@ -103,17 +96,13 @@ impl TermWeight {
&self,
reader: &SegmentReader,
boost: Score,
-) -> crate::Result<TermScorer> {
+) -> Result<TermScorer> {
let field = self.term.field();
-let inverted_index = reader.inverted_index(field)?;
+let inverted_index = reader.inverted_index(field);
-let fieldnorm_reader = if self.has_fieldnorms {
-reader.get_fieldnorms_reader(field)?
-} else {
-FieldNormReader::const_fieldnorm_id(1u8, reader.num_docs())
-};
+let fieldnorm_reader = reader.get_fieldnorms_reader(field);
let similarity_weight = self.similarity_weight.boost_by(boost);
let postings_opt: Option<SegmentPostings> =
-inverted_index.read_postings(&self.term, self.index_record_option)?;
+inverted_index.read_postings(&self.term, self.index_record_option);
if let Some(segment_postings) = postings_opt {
Ok(TermScorer::new(
segment_postings,
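For the count path above: when a segment has no deletes, the count for a term is simply its doc_freq. A hedged sketch of exercising Weight::count segment by segment, again assuming the left-hand, Result-returning signatures and the same test helpers:

    fn term_count_sketch() -> crate::Result<()> {
        use crate::query::TermQuery;
        use crate::schema::{IndexRecordOption, Schema, TEXT};
        use crate::{Index, Term};
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let mut index_writer = index.writer_for_tests()?;
        index_writer.add_document(doc!(text_field => "a"));
        index_writer.add_document(doc!(text_field => "a"));
        index_writer.add_document(doc!(text_field => "b"));
        index_writer.commit()?;
        let searcher = index.reader()?.searcher();
        let term_query = TermQuery::new(
            Term::from_field_text(text_field, "a"),
            IndexRecordOption::Basic,
        );
        let weight = term_query.specialized_weight(&searcher, false)?;
        let mut total = 0u32;
        for segment_reader in searcher.segment_readers() {
            // With no deleted documents this is the term's doc_freq in that segment.
            total += weight.count(segment_reader)?;
        }
        assert_eq!(total, 2);
        Ok(())
    }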


@@ -398,9 +398,9 @@ mod bench {
use crate::query::score_combiner::DoNothingCombiner;
use crate::query::{ConstScorer, Union, VecDocSet};
+use crate::tests;
use crate::DocId;
use crate::DocSet;
-use crate::{tests, TERMINATED};
use test::Bencher;
#[test]
@@ -414,12 +414,10 @@ mod bench {
union_docset
.iter()
.map(|doc_ids| VecDocSet::from(doc_ids.clone()))
-.map(|docset| ConstScorer::new(docset, 1.0))
+.map(ConstScorer::new)
.collect::<Vec<_>>(),
);
-while v.doc() != TERMINATED {
-v.advance();
-}
+while v.advance() {}
});
}
#[bench]
@@ -434,12 +432,10 @@ mod bench {
union_docset
.iter()
.map(|doc_ids| VecDocSet::from(doc_ids.clone()))
-.map(|docset| ConstScorer::new(docset, 1.0))
+.map(ConstScorer::new)
.collect::<Vec<_>>(),
);
-while v.doc() != TERMINATED {
-v.advance();
-}
+while v.advance() {}
});
}
}
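The two benchmark loops above differ only in the DocSet iteration idiom: the left-hand side cursors with doc()/advance() until TERMINATED, while the right-hand side drives an advance() that returns a bool. A minimal sketch of the left-hand idiom, assuming a VecDocSet starts positioned on its first document as the benchmark implies:

    fn docset_iteration_sketch() {
        use crate::query::VecDocSet;
        use crate::{DocSet, TERMINATED};
        let mut docset = VecDocSet::from(vec![1u32, 3, 7]);
        while docset.doc() != TERMINATED {
            // process docset.doc() here
            docset.advance();
        }
    }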


@@ -9,8 +9,8 @@
use crate::directory::META_LOCK;
use crate::Index;
use crate::Searcher;
use crate::SegmentReader;
+use std::convert::TryInto;
use std::sync::Arc;
-use std::{convert::TryInto, io};
/// Defines when a new version of the index should be reloaded.
///
@@ -138,11 +138,9 @@ impl InnerIndexReader {
.collect::<crate::Result<_>>()?
};
let schema = self.index.schema();
-let searchers: Vec<Searcher> = std::iter::repeat_with(|| {
-Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone())
-})
-.take(self.num_searchers)
-.collect::<io::Result<_>>()?;
+let searchers = (0..self.num_searchers)
+.map(|_| Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone()))
+.collect();
self.searcher_pool.publish_new_generation(searchers);
Ok(())
}
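The pool refreshed above is what backs IndexReader::searcher(): each call checks a pre-built Searcher out of the latest published generation. A small sketch, assuming Index::reader() with default options as used in the tests elsewhere in this diff:

    fn searcher_from_reader_sketch() -> crate::Result<()> {
        use crate::schema::{Schema, TEXT};
        use crate::Index;
        let mut schema_builder = Schema::builder();
        schema_builder.add_text_field("text", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let reader = index.reader()?;
        // Cheap: this only picks a pooled Searcher, it does not reload segments.
        let searcher = reader.searcher();
        assert_eq!(searcher.num_docs(), 0);
        Ok(())
    }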


@@ -1,5 +1,5 @@
-use crossbeam::channel::unbounded;
-use crossbeam::channel::{Receiver, RecvError, Sender};
+use crossbeam::crossbeam_channel::unbounded;
+use crossbeam::{Receiver, RecvError, Sender};
use std::ops::{Deref, DerefMut};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
@@ -186,7 +186,6 @@ mod tests {
use super::Pool;
use super::Queue;
-use crossbeam::channel;
use std::{iter, mem};
#[test]
@@ -231,11 +230,11 @@ mod tests {
let mut threads = vec![];
// spawn one more thread than there are elements in the pool
-let (start_1_send, start_1_recv) = channel::bounded(0);
-let (start_2_send, start_2_recv) = channel::bounded(0);
-let (start_3_send, start_3_recv) = channel::bounded(0);
-let (event_send1, event_recv) = channel::unbounded();
+let (start_1_send, start_1_recv) = crossbeam::bounded(0);
+let (start_2_send, start_2_recv) = crossbeam::bounded(0);
+let (start_3_send, start_3_recv) = crossbeam::bounded(0);
+let (event_send1, event_recv) = crossbeam::unbounded();
let event_send2 = event_send1.clone();
let event_send3 = event_send1.clone();
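The import churn above reflects the crossbeam API move from re-exports at the crate root (crossbeam::bounded, crossbeam::Receiver) to the crossbeam::channel module. A hedged sketch of the newer, module-qualified form used on the left-hand side:

    fn crossbeam_channel_sketch() {
        use crossbeam::channel;
        // Zero-capacity channel: send and recv rendezvous, as in the pool tests above.
        let (sender, receiver) = channel::bounded::<u32>(0);
        let handle = std::thread::spawn(move || {
            let _ = sender.send(1);
        });
        let _ = receiver.recv();
        let _ = handle.join();
    }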


@@ -1,164 +0,0 @@
use serde::{Deserialize, Serialize};
use std::ops::BitOr;
use super::flags::{FastFlag, IndexedFlag, SchemaFlagList, StoredFlag};
/// Define how an a bytes field should be handled by tantivy.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct BytesOptions {
indexed: bool,
fast: bool,
stored: bool,
}
impl BytesOptions {
/// Returns true iff the value is indexed.
pub fn is_indexed(&self) -> bool {
self.indexed
}
/// Returns true iff the value is a fast field.
pub fn is_fast(&self) -> bool {
self.fast
}
/// Returns true iff the value is stored.
pub fn is_stored(&self) -> bool {
self.stored
}
/// Set the field as indexed.
///
/// Setting an integer as indexed will generate
/// a posting list for each value taken by the integer.
pub fn set_indexed(mut self) -> BytesOptions {
self.indexed = true;
self
}
/// Set the field as a single-valued fast field.
///
/// Fast fields are designed for random access.
/// Access time are similar to a random lookup in an array.
/// If more than one value is associated to a fast field, only the last one is
/// kept.
pub fn set_fast(mut self) -> BytesOptions {
self.fast = true;
self
}
/// Set the field as stored.
///
/// Only the fields that are set as *stored* are
/// persisted into the Tantivy's store.
pub fn set_stored(mut self) -> BytesOptions {
self.stored = true;
self
}
}
impl Default for BytesOptions {
fn default() -> BytesOptions {
BytesOptions {
indexed: false,
fast: false,
stored: false,
}
}
}
impl<T: Into<BytesOptions>> BitOr<T> for BytesOptions {
type Output = BytesOptions;
fn bitor(self, other: T) -> BytesOptions {
let other = other.into();
BytesOptions {
indexed: self.indexed | other.indexed,
stored: self.stored | other.stored,
fast: self.fast | other.fast,
}
}
}
impl From<()> for BytesOptions {
fn from(_: ()) -> Self {
Self::default()
}
}
impl From<FastFlag> for BytesOptions {
fn from(_: FastFlag) -> Self {
BytesOptions {
indexed: false,
stored: false,
fast: true,
}
}
}
impl From<StoredFlag> for BytesOptions {
fn from(_: StoredFlag) -> Self {
BytesOptions {
indexed: false,
stored: true,
fast: false,
}
}
}
impl From<IndexedFlag> for BytesOptions {
fn from(_: IndexedFlag) -> Self {
BytesOptions {
indexed: true,
stored: false,
fast: false,
}
}
}
impl<Head, Tail> From<SchemaFlagList<Head, Tail>> for BytesOptions
where
Head: Clone,
Tail: Clone,
Self: BitOr<Output = Self> + From<Head> + From<Tail>,
{
fn from(head_tail: SchemaFlagList<Head, Tail>) -> Self {
Self::from(head_tail.head) | Self::from(head_tail.tail)
}
}
#[cfg(test)]
mod tests {
use crate::schema::{BytesOptions, FAST, INDEXED, STORED};
#[test]
fn test_bytes_option_fast_flag() {
assert_eq!(BytesOptions::default().set_fast(), FAST.into());
assert_eq!(BytesOptions::default().set_indexed(), INDEXED.into());
assert_eq!(BytesOptions::default().set_stored(), STORED.into());
}
#[test]
fn test_bytes_option_fast_flag_composition() {
assert_eq!(
BytesOptions::default().set_fast().set_stored(),
(FAST | STORED).into()
);
assert_eq!(
BytesOptions::default().set_indexed().set_fast(),
(INDEXED | FAST).into()
);
assert_eq!(
BytesOptions::default().set_stored().set_indexed(),
(STORED | INDEXED).into()
);
}
#[test]
fn test_bytes_option_fast_() {
assert!(!BytesOptions::default().is_stored());
assert!(!BytesOptions::default().is_fast());
assert!(!BytesOptions::default().is_indexed());
assert!(BytesOptions::default().set_stored().is_stored());
assert!(BytesOptions::default().set_fast().is_fast());
assert!(BytesOptions::default().set_indexed().is_indexed());
}
}
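On the side of the diff that still has BytesOptions, the flag syntax and the builder calls above are interchangeable; this small sketch mirrors the unit tests in the file shown here:

    fn bytes_options_sketch() {
        use crate::schema::{BytesOptions, FAST, STORED};
        // `FAST | STORED` builds a SchemaFlagList that converts into BytesOptions.
        let options: BytesOptions = (FAST | STORED).into();
        assert!(options.is_fast());
        assert!(options.is_stored());
        assert!(!options.is_indexed());
        assert_eq!(options, BytesOptions::default().set_fast().set_stored());
    }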


@@ -74,8 +74,9 @@ impl Document {
}
/// Add a text field.
-pub fn add_text<S: ToString>(&mut self, field: Field, text: S) {
-self.add(FieldValue::new(field, Value::Str(text.to_string())));
+pub fn add_text(&mut self, field: Field, text: &str) {
+let value = Value::Str(String::from(text));
+self.add(FieldValue::new(field, value));
}
/// Add a pre-tokenized text field.
@@ -109,8 +110,8 @@ impl Document {
}
/// Add a bytes field
-pub fn add_bytes<T: Into<Vec<u8>>>(&mut self, field: Field, value: T) {
-self.add(FieldValue::new(field, Value::Bytes(value.into())))
+pub fn add_bytes(&mut self, field: Field, value: Vec<u8>) {
+self.add(FieldValue::new(field, Value::Bytes(value)))
}
/// Add a field value
@@ -161,16 +162,20 @@ impl Document {
}
/// Returns all of the `FieldValue`s associated the given field
-pub fn get_all(&self, field: Field) -> impl Iterator<Item = &Value> {
+pub fn get_all(&self, field: Field) -> Vec<&Value> {
self.field_values
.iter()
-.filter(move |field_value| field_value.field() == field)
+.filter(|field_value| field_value.field() == field)
.map(FieldValue::value)
+.collect()
}
/// Returns the first `FieldValue` associated the given field
pub fn get_first(&self, field: Field) -> Option<&Value> {
-self.get_all(field).next()
+self.field_values
+.iter()
+.find(|field_value| field_value.field() == field)
+.map(FieldValue::value)
}
/// Prepares Document for being stored in the document store
Some files were not shown because too many files have changed in this diff.