Compare commits

5 Commits

Author · SHA1 · Message · Date
Pascal Seitz · a88e659e02 · make convert_to_fast_value_and_append_to_json_term pub · 2024-07-11 08:54:01 +08:00
Pascal Seitz · dd2c4a8963 · clippy · 2024-04-22 09:56:49 +08:00
Pascal Seitz · 786781d0fc · cleanup · 2024-04-22 09:44:41 +08:00
Pascal Seitz · 2d7483e3d4 · add JsonTermSerializer · 2024-04-20 18:56:27 +08:00
Pascal Seitz · 87b9f0678c · split term and indexing term · 2024-04-18 23:38:21 +08:00
79 changed files with 1609 additions and 2349 deletions

View File

@@ -15,11 +15,11 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Install Rust
run: rustup toolchain install nightly-2024-04-10 --profile minimal --component llvm-tools-preview
run: rustup toolchain install nightly-2023-09-10 --profile minimal --component llvm-tools-preview
- uses: Swatinem/rust-cache@v2
- uses: taiki-e/install-action@cargo-llvm-cov
- name: Generate code coverage
run: cargo +nightly-2024-04-10 llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
run: cargo +nightly-2023-09-10 llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
continue-on-error: true

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.23.0"
version = "0.22.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -15,16 +15,12 @@ rust-version = "1.63"
exclude = ["benches/*.json", "benches/*.txt"]
[dependencies]
# Switch back to the non-forked oneshot crate once https://github.com/faern/oneshot/pull/35 is merged
oneshot = { git = "https://github.com/fulmicoton/oneshot.git", rev = "b208f49" }
oneshot = "0.1.5"
base64 = "0.22.0"
byteorder = "1.4.3"
crc32fast = "1.3.2"
once_cell = "1.10.0"
regex = { version = "1.5.5", default-features = false, features = [
"std",
"unicode",
] }
regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
aho-corasick = "1.0"
tantivy-fst = "0.5"
memmap2 = { version = "0.9.0", optional = true }
@@ -34,15 +30,14 @@ tempfile = { version = "3.3.0", optional = true }
log = "0.4.16"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0.79"
num_cpus = "1.13.1"
fs4 = { version = "0.8.0", optional = true }
levenshtein_automata = "0.2.1"
uuid = { version = "1.0.0", features = ["v4", "serde"] }
crossbeam-channel = "0.5.4"
rust-stemmers = "1.2.0"
downcast-rs = "1.2.0"
bitpacking = { version = "0.9.2", default-features = false, features = [
"bitpacker4x",
] }
bitpacking = { version = "0.9.2", default-features = false, features = ["bitpacker4x"] }
census = "0.4.2"
rustc-hash = "1.1.0"
thiserror = "1.0.30"
@@ -53,17 +48,17 @@ smallvec = "1.8.0"
rayon = "1.5.2"
lru = "0.12.0"
fastdivide = "0.4.0"
itertools = "0.13.0"
itertools = "0.12.0"
measure_time = "0.8.2"
arc-swap = "1.5.0"
columnar = { version = "0.3", path = "./columnar", package = "tantivy-columnar" }
sstable = { version = "0.3", path = "./sstable", package = "tantivy-sstable", optional = true }
stacker = { version = "0.3", path = "./stacker", package = "tantivy-stacker" }
query-grammar = { version = "0.22.0", path = "./query-grammar", package = "tantivy-query-grammar" }
tantivy-bitpacker = { version = "0.6", path = "./bitpacker" }
common = { version = "0.7", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version = "0.3", path = "./tokenizer-api", package = "tantivy-tokenizer-api" }
columnar = { version= "0.3", path="./columnar", package ="tantivy-columnar" }
sstable = { version= "0.3", path="./sstable", package ="tantivy-sstable", optional = true }
stacker = { version= "0.3", path="./stacker", package ="tantivy-stacker" }
query-grammar = { version= "0.22.0", path="./query-grammar", package = "tantivy-query-grammar" }
tantivy-bitpacker = { version= "0.6", path="./bitpacker" }
common = { version= "0.7", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version= "0.3", path="./tokenizer-api", package="tantivy-tokenizer-api" }
sketches-ddsketch = { version = "0.2.1", features = ["use_serde"] }
futures-util = { version = "0.3.28", optional = true }
fnv = "1.0.7"
@@ -72,7 +67,6 @@ fnv = "1.0.7"
winapi = "0.3.9"
[dev-dependencies]
binggan = "0.8.0"
rand = "0.8.5"
maplit = "1.0.2"
matches = "0.1.9"
@@ -118,26 +112,17 @@ lz4-compression = ["lz4_flex"]
zstd-compression = ["zstd"]
failpoints = ["fail", "fail/failpoints"]
unstable = [] # useful for benches.
unstable = [] # useful for benches.
quickwit = ["sstable", "futures-util"]
# Compares only the hash of a string when indexing data.
# Compares only the hash of a string when indexing data.
# Increases indexing speed, but may lead to extremely rare missing terms, when there's a hash collision.
# Uses 64bit ahash.
compare_hash_only = ["stacker/compare_hash_only"]
[workspace]
members = [
"query-grammar",
"bitpacker",
"common",
"ownedbytes",
"stacker",
"sstable",
"tokenizer-api",
"columnar",
]
members = ["query-grammar", "bitpacker", "common", "ownedbytes", "stacker", "sstable", "tokenizer-api", "columnar"]
# Following the "fail" crate best practises, we isolate
# tests that define specific behavior in fail check points
@@ -158,7 +143,3 @@ harness = false
[[bench]]
name = "index-bench"
harness = false
[[bench]]
name = "agg_bench"
harness = false

View File

@@ -1,413 +0,0 @@
use binggan::{black_box, InputGroup, PeakMemAlloc, INSTRUMENTED_SYSTEM};
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rand_distr::Distribution;
use serde_json::json;
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::{AllQuery, TermQuery};
use tantivy::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
use tantivy::{doc, Index, Term};
#[global_allocator]
pub static GLOBAL: &PeakMemAlloc<std::alloc::System> = &INSTRUMENTED_SYSTEM;
/// Mini macro to register a function via its name
/// runner.register("average_u64", move |index| average_u64(index));
macro_rules! register {
($runner:expr, $func:ident) => {
$runner.register(stringify!($func), move |index| $func(index))
};
}
fn main() {
let inputs = vec![
("full", get_test_index_bench(Cardinality::Full).unwrap()),
(
"dense",
get_test_index_bench(Cardinality::OptionalDense).unwrap(),
),
(
"sparse",
get_test_index_bench(Cardinality::OptionalSparse).unwrap(),
),
(
"multivalue",
get_test_index_bench(Cardinality::Multivalued).unwrap(),
),
];
bench_agg(InputGroup::new_with_inputs(inputs));
}
fn bench_agg(mut group: InputGroup<Index>) {
group.set_alloc(GLOBAL); // Set the peak mem allocator. This will enable peak memory reporting.
register!(group, average_u64);
register!(group, average_f64);
register!(group, average_f64_u64);
register!(group, stats_f64);
register!(group, percentiles_f64);
register!(group, terms_few);
register!(group, terms_many);
register!(group, terms_many_order_by_term);
register!(group, terms_many_with_top_hits);
register!(group, terms_many_with_avg_sub_agg);
register!(group, terms_many_json_mixed_type_with_sub_agg_card);
register!(group, range_agg);
register!(group, range_agg_with_avg_sub_agg);
register!(group, range_agg_with_term_agg_few);
register!(group, range_agg_with_term_agg_many);
register!(group, histogram);
register!(group, histogram_hard_bounds);
register!(group, histogram_with_avg_sub_agg);
register!(group, avg_and_range_with_avg_sub_agg);
group.run();
}
fn exec_term_with_agg(index: &Index, agg_req: serde_json::Value) {
let agg_req: Aggregations = serde_json::from_value(agg_req).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let collector = get_collector(agg_req);
let searcher = reader.searcher();
black_box(searcher.search(&term_query, &collector).unwrap());
}
fn average_u64(index: &Index) {
let agg_req = json!({
"average": { "avg": { "field": "score", } }
});
exec_term_with_agg(index, agg_req)
}
fn average_f64(index: &Index) {
let agg_req = json!({
"average": { "avg": { "field": "score_f64", } }
});
exec_term_with_agg(index, agg_req)
}
fn average_f64_u64(index: &Index) {
let agg_req = json!({
"average_f64": { "avg": { "field": "score_f64" } },
"average": { "avg": { "field": "score" } },
});
exec_term_with_agg(index, agg_req)
}
fn stats_f64(index: &Index) {
let agg_req = json!({
"average_f64": { "stats": { "field": "score_f64", } }
});
exec_term_with_agg(index, agg_req)
}
fn percentiles_f64(index: &Index) {
let agg_req = json!({
"mypercentiles": {
"percentiles": {
"field": "score_f64",
"percents": [ 95, 99, 99.9 ]
}
}
});
execute_agg(index, agg_req);
}
fn terms_few(index: &Index) {
let agg_req = json!({
"my_texts": { "terms": { "field": "text_few_terms" } },
});
execute_agg(index, agg_req);
}
fn terms_many(index: &Index) {
let agg_req = json!({
"my_texts": { "terms": { "field": "text_many_terms" } },
});
execute_agg(index, agg_req);
}
fn terms_many_order_by_term(index: &Index) {
let agg_req = json!({
"my_texts": { "terms": { "field": "text_many_terms", "order": { "_key": "desc" } } },
});
execute_agg(index, agg_req);
}
fn terms_many_with_top_hits(index: &Index) {
let agg_req = json!({
"my_texts": {
"terms": { "field": "text_many_terms" },
"aggs": {
"top_hits": { "top_hits":
{
"sort": [
{ "score": "desc" }
],
"size": 2,
"doc_value_fields": ["score_f64"]
}
}
}
},
});
execute_agg(index, agg_req);
}
fn terms_many_with_avg_sub_agg(index: &Index) {
let agg_req = json!({
"my_texts": {
"terms": { "field": "text_many_terms" },
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
},
});
execute_agg(index, agg_req);
}
fn terms_many_json_mixed_type_with_sub_agg_card(index: &Index) {
let agg_req = json!({
"my_texts": {
"terms": { "field": "json.mixed_type" },
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
},
});
execute_agg(index, agg_req);
}
fn execute_agg(index: &Index, agg_req: serde_json::Value) {
let agg_req: Aggregations = serde_json::from_value(agg_req).unwrap();
let collector = get_collector(agg_req);
let reader = index.reader().unwrap();
let searcher = reader.searcher();
black_box(searcher.search(&AllQuery, &collector).unwrap());
}
fn range_agg(index: &Index) {
let agg_req = json!({
"range_f64": { "range": { "field": "score_f64", "ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 30000 },
{ "from": 30000, "to": 40000 },
{ "from": 40000, "to": 50000 },
{ "from": 50000, "to": 60000 }
] } },
});
execute_agg(index, agg_req);
}
fn range_agg_with_avg_sub_agg(index: &Index) {
let agg_req = json!({
"rangef64": {
"range": {
"field": "score_f64",
"ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 30000 },
{ "from": 30000, "to": 40000 },
{ "from": 40000, "to": 50000 },
{ "from": 50000, "to": 60000 }
]
},
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
},
});
execute_agg(index, agg_req);
}
fn range_agg_with_term_agg_few(index: &Index) {
let agg_req = json!({
"rangef64": {
"range": {
"field": "score_f64",
"ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 30000 },
{ "from": 30000, "to": 40000 },
{ "from": 40000, "to": 50000 },
{ "from": 50000, "to": 60000 }
]
},
"aggs": {
"my_texts": { "terms": { "field": "text_few_terms" } },
}
},
});
execute_agg(index, agg_req);
}
fn range_agg_with_term_agg_many(index: &Index) {
let agg_req = json!({
"rangef64": {
"range": {
"field": "score_f64",
"ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 30000 },
{ "from": 30000, "to": 40000 },
{ "from": 40000, "to": 50000 },
{ "from": 50000, "to": 60000 }
]
},
"aggs": {
"my_texts": { "terms": { "field": "text_many_terms" } },
}
},
});
execute_agg(index, agg_req);
}
fn histogram(index: &Index) {
let agg_req = json!({
"rangef64": {
"histogram": {
"field": "score_f64",
"interval": 100 // 1000 buckets
},
}
});
execute_agg(index, agg_req);
}
fn histogram_hard_bounds(index: &Index) {
let agg_req = json!({
"rangef64": { "histogram": { "field": "score_f64", "interval": 100, "hard_bounds": { "min": 1000, "max": 300000 } } },
});
execute_agg(index, agg_req);
}
fn histogram_with_avg_sub_agg(index: &Index) {
let agg_req = json!({
"rangef64": {
"histogram": { "field": "score_f64", "interval": 100 },
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
}
});
execute_agg(index, agg_req);
}
fn avg_and_range_with_avg_sub_agg(index: &Index) {
let agg_req = json!({
"rangef64": {
"range": {
"field": "score_f64",
"ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 60000 }
]
},
"aggs": {
"average_in_range": { "avg": { "field": "score" } }
}
},
"average": { "avg": { "field": "score" } }
});
execute_agg(index, agg_req);
}
#[derive(Clone, Copy, Hash, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Cardinality {
/// All documents contain exactly one value.
/// `Full` is the default for auto-detecting the Cardinality, since it is the most strict.
#[default]
Full = 0,
/// All documents contain at most one value.
OptionalDense = 1,
/// All documents may contain any number of values.
Multivalued = 2,
/// 1 / 20 documents has a value
OptionalSparse = 3,
}
fn get_collector(agg_req: Aggregations) -> AggregationCollector {
AggregationCollector::from_aggs(agg_req, Default::default())
}
fn get_test_index_bench(cardinality: Cardinality) -> tantivy::Result<Index> {
let mut schema_builder = Schema::builder();
let text_fieldtype = tantivy::schema::TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
)
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let json_field = schema_builder.add_json_field("json", FAST);
let text_field_many_terms = schema_builder.add_text_field("text_many_terms", STRING | FAST);
let text_field_few_terms = schema_builder.add_text_field("text_few_terms", STRING | FAST);
let score_fieldtype = tantivy::schema::NumericOptions::default().set_fast();
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
let index = Index::create_from_tempdir(schema_builder.build())?;
let few_terms_data = ["INFO", "ERROR", "WARN", "DEBUG"];
let lg_norm = rand_distr::LogNormal::new(2.996f64, 0.979f64).unwrap();
let many_terms_data = (0..150_000)
.map(|num| format!("author{num}"))
.collect::<Vec<_>>();
{
let mut rng = StdRng::from_seed([1u8; 32]);
let mut index_writer = index.writer_with_num_threads(1, 200_000_000)?;
// To make the different test cases comparable we just change one doc to force the
// cardinality
if cardinality == Cardinality::OptionalDense {
index_writer.add_document(doc!())?;
}
if cardinality == Cardinality::Multivalued {
index_writer.add_document(doc!(
json_field => json!({"mixed_type": 10.0}),
json_field => json!({"mixed_type": 10.0}),
text_field => "cool",
text_field => "cool",
text_field_many_terms => "cool",
text_field_many_terms => "cool",
text_field_few_terms => "cool",
text_field_few_terms => "cool",
score_field => 1u64,
score_field => 1u64,
score_field_f64 => lg_norm.sample(&mut rng),
score_field_f64 => lg_norm.sample(&mut rng),
score_field_i64 => 1i64,
score_field_i64 => 1i64,
))?;
}
let mut doc_with_value = 1_000_000;
if cardinality == Cardinality::OptionalSparse {
doc_with_value /= 20;
}
let _val_max = 1_000_000.0;
for _ in 0..doc_with_value {
let val: f64 = rng.gen_range(0.0..1_000_000.0);
let json = if rng.gen_bool(0.1) {
// 10% are numeric values
json!({ "mixed_type": val })
} else {
json!({"mixed_type": many_terms_data.choose(&mut rng).unwrap().to_string()})
};
index_writer.add_document(doc!(
text_field => "cool",
json_field => json,
text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
text_field_few_terms => few_terms_data.choose(&mut rng).unwrap().to_string(),
score_field => val as u64,
score_field_f64 => lg_norm.sample(&mut rng),
score_field_i64 => val as i64,
))?;
if cardinality == Cardinality::OptionalSparse {
for _ in 0..20 {
index_writer.add_document(doc!(text_field => "cool"))?;
}
}
}
// writing the segment
index_writer.commit()?;
}
Ok(index)
}

View File

@@ -18,7 +18,7 @@ fn benchmark(
benchmark_dynamic_json(b, input, schema, commit, parse_json)
} else {
_benchmark(b, input, schema, commit, parse_json, |schema, doc_json| {
TantivyDocument::parse_json(schema, doc_json).unwrap()
TantivyDocument::parse_json(&schema, doc_json).unwrap()
})
}
}
@@ -90,7 +90,8 @@ fn benchmark_dynamic_json(
) {
let json_field = schema.get_field("json").unwrap();
_benchmark(b, input, schema, commit, parse_json, |_schema, doc_json| {
let json_val: serde_json::Value = serde_json::from_str(doc_json).unwrap();
let json_val: serde_json::Map<String, serde_json::Value> =
serde_json::from_str(doc_json).unwrap();
tantivy::doc!(json_field=>json_val)
})
}
@@ -137,16 +138,15 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
for (prefix, schema, is_dynamic) in benches {
for commit in [false, true] {
let suffix = if commit { "with-commit" } else { "no-commit" };
{
let parse_json = false;
for parse_json in [false] {
// for parse_json in [false, true] {
let suffix = if parse_json {
format!("{suffix}-with-json-parsing")
format!("{}-with-json-parsing", suffix)
} else {
suffix.to_string()
format!("{}", suffix)
};
let bench_name = format!("{prefix}{suffix}");
let bench_name = format!("{}{}", prefix, suffix);
group.bench_function(bench_name, |b| {
benchmark(b, HDFS_LOGS, schema.clone(), commit, parse_json, is_dynamic)
});

View File

@@ -9,7 +9,7 @@ description = "column oriented storage for tantivy"
categories = ["database-implementations", "data-structures", "compression"]
[dependencies]
itertools = "0.13.0"
itertools = "0.12.0"
fastdivide = "0.4.0"
stacker = { version= "0.3", path = "../stacker", package="tantivy-stacker"}

View File

@@ -59,6 +59,22 @@ pub struct ColumnarWriter {
buffers: SpareBuffers,
}
#[inline]
fn mutate_or_create_column<V, TMutator>(
arena_hash_map: &mut ArenaHashMap,
column_name: &str,
updater: TMutator,
) where
V: Copy + 'static,
TMutator: FnMut(Option<V>) -> V,
{
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
arena_hash_map.mutate_or_create(column_name.as_bytes(), updater);
}
impl ColumnarWriter {
pub fn mem_usage(&self) -> usize {
self.arena.mem_usage()
@@ -159,8 +175,9 @@ impl ColumnarWriter {
},
&mut self.dictionaries,
);
hash_map.mutate_or_create(
column_name.as_bytes(),
mutate_or_create_column(
hash_map,
column_name,
|column_opt: Option<StrOrBytesColumnWriter>| {
let mut column_writer = if let Some(column_writer) = column_opt {
column_writer
@@ -175,21 +192,24 @@ impl ColumnarWriter {
);
}
ColumnType::Bool => {
self.bool_field_hash_map.mutate_or_create(
column_name.as_bytes(),
mutate_or_create_column(
&mut self.bool_field_hash_map,
column_name,
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
);
}
ColumnType::DateTime => {
self.datetime_field_hash_map.mutate_or_create(
column_name.as_bytes(),
mutate_or_create_column(
&mut self.datetime_field_hash_map,
column_name,
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
);
}
ColumnType::I64 | ColumnType::F64 | ColumnType::U64 => {
let numerical_type = column_type.numerical_type().unwrap();
self.numerical_field_hash_map.mutate_or_create(
column_name.as_bytes(),
mutate_or_create_column(
&mut self.numerical_field_hash_map,
column_name,
|column_opt: Option<NumericalColumnWriter>| {
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
column.force_numerical_type(numerical_type);
@@ -197,8 +217,9 @@ impl ColumnarWriter {
},
);
}
ColumnType::IpAddr => self.ip_addr_field_hash_map.mutate_or_create(
column_name.as_bytes(),
ColumnType::IpAddr => mutate_or_create_column(
&mut self.ip_addr_field_hash_map,
column_name,
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
),
}
@@ -211,8 +232,9 @@ impl ColumnarWriter {
numerical_value: T,
) {
let (hash_map, arena) = (&mut self.numerical_field_hash_map, &mut self.arena);
hash_map.mutate_or_create(
column_name.as_bytes(),
mutate_or_create_column(
hash_map,
column_name,
|column_opt: Option<NumericalColumnWriter>| {
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
column.record_numerical_value(doc, numerical_value.into(), arena);
@@ -222,6 +244,10 @@ impl ColumnarWriter {
}
pub fn record_ip_addr(&mut self, doc: RowId, column_name: &str, ip_addr: Ipv6Addr) {
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
let (hash_map, arena) = (&mut self.ip_addr_field_hash_map, &mut self.arena);
hash_map.mutate_or_create(
column_name.as_bytes(),
@@ -235,30 +261,24 @@ impl ColumnarWriter {
pub fn record_bool(&mut self, doc: RowId, column_name: &str, val: bool) {
let (hash_map, arena) = (&mut self.bool_field_hash_map, &mut self.arena);
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(doc, val, arena);
column
},
);
mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(doc, val, arena);
column
});
}
pub fn record_datetime(&mut self, doc: RowId, column_name: &str, datetime: common::DateTime) {
let (hash_map, arena) = (&mut self.datetime_field_hash_map, &mut self.arena);
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(
doc,
NumericalValue::I64(datetime.into_timestamp_nanos()),
arena,
);
column
},
);
mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(
doc,
NumericalValue::I64(datetime.into_timestamp_nanos()),
arena,
);
column
});
}
pub fn record_str(&mut self, doc: RowId, column_name: &str, value: &str) {
@@ -283,6 +303,10 @@ impl ColumnarWriter {
}
pub fn record_bytes(&mut self, doc: RowId, column_name: &str, value: &[u8]) {
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
let (hash_map, arena, dictionaries) = (
&mut self.bytes_field_hash_map,
&mut self.arena,
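
The refactor above funnels every column upsert through `mutate_or_create_column`, centralizing the zero-byte key assertion. Its updater closure is an upsert contract: it receives `None` the first time a column name is seen and `Some(current)` afterwards, and returns the value to store. A minimal sketch of that contract over a plain HashMap (illustrative only; tantivy's map is arena-backed and keyed by bytes):

use std::collections::HashMap;

/// Upsert helper mirroring the `mutate_or_create` contract: the updater
/// sees `None` for a fresh key and `Some(current)` for an existing one.
fn mutate_or_create<V, TMutator>(map: &mut HashMap<String, V>, key: &str, mut updater: TMutator)
where
    V: Copy + 'static,
    TMutator: FnMut(Option<V>) -> V,
{
    assert!(!key.as_bytes().contains(&0u8), "key may not contain the 0 byte");
    let new_value = updater(map.get(key).copied());
    map.insert(key.to_string(), new_value);
}

fn main() {
    let mut columns: HashMap<String, u64> = HashMap::new();
    // The first call creates the entry, later calls mutate it.
    mutate_or_create(&mut columns, "score", |count| count.unwrap_or(0) + 1);
    mutate_or_create(&mut columns, "score", |count| count.unwrap_or(0) + 1);
    assert_eq!(columns["score"], 2);
}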

View File

@@ -151,7 +151,7 @@ pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
(result, vlen)
}
/// Write a `u32` as a vint payload.
pub fn write_u32_vint<W: io::Write + ?Sized>(val: u32, writer: &mut W) -> io::Result<()> {
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
let mut buf = [0u8; 8];
let data = serialize_vint_u32(val, &mut buf);
writer.write_all(data)
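
The two signatures above differ only in the `W: io::Write + ?Sized` bound, which lets the function accept unsized writers such as `&mut dyn io::Write`. For reference, a generic LEB128-style sketch of variable-length u32 encoding; this is illustrative and not necessarily the exact wire format produced by `serialize_vint_u32`:

use std::io::{self, Write};

/// Encode `val` as a LEB128-style varint: 7 payload bits per byte,
/// high bit set on every byte except the last.
fn write_u32_vint<W: Write + ?Sized>(mut val: u32, writer: &mut W) -> io::Result<()> {
    let mut buf = [0u8; 5]; // ceil(32 / 7) = 5 bytes max
    let mut len = 0;
    loop {
        let byte = (val & 0x7f) as u8;
        val >>= 7;
        buf[len] = if val == 0 { byte } else { byte | 0x80 };
        len += 1;
        if val == 0 {
            break;
        }
    }
    writer.write_all(&buf[..len])
}

fn main() -> io::Result<()> {
    let mut out: Vec<u8> = Vec::new();
    write_u32_vint(300, &mut out)?;
    assert_eq!(out, vec![0xac, 0x02]); // 300 = 0b10_0101100
    Ok(())
}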

View File

@@ -19,14 +19,13 @@ use tantivy::{doc, Index, IndexWriter, ReloadPolicy};
use tempfile::TempDir;
fn main() -> tantivy::Result<()> {
// Normally you would use `MMapDirectory` instead to persist data on disk.
// https://docs.rs/tantivy/latest/tantivy/directory/struct.MmapDirectory.html
// But for this example, we will use a temporary directory `TempDir`.
// Let's create a temporary directory for the
// sake of this example
let index_path = TempDir::new()?;
// # Defining the schema
//
// The Tantivy index requires a schema.
// The Tantivy index requires a very strict schema.
// The schema declares which fields are in the index,
// and for each field, its type and "the way it should
// be indexed".

View File

@@ -11,10 +11,9 @@ use columnar::Column;
// ---
// Importing tantivy...
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::index::SegmentReader;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, IndexWriter, Score};
use tantivy::{doc, Index, IndexWriter, Score, SegmentReader};
#[derive(Default)]
struct Stats {

View File

@@ -4,7 +4,7 @@
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{DateOptions, Document, Schema, Value, INDEXED, STORED, STRING};
use tantivy::schema::{DateOptions, Document, OwnedValue, Schema, INDEXED, STORED, STRING};
use tantivy::{Index, IndexWriter, TantivyDocument};
fn main() -> tantivy::Result<()> {
@@ -13,7 +13,7 @@ fn main() -> tantivy::Result<()> {
let opts = DateOptions::from(INDEXED)
.set_stored()
.set_fast()
.set_precision(tantivy::schema::DateTimePrecision::Seconds);
.set_precision(tantivy::DateTimePrecision::Seconds);
// Add `occurred_at` date field type
let occurred_at = schema_builder.add_date_field("occurred_at", opts);
let event_type = schema_builder.add_text_field("event", STRING | STORED);
@@ -61,12 +61,10 @@ fn main() -> tantivy::Result<()> {
assert_eq!(count_docs.len(), 1);
for (_score, doc_address) in count_docs {
let retrieved_doc = searcher.doc::<TantivyDocument>(doc_address)?;
assert!(retrieved_doc
.get_first(occurred_at)
.unwrap()
.as_value()
.as_datetime()
.is_some(),);
assert!(matches!(
retrieved_doc.get_first(occurred_at),
Some(OwnedValue::Date(_))
));
assert_eq!(
retrieved_doc.to_json(&schema),
r#"{"event":["comment"],"occurred_at":["2022-06-22T13:00:00.22Z"]}"#

View File

@@ -51,7 +51,7 @@ fn main() -> tantivy::Result<()> {
let reader = index.reader()?;
let searcher = reader.searcher();
{
let facets = [
let facets = vec![
Facet::from("/ingredient/egg"),
Facet::from("/ingredient/oil"),
Facet::from("/ingredient/garlic"),
@@ -94,8 +94,9 @@ fn main() -> tantivy::Result<()> {
.doc::<TantivyDocument>(*doc_id)
.unwrap()
.get_first(title)
.and_then(|v| v.as_str().map(|el| el.to_string()))
.and_then(|v| v.as_str())
.unwrap()
.to_owned()
})
.collect();
assert_eq!(titles, vec!["Fried egg", "Egg rolls"]);

View File

@@ -61,7 +61,7 @@ fn main() -> tantivy::Result<()> {
debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool"
))?;
println!("add doc {i} from thread 1 - opstamp {opstamp}");
println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
thread::sleep(Duration::from_millis(20));
}
Result::<(), TantivyError>::Ok(())
@@ -82,7 +82,7 @@ fn main() -> tantivy::Result<()> {
body => "Some great book description..."
))?
};
println!("add doc {i} from thread 2 - opstamp {opstamp}");
println!("add doc {} from thread 2 - opstamp {}", i, opstamp);
thread::sleep(Duration::from_millis(10));
}
Result::<(), TantivyError>::Ok(())

View File

@@ -7,11 +7,10 @@
// the list of documents containing a term, getting
// its term frequency, and accessing its positions.
use tantivy::postings::Postings;
// ---
// Importing tantivy...
use tantivy::schema::*;
use tantivy::{doc, DocSet, Index, IndexWriter, TERMINATED};
use tantivy::{doc, DocSet, Index, IndexWriter, Postings, TERMINATED};
fn main() -> tantivy::Result<()> {
// We first create a schema for the sake of the

View File

@@ -3,11 +3,10 @@ use std::collections::{HashMap, HashSet};
use std::sync::{Arc, RwLock, Weak};
use tantivy::collector::TopDocs;
use tantivy::index::SegmentId;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, TEXT};
use tantivy::{
doc, DocAddress, DocId, Index, IndexWriter, Opstamp, Searcher, SearcherGeneration,
doc, DocAddress, DocId, Index, IndexWriter, Opstamp, Searcher, SearcherGeneration, SegmentId,
SegmentReader, Warmer,
};

View File

@@ -0,0 +1,585 @@
#[cfg(all(test, feature = "unstable"))]
mod bench {
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rand_distr::Distribution;
use serde_json::json;
use test::{self, Bencher};
use crate::aggregation::agg_req::Aggregations;
use crate::aggregation::AggregationCollector;
use crate::query::{AllQuery, TermQuery};
use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
use crate::{Index, Term};
#[derive(Clone, Copy, Hash, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Cardinality {
/// All documents contain exactly one value.
/// `Full` is the default for auto-detecting the Cardinality, since it is the most strict.
#[default]
Full = 0,
/// All documents contain at most one value.
Optional = 1,
/// All documents may contain any number of values.
Multivalued = 2,
/// 1 / 20 documents has a value
Sparse = 3,
}
fn get_collector(agg_req: Aggregations) -> AggregationCollector {
AggregationCollector::from_aggs(agg_req, Default::default())
}
fn get_test_index_bench(cardinality: Cardinality) -> crate::Result<Index> {
let mut schema_builder = Schema::builder();
let text_fieldtype = crate::schema::TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
)
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let json_field = schema_builder.add_json_field("json", FAST);
let text_field_many_terms = schema_builder.add_text_field("text_many_terms", STRING | FAST);
let text_field_few_terms = schema_builder.add_text_field("text_few_terms", STRING | FAST);
let score_fieldtype = crate::schema::NumericOptions::default().set_fast();
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
let index = Index::create_from_tempdir(schema_builder.build())?;
let few_terms_data = ["INFO", "ERROR", "WARN", "DEBUG"];
let lg_norm = rand_distr::LogNormal::new(2.996f64, 0.979f64).unwrap();
let many_terms_data = (0..150_000)
.map(|num| format!("author{}", num))
.collect::<Vec<_>>();
{
let mut rng = StdRng::from_seed([1u8; 32]);
let mut index_writer = index.writer_with_num_threads(1, 200_000_000)?;
// To make the different test cases comparable we just change one doc to force the
// cardinality
if cardinality == Cardinality::Optional {
index_writer.add_document(doc!())?;
}
if cardinality == Cardinality::Multivalued {
index_writer.add_document(doc!(
json_field => json!({"mixed_type": 10.0}),
json_field => json!({"mixed_type": 10.0}),
text_field => "cool",
text_field => "cool",
text_field_many_terms => "cool",
text_field_many_terms => "cool",
text_field_few_terms => "cool",
text_field_few_terms => "cool",
score_field => 1u64,
score_field => 1u64,
score_field_f64 => lg_norm.sample(&mut rng),
score_field_f64 => lg_norm.sample(&mut rng),
score_field_i64 => 1i64,
score_field_i64 => 1i64,
))?;
}
let mut doc_with_value = 1_000_000;
if cardinality == Cardinality::Sparse {
doc_with_value /= 20;
}
let _val_max = 1_000_000.0;
for _ in 0..doc_with_value {
let val: f64 = rng.gen_range(0.0..1_000_000.0);
let json = if rng.gen_bool(0.1) {
// 10% are numeric values
json!({ "mixed_type": val })
} else {
json!({"mixed_type": many_terms_data.choose(&mut rng).unwrap().to_string()})
};
index_writer.add_document(doc!(
text_field => "cool",
json_field => json,
text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
text_field_few_terms => few_terms_data.choose(&mut rng).unwrap().to_string(),
score_field => val as u64,
score_field_f64 => lg_norm.sample(&mut rng),
score_field_i64 => val as i64,
))?;
if cardinality == Cardinality::Sparse {
for _ in 0..20 {
index_writer.add_document(doc!(text_field => "cool"))?;
}
}
}
// writing the segment
index_writer.commit()?;
}
Ok(index)
}
use paste::paste;
#[macro_export]
macro_rules! bench_all_cardinalities {
( $x:ident ) => {
paste! {
#[bench]
fn $x(b: &mut Bencher) {
[<$x _card>](b, Cardinality::Full)
}
#[bench]
fn [<$x _opt>](b: &mut Bencher) {
[<$x _card>](b, Cardinality::Optional)
}
#[bench]
fn [<$x _multi>](b: &mut Bencher) {
[<$x _card>](b, Cardinality::Multivalued)
}
#[bench]
fn [<$x _sparse>](b: &mut Bencher) {
[<$x _card>](b, Cardinality::Sparse)
}
}
};
}
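// Illustration (not part of the original file): `bench_all_cardinalities!(bench_aggregation_average_u64)`
// expands via `paste` into four `#[bench]` functions, each forwarding to
// `bench_aggregation_average_u64_card` with a fixed cardinality:
//   bench_aggregation_average_u64        -> Cardinality::Full
//   bench_aggregation_average_u64_opt    -> Cardinality::Optional
//   bench_aggregation_average_u64_multi  -> Cardinality::Multivalued
//   bench_aggregation_average_u64_sparse -> Cardinality::Sparse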
bench_all_cardinalities!(bench_aggregation_average_u64);
fn bench_aggregation_average_u64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = serde_json::from_value(json!({
"average": { "avg": { "field": "score", } }
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_stats_f64);
fn bench_aggregation_stats_f64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = serde_json::from_value(json!({
"average_f64": { "stats": { "field": "score_f64", } }
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_average_f64);
fn bench_aggregation_average_f64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = serde_json::from_value(json!({
"average_f64": { "avg": { "field": "score_f64", } }
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_percentiles_f64);
fn bench_aggregation_percentiles_f64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_str = r#"
{
"mypercentiles": {
"percentiles": {
"field": "score_f64",
"percents": [ 95, 99, 99.9 ]
}
}
} "#;
let agg_req_1: Aggregations = serde_json::from_str(agg_req_str).unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_average_u64_and_f64);
fn bench_aggregation_average_u64_and_f64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = serde_json::from_value(json!({
"average_f64": { "avg": { "field": "score_f64" } },
"average": { "avg": { "field": "score" } },
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_few);
fn bench_aggregation_terms_few_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = serde_json::from_value(json!({
"my_texts": { "terms": { "field": "text_few_terms" } },
}))
.unwrap();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_many_with_top_hits_agg);
fn bench_aggregation_terms_many_with_top_hits_agg_card(
b: &mut Bencher,
cardinality: Cardinality,
) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = serde_json::from_value(json!({
"my_texts": {
"terms": { "field": "text_many_terms" },
"aggs": {
"top_hits": { "top_hits":
{
"sort": [
{ "score": "desc" }
],
"size": 2,
"doc_value_fields": ["score_f64"]
}
}
}
},
}))
.unwrap();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_many_with_sub_agg);
fn bench_aggregation_terms_many_with_sub_agg_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = serde_json::from_value(json!({
"my_texts": {
"terms": { "field": "text_many_terms" },
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
},
}))
.unwrap();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_many_json_mixed_type_with_sub_agg);
fn bench_aggregation_terms_many_json_mixed_type_with_sub_agg_card(
b: &mut Bencher,
cardinality: Cardinality,
) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = serde_json::from_value(json!({
"my_texts": {
"terms": { "field": "json.mixed_type" },
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
},
}))
.unwrap();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_many2);
fn bench_aggregation_terms_many2_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = serde_json::from_value(json!({
"my_texts": { "terms": { "field": "text_many_terms" } },
}))
.unwrap();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_many_order_by_term);
fn bench_aggregation_terms_many_order_by_term_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = serde_json::from_value(json!({
"my_texts": { "terms": { "field": "text_many_terms", "order": { "_key": "desc" } } },
}))
.unwrap();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_range_only);
fn bench_aggregation_range_only_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_1: Aggregations = serde_json::from_value(json!({
"range_f64": { "range": { "field": "score_f64", "ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 30000 },
{ "from": 30000, "to": 40000 },
{ "from": 40000, "to": 50000 },
{ "from": 50000, "to": 60000 }
] } },
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_range_with_avg);
fn bench_aggregation_range_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_1: Aggregations = serde_json::from_value(json!({
"rangef64": {
"range": {
"field": "score_f64",
"ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 30000 },
{ "from": 30000, "to": 40000 },
{ "from": 40000, "to": 50000 },
{ "from": 50000, "to": 60000 }
]
},
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
},
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
// hard bounds has a different algorithm, because it actually limits collection range
//
bench_all_cardinalities!(bench_aggregation_histogram_only_hard_bounds);
fn bench_aggregation_histogram_only_hard_bounds_card(
b: &mut Bencher,
cardinality: Cardinality,
) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_1: Aggregations = serde_json::from_value(json!({
"rangef64": { "histogram": { "field": "score_f64", "interval": 100, "hard_bounds": { "min": 1000, "max": 300000 } } },
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_histogram_with_avg);
fn bench_aggregation_histogram_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_1: Aggregations = serde_json::from_value(json!({
"rangef64": {
"histogram": { "field": "score_f64", "interval": 100 },
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
}
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_histogram_only);
fn bench_aggregation_histogram_only_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_1: Aggregations = serde_json::from_value(json!({
"rangef64": {
"histogram": {
"field": "score_f64",
"interval": 100 // 1000 buckets
},
}
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_avg_and_range_with_avg);
fn bench_aggregation_avg_and_range_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = serde_json::from_value(json!({
"rangef64": {
"range": {
"field": "score_f64",
"ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 60000 }
]
},
"aggs": {
"average_in_range": { "avg": { "field": "score" } }
}
},
"average": { "avg": { "field": "score" } }
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
}

View File

@@ -81,11 +81,10 @@ impl AggregationLimits {
}
}
pub(crate) fn add_memory_consumed(&self, add_num_bytes: u64) -> crate::Result<()> {
let prev_value = self
.memory_consumption
.fetch_add(add_num_bytes, Ordering::Relaxed);
validate_memory_consumption(prev_value + add_num_bytes, self.memory_limit)?;
pub(crate) fn add_memory_consumed(&self, num_bytes: u64) -> crate::Result<()> {
self.memory_consumption
.fetch_add(num_bytes, Ordering::Relaxed);
validate_memory_consumption(&self.memory_consumption, self.memory_limit)?;
Ok(())
}
@@ -95,11 +94,11 @@ impl AggregationLimits {
}
fn validate_memory_consumption(
memory_consumption: u64,
memory_consumption: &AtomicU64,
memory_limit: ByteCount,
) -> Result<(), AggregationError> {
// Load the estimated memory consumed by the aggregations
let memory_consumed: ByteCount = memory_consumption.into();
let memory_consumed: ByteCount = memory_consumption.load(Ordering::Relaxed).into();
if memory_consumed > memory_limit {
return Err(AggregationError::MemoryExceeded {
limit: memory_limit,
@@ -119,11 +118,10 @@ pub struct ResourceLimitGuard {
}
impl ResourceLimitGuard {
pub(crate) fn add_memory_consumed(&self, add_num_bytes: u64) -> crate::Result<()> {
let prev_value = self
.memory_consumption
.fetch_add(add_num_bytes, Ordering::Relaxed);
validate_memory_consumption(prev_value + add_num_bytes, self.memory_limit)?;
pub(crate) fn add_memory_consumed(&self, num_bytes: u64) -> crate::Result<()> {
self.memory_consumption
.fetch_add(num_bytes, Ordering::Relaxed);
validate_memory_consumption(&self.memory_consumption, self.memory_limit)?;
Ok(())
}
}
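
The two variants of `add_memory_consumed` above differ in how they obtain the running total: one validates `prev_value + add_num_bytes` as returned by `fetch_add`, while the other re-loads the atomic afterwards, which may also observe bytes added concurrently by other threads. A self-contained sketch of the `fetch_add`-based pattern, with illustrative names rather than tantivy's API:

use std::sync::atomic::{AtomicU64, Ordering};

/// Illustrative memory-budget guard: `try_consume` fails once the
/// running total would exceed the limit.
struct MemoryBudget {
    consumed: AtomicU64,
    limit: u64,
}

impl MemoryBudget {
    fn try_consume(&self, num_bytes: u64) -> Result<(), String> {
        // `fetch_add` returns the previous value, so `prev + num_bytes` is
        // exactly the total including this allocation, even while other
        // threads update the counter concurrently.
        let prev = self.consumed.fetch_add(num_bytes, Ordering::Relaxed);
        if prev + num_bytes > self.limit {
            return Err(format!("memory limit exceeded: {} > {}", prev + num_bytes, self.limit));
        }
        Ok(())
    }
}

fn main() {
    let budget = MemoryBudget { consumed: AtomicU64::new(0), limit: 100 };
    assert!(budget.try_consume(60).is_ok());
    assert!(budget.try_consume(60).is_err()); // 120 > 100
}

The histogram and term collector hunks later in this diff feed their per-block `mem_delta` into the same accounting.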

View File

@@ -17,8 +17,7 @@ use super::metric::{
use super::segment_agg_result::AggregationLimits;
use super::VecWithNames;
use crate::aggregation::{f64_to_fastfield_u64, Key};
use crate::index::SegmentReader;
use crate::SegmentOrdinal;
use crate::{SegmentOrdinal, SegmentReader};
#[derive(Default)]
pub(crate) struct AggregationsWithAccessor {
@@ -335,8 +334,8 @@ fn get_missing_val(
}
_ => {
return Err(crate::TantivyError::InvalidArgument(format!(
"Missing value {missing:?} for field {field_name} is not supported for column \
type {column_type:?}"
"Missing value {:?} for field {} is not supported for column type {:?}",
missing, field_name, column_type
)));
}
};
@@ -403,7 +402,7 @@ fn get_dynamic_columns(
.iter()
.map(|h| h.open())
.collect::<io::Result<_>>()?;
assert!(!ff_fields.is_empty(), "field {field_name} not found");
assert!(!ff_fields.is_empty(), "field {} not found", field_name);
Ok(cols)
}

View File

@@ -331,11 +331,9 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
}
let mem_delta = self.get_memory_consumption() - mem_pre;
if mem_delta > 0 {
bucket_agg_accessor
.limits
.add_memory_consumed(mem_delta as u64)?;
}
bucket_agg_accessor
.limits
.add_memory_consumed(mem_delta as u64)?;
Ok(())
}

View File

@@ -324,11 +324,9 @@ impl SegmentAggregationCollector for SegmentTermCollector {
}
let mem_delta = self.get_memory_consumption() - mem_pre;
if mem_delta > 0 {
bucket_agg_accessor
.limits
.add_memory_consumed(mem_delta as u64)?;
}
bucket_agg_accessor
.limits
.add_memory_consumed(mem_delta as u64)?;
Ok(())
}
@@ -357,7 +355,8 @@ impl SegmentTermCollector {
) -> crate::Result<Self> {
if field_type == ColumnType::Bytes {
return Err(TantivyError::InvalidArgument(format!(
"terms aggregation is not supported for column type {field_type:?}"
"terms aggregation is not supported for column type {:?}",
field_type
)));
}
let term_buckets = TermBuckets::default();

View File

@@ -8,8 +8,7 @@ use super::segment_agg_result::{
};
use crate::aggregation::agg_req_with_accessor::get_aggs_with_segment_accessor_and_validate;
use crate::collector::{Collector, SegmentCollector};
use crate::index::SegmentReader;
use crate::{DocId, SegmentOrdinal, TantivyError};
use crate::{DocId, SegmentOrdinal, SegmentReader, TantivyError};
/// The default max bucket count, before the aggregation fails.
pub const DEFAULT_BUCKET_LIMIT: u32 = 65000;

View File

@@ -131,8 +131,8 @@ impl<'de> Deserialize<'de> for KeyOrder {
))?;
if key_order.next().is_some() {
return Err(serde::de::Error::custom(format!(
"Expected exactly one key-value pair in sort parameter of top_hits, found \
{key_order:?}"
"Expected exactly one key-value pair in sort parameter of top_hits, found {:?}",
key_order
)));
}
Ok(Self { field, order })
@@ -144,22 +144,27 @@ fn globbed_string_to_regex(glob: &str) -> Result<Regex, crate::TantivyError> {
// Replace `*` glob with `.*` regex
let sanitized = format!("^{}$", regex::escape(glob).replace(r"\*", ".*"));
Regex::new(&sanitized.replace('*', ".*")).map_err(|e| {
crate::TantivyError::SchemaError(format!("Invalid regex '{glob}' in docvalue_fields: {e}"))
crate::TantivyError::SchemaError(format!(
"Invalid regex '{}' in docvalue_fields: {}",
glob, e
))
})
}
fn use_doc_value_fields_err(parameter: &str) -> crate::Result<()> {
Err(crate::TantivyError::AggregationError(
AggregationError::InvalidRequest(format!(
"The `{parameter}` parameter is not supported, only `docvalue_fields` is supported in \
`top_hits` aggregation"
"The `{}` parameter is not supported, only `docvalue_fields` is supported in \
`top_hits` aggregation",
parameter
)),
))
}
fn unsupported_err(parameter: &str) -> crate::Result<()> {
Err(crate::TantivyError::AggregationError(
AggregationError::InvalidRequest(format!(
"The `{parameter}` parameter is not supported in the `top_hits` aggregation"
"The `{}` parameter is not supported in the `top_hits` aggregation",
parameter
)),
))
}
@@ -212,7 +217,8 @@ impl TopHitsAggregation {
.collect::<Vec<_>>();
assert!(
!fields.is_empty(),
"No fields matched the glob '{field}' in docvalue_fields"
"No fields matched the glob '{}' in docvalue_fields",
field
);
Ok(fields)
})
@@ -248,7 +254,7 @@ impl TopHitsAggregation {
.map(|field| {
let accessors = accessors
.get(field)
.unwrap_or_else(|| panic!("field '{field}' not found in accessors"));
.unwrap_or_else(|| panic!("field '{}' not found in accessors", field));
let values: Vec<FastFieldValue> = accessors
.iter()

View File

@@ -143,6 +143,8 @@ use std::fmt::Display;
#[cfg(test)]
mod agg_tests;
mod agg_bench;
use core::fmt;
pub use agg_limits::AggregationLimits;
@@ -158,14 +160,15 @@ use serde::de::{self, Visitor};
use serde::{Deserialize, Deserializer, Serialize};
fn parse_str_into_f64<E: de::Error>(value: &str) -> Result<f64, E> {
let parsed = value
.parse::<f64>()
.map_err(|_err| de::Error::custom(format!("Failed to parse f64 from string: {value:?}")))?;
let parsed = value.parse::<f64>().map_err(|_err| {
de::Error::custom(format!("Failed to parse f64 from string: {:?}", value))
})?;
// Check if the parsed value is NaN or infinity
if parsed.is_nan() || parsed.is_infinite() {
Err(de::Error::custom(format!(
"Value is not a valid f64 (NaN or Infinity): {value:?}"
"Value is not a valid f64 (NaN or Infinity): {:?}",
value
)))
} else {
Ok(parsed)
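
Both formattings of `parse_str_into_f64` above implement the same validation: parse the string, then reject NaN and infinity, which JSON cannot represent. A standalone sketch of the logic, returning a plain `String` error instead of `serde::de::Error`:

fn parse_str_into_f64(value: &str) -> Result<f64, String> {
    let parsed = value
        .parse::<f64>()
        .map_err(|_| format!("Failed to parse f64 from string: {value:?}"))?;
    // "NaN" and "inf" parse successfully as f64, so they must be rejected explicitly.
    if parsed.is_nan() || parsed.is_infinite() {
        return Err(format!("Value is not a valid f64 (NaN or Infinity): {value:?}"));
    }
    Ok(parsed)
}

fn main() {
    assert_eq!(parse_str_into_f64("99.9"), Ok(99.9));
    assert!(parse_str_into_f64("NaN").is_err());
    assert!(parse_str_into_f64("not a number").is_err());
}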

View File

@@ -598,7 +598,7 @@ mod tests {
let mid = n % 4;
n /= 4;
let leaf = n % 5;
Facet::from(&format!("/top{top}/mid{mid}/leaf{leaf}"))
Facet::from(&format!("/top{}/mid{}/leaf{}", top, mid, leaf))
})
.collect();
for i in 0..num_facets * 10 {
@@ -737,7 +737,7 @@ mod tests {
vec![("a", 10), ("b", 100), ("c", 7), ("d", 12), ("e", 21)]
.into_iter()
.flat_map(|(c, count)| {
let facet = Facet::from(&format!("/facet/{c}"));
let facet = Facet::from(&format!("/facet/{}", c));
let doc = doc!(facet_field => facet);
iter::repeat(doc).take(count)
})
@@ -785,7 +785,7 @@ mod tests {
let docs: Vec<TantivyDocument> = vec![("b", 2), ("a", 2), ("c", 4)]
.into_iter()
.flat_map(|(c, count)| {
let facet = Facet::from(&format!("/facet/{c}"));
let facet = Facet::from(&format!("/facet/{}", c));
let doc = doc!(facet_field => facet);
iter::repeat(doc).take(count)
})

View File

@@ -4,8 +4,7 @@ use std::marker::PhantomData;
use serde::{Deserialize, Serialize};
use super::top_score_collector::TopNComputer;
use crate::index::SegmentReader;
use crate::{DocAddress, DocId, SegmentOrdinal};
use crate::{DocAddress, DocId, SegmentOrdinal, SegmentReader};
/// Contains a feature (field, score, etc.) of a document along with the document address.
///

View File

@@ -1,25 +1,19 @@
use std::sync::Arc;
#[cfg(feature = "quickwit")]
use futures_util::{future::Either, FutureExt};
use rayon::{ThreadPool, ThreadPoolBuilder};
use crate::TantivyError;
/// Executor makes it possible to run tasks in a single thread or
/// in a thread pool.
#[derive(Clone)]
/// Search executor that decides whether a search request runs on a single
/// thread or on a thread pool.
///
/// We don't expose the Rayon thread pool directly here for several reasons.
///
/// First, dependency hell: it is not a good idea to expose the
/// API of a dependency, knowing it might conflict with a different version
/// used by the client. Second, we may stop using rayon in the future.
pub enum Executor {
/// Single thread variant of an Executor
SingleThread,
/// Thread pool variant of an Executor
ThreadPool(Arc<rayon::ThreadPool>),
}
#[cfg(feature = "quickwit")]
impl From<Arc<rayon::ThreadPool>> for Executor {
fn from(thread_pool: Arc<rayon::ThreadPool>) -> Self {
Executor::ThreadPool(thread_pool)
}
ThreadPool(ThreadPool),
}
impl Executor {
@@ -30,11 +24,11 @@ impl Executor {
/// Creates an Executor that dispatches the tasks in a thread pool.
pub fn multi_thread(num_threads: usize, prefix: &'static str) -> crate::Result<Executor> {
let pool = rayon::ThreadPoolBuilder::new()
let pool = ThreadPoolBuilder::new()
.num_threads(num_threads)
.thread_name(move |num| format!("{prefix}{num}"))
.build()?;
Ok(Executor::ThreadPool(Arc::new(pool)))
Ok(Executor::ThreadPool(pool))
}
/// Perform a map in the thread pool.
@@ -97,36 +91,11 @@ impl Executor {
}
}
}
/// Spawn a task on the pool, returning a future completing on task success.
///
/// If the task panics, returns `Err(())`.
#[cfg(feature = "quickwit")]
pub fn spawn_blocking<T: Send + 'static>(
&self,
cpu_intensive_task: impl FnOnce() -> T + Send + 'static,
) -> impl std::future::Future<Output = Result<T, ()>> {
match self {
Executor::SingleThread => Either::Left(std::future::ready(Ok(cpu_intensive_task()))),
Executor::ThreadPool(pool) => {
let (sender, receiver) = oneshot::channel();
pool.spawn(|| {
if sender.is_closed() {
return;
}
let task_result = cpu_intensive_task();
let _ = sender.send(task_result);
});
let res = receiver.map(|res| res.map_err(|_| ()));
Either::Right(res)
}
}
}
}
#[cfg(test)]
mod tests {
use super::Executor;
#[test]
@@ -178,62 +147,4 @@ mod tests {
assert_eq!(result[i], i * 2);
}
}
#[cfg(feature = "quickwit")]
#[test]
fn test_cancel_cpu_intensive_tasks() {
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
let counter: Arc<AtomicU64> = Default::default();
let other_counter: Arc<AtomicU64> = Default::default();
let mut futures = Vec::new();
let mut other_futures = Vec::new();
let (tx, rx) = crossbeam_channel::bounded::<()>(0);
let rx = Arc::new(rx);
let executor = Executor::multi_thread(3, "search-test").unwrap();
for i in 0..1000 {
let counter_clone: Arc<AtomicU64> = counter.clone();
let other_counter_clone: Arc<AtomicU64> = other_counter.clone();
let rx_clone = rx.clone();
let rx_clone2 = rx.clone();
let fut = executor.spawn_blocking(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
let () = rx_clone.recv().unwrap();
});
futures.push(fut);
let other_fut = executor.spawn_blocking(move || {
other_counter_clone.fetch_add(1, Ordering::SeqCst);
let () = rx_clone2.recv().unwrap();
});
other_futures.push(other_fut);
}
// We execute 100 futures.
for i in 0..100 {
tx.send(()).unwrap();
}
let counter_val = counter.load(Ordering::SeqCst);
let other_counter_val = other_counter.load(Ordering::SeqCst);
assert!(counter_val >= 30);
assert!(other_counter_val >= 30);
drop(other_futures);
// We execute 100 futures.
for i in 0..100 {
tx.send(()).unwrap();
}
let counter_val2 = counter.load(Ordering::SeqCst);
assert!(counter_val2 >= counter_val + 100 - 6);
let other_counter_val2 = other_counter.load(Ordering::SeqCst);
assert!(other_counter_val2 <= other_counter_val + 6);
}
}
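
The `spawn_blocking` variant shown above pairs a rayon pool with a oneshot channel so that dropping the returned future before the task starts makes the pool skip it (the `is_closed` check). A minimal sketch of the same pattern, substituting the `futures` crate's oneshot channel for the forked `oneshot` crate referenced in Cargo.toml; `rayon` and `futures` are assumed dependencies:

use futures::FutureExt;

/// Sketch: run `task` on a rayon pool. The returned future yields `Err(())`
/// if the sender side is dropped without sending, e.g. when the task is
/// skipped after cancellation.
fn spawn_blocking<T: Send + 'static>(
    pool: &rayon::ThreadPool,
    task: impl FnOnce() -> T + Send + 'static,
) -> impl std::future::Future<Output = Result<T, ()>> {
    let (sender, receiver) = futures::channel::oneshot::channel();
    pool.spawn(move || {
        // If the caller already dropped the future, skip the work entirely.
        if sender.is_canceled() {
            return;
        }
        let _ = sender.send(task());
    });
    receiver.map(|res| res.map_err(|_| ()))
}

fn main() {
    let pool = rayon::ThreadPoolBuilder::new()
        .num_threads(2)
        .build()
        .unwrap();
    let fut = spawn_blocking(&pool, || 21 * 2);
    assert_eq!(futures::executor::block_on(fut), Ok(42));
}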

View File

@@ -4,7 +4,8 @@ use rustc_hash::FxHashMap;
use crate::postings::{IndexingContext, IndexingPosition, PostingsWriter};
use crate::schema::document::{ReferenceValue, ReferenceValueLeaf, Value};
use crate::schema::Type;
use crate::schema::indexing_term::IndexingTerm;
use crate::schema::{Field, Type};
use crate::time::format_description::well_known::Rfc3339;
use crate::time::{OffsetDateTime, UtcOffset};
use crate::tokenizer::TextAnalyzer;
@@ -31,7 +32,7 @@ use crate::{DateTime, DocId, Term};
/// position 1.
/// As a result, with lemmatization, "The Smiths" will match our object.
///
/// Worse, if a same term appears in the second object, a non increasing value would be pushed
/// Worse, if a same term is appears in the second object, a non increasing value would be pushed
/// to the position recorder probably provoking a panic.
///
/// This problem is solved for regular multivalued object by offsetting the position
@@ -50,7 +51,7 @@ use crate::{DateTime, DocId, Term};
/// We can therefore afford working with a map that is not perfect. It is fine if several
/// paths map to the same index position as long as the probability is relatively low.
#[derive(Default)]
pub(crate) struct IndexingPositionsPerPath {
struct IndexingPositionsPerPath {
positions_per_path: FxHashMap<u32, IndexingPosition>,
}
@@ -58,9 +59,6 @@ impl IndexingPositionsPerPath {
fn get_position_from_id(&mut self, id: u32) -> &mut IndexingPosition {
self.positions_per_path.entry(id).or_default()
}
pub fn clear(&mut self) {
self.positions_per_path.clear();
}
}
/// Convert JSON_PATH_SEGMENT_SEP to a dot.
@@ -71,12 +69,42 @@ pub fn json_path_sep_to_dot(path: &mut str) {
}
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn index_json_values<'a, V: Value<'a>>(
doc: DocId,
json_visitors: impl Iterator<Item = crate::Result<V::ObjectIter>>,
text_analyzer: &mut TextAnalyzer,
expand_dots_enabled: bool,
term_buffer: &mut IndexingTerm,
postings_writer: &mut dyn PostingsWriter,
json_path_writer: &mut JsonPathWriter,
ctx: &mut IndexingContext,
) -> crate::Result<()> {
json_path_writer.clear();
json_path_writer.set_expand_dots(expand_dots_enabled);
let mut positions_per_path: IndexingPositionsPerPath = Default::default();
for json_visitor_res in json_visitors {
let json_visitor = json_visitor_res?;
index_json_object::<V>(
doc,
json_visitor,
text_analyzer,
term_buffer,
json_path_writer,
postings_writer,
ctx,
&mut positions_per_path,
);
}
Ok(())
}
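A short sketch of what the `expand_dots_enabled` flag changes, using the `JsonPathWriter` API as it appears elsewhere in this diff (the exact serialized bytes are an implementation detail and are only described, not asserted):

let mut path = JsonPathWriter::with_expand_dots(true);
path.push("attributes.color");
path.set_end();
// With expand_dots enabled, the single segment is split on '.', as if
// push("attributes") and push("color") had been called. With it disabled,
// "attributes.color" stays one literal key and must be queried with the
// dot escaped, e.g. json.attributes\.color:red.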
#[allow(clippy::too_many_arguments)]
fn index_json_object<'a, V: Value<'a>>(
doc: DocId,
json_visitor: V::ObjectIter,
text_analyzer: &mut TextAnalyzer,
term_buffer: &mut Term,
term_buffer: &mut IndexingTerm,
json_path_writer: &mut JsonPathWriter,
postings_writer: &mut dyn PostingsWriter,
ctx: &mut IndexingContext,
@@ -99,23 +127,20 @@ fn index_json_object<'a, V: Value<'a>>(
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn index_json_value<'a, V: Value<'a>>(
fn index_json_value<'a, V: Value<'a>>(
doc: DocId,
json_value: V,
text_analyzer: &mut TextAnalyzer,
term_buffer: &mut Term,
term_buffer: &mut IndexingTerm,
json_path_writer: &mut JsonPathWriter,
postings_writer: &mut dyn PostingsWriter,
ctx: &mut IndexingContext,
positions_per_path: &mut IndexingPositionsPerPath,
) {
let set_path_id = |term_buffer: &mut Term, unordered_id: u32| {
let set_path_id = |term_buffer: &mut IndexingTerm, unordered_id: u32| {
term_buffer.truncate_value_bytes(0);
term_buffer.append_bytes(&unordered_id.to_be_bytes());
};
let set_type = |term_buffer: &mut Term, typ: Type| {
term_buffer.append_bytes(&[typ.to_code()]);
};
match json_value.as_value() {
ReferenceValue::Leaf(leaf) => match leaf {
@@ -128,7 +153,7 @@ pub(crate) fn index_json_value<'a, V: Value<'a>>(
// TODO: make sure the chain position works out.
set_path_id(term_buffer, unordered_id);
set_type(term_buffer, Type::Str);
term_buffer.append_bytes(&[Type::Str.to_code()]);
let indexing_position = positions_per_path.get_position_from_id(unordered_id);
postings_writer.index_text(
doc,
@@ -139,18 +164,12 @@ pub(crate) fn index_json_value<'a, V: Value<'a>>(
);
}
ReferenceValueLeaf::U64(val) => {
// try to parse to i64, since when querying we will apply the same logic and prefer
// i64 values
set_path_id(
term_buffer,
ctx.path_to_unordered_id
.get_or_allocate_unordered_id(json_path_writer.as_str()),
);
if let Ok(i64_val) = val.try_into() {
term_buffer.append_type_and_fast_value::<i64>(i64_val);
} else {
term_buffer.append_type_and_fast_value(val);
}
term_buffer.append_type_and_fast_value(val);
postings_writer.subscribe(doc, 0u32, term_buffer, ctx);
}
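The comment removed in this hunk describes a normalization that is easy to get wrong, so it is worth a self-contained sketch (helper name hypothetical):

// u64 values that fit into i64 are indexed as i64, because the query parser
// prefers i64 when it parses a JSON number, and both sides must produce the
// same term bytes to match.
fn indexed_as_i64(val: u64) -> bool {
    i64::try_from(val).is_ok()
}

fn main() {
    assert!(indexed_as_i64(75));
    assert!(!indexed_as_i64(u64::MAX)); // only values above i64::MAX stay u64
}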
ReferenceValueLeaf::I64(val) => {
@@ -190,18 +209,16 @@ pub(crate) fn index_json_value<'a, V: Value<'a>>(
postings_writer.subscribe(doc, 0u32, term_buffer, ctx);
}
ReferenceValueLeaf::PreTokStr(_) => {
unimplemented!(
"Pre-tokenized string support in dynamic fields is not yet implemented"
)
unimplemented!("Pre-tokenized string support in JSON fields is not yet implemented")
}
ReferenceValueLeaf::Bytes(_) => {
unimplemented!("Bytes support in dynamic fields is not yet implemented")
unimplemented!("Bytes support in JSON fields is not yet implemented")
}
ReferenceValueLeaf::Facet(_) => {
unimplemented!("Facet support in dynamic fields is not yet implemented")
unimplemented!("Facet support in JSON fields is not yet implemented")
}
ReferenceValueLeaf::IpAddr(_) => {
unimplemented!("IP address support in dynamic fields is not yet implemented")
unimplemented!("IP address support in JSON fields is not yet implemented")
}
},
ReferenceValue::Array(elements) => {
@@ -239,8 +256,9 @@ pub(crate) fn index_json_value<'a, V: Value<'a>>(
pub fn convert_to_fast_value_and_append_to_json_term(mut term: Term, phrase: &str) -> Option<Term> {
assert_eq!(
term.value()
.as_json_value_bytes()
.as_json()
.expect("expecting a Term with a json type and json path")
.1
.as_serialized()
.len(),
0,
@@ -325,27 +343,47 @@ pub(crate) fn encode_column_name(
path.into()
}
pub fn term_from_json_paths<'a>(
json_field: Field,
paths: impl Iterator<Item = &'a str>,
expand_dots_enabled: bool,
) -> Term {
let mut json_path = JsonPathWriter::with_expand_dots(expand_dots_enabled);
for path in paths {
json_path.push(path);
}
json_path.set_end();
let mut term = Term::with_type_and_field(Type::Json, json_field);
term.append_bytes(json_path.as_str().as_bytes());
term
}
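A hedged usage sketch of this new helper together with `convert_to_fast_value_and_append_to_json_term`, assuming this branch's doc-hidden `json_utils` re-export:

use tantivy::json_utils::{convert_to_fast_value_and_append_to_json_term, term_from_json_paths};
use tantivy::schema::Field;

fn main() {
    let field = Field::from_field_id(1);
    let term = term_from_json_paths(field, ["attributes", "color"].into_iter(), false);
    // The conversion requires a term whose value bytes are still empty;
    // "400" parses as a fast value and is appended with its type code.
    let typed = convert_to_fast_value_and_append_to_json_term(term, "400");
    assert!(typed.is_some());
}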
#[cfg(test)]
mod tests {
use super::split_json_path;
use crate::json_utils::term_from_json_paths;
use crate::schema::Field;
use crate::Term;
#[test]
fn test_json_writer() {
let field = Field::from_field_id(1);
let mut term = Term::from_field_json_path(field, "attributes.color", false);
let mut term = term_from_json_paths(field, ["attributes", "color"].into_iter(), false);
term.append_type_and_str("red");
assert_eq!(
format!("{term:?}"),
format!("{:?}", term),
"Term(field=1, type=Json, path=attributes.color, type=Str, \"red\")"
);
let mut term = Term::from_field_json_path(field, "attributes.dimensions.width", false);
let mut term = term_from_json_paths(
field,
["attributes", "dimensions", "width"].into_iter(),
false,
);
term.append_type_and_fast_value(400i64);
assert_eq!(
format!("{term:?}"),
format!("{:?}", term),
"Term(field=1, type=Json, path=attributes.dimensions.width, type=I64, 400)"
);
}
@@ -353,7 +391,7 @@ mod tests {
#[test]
fn test_string_term() {
let field = Field::from_field_id(1);
let mut term = Term::from_field_json_path(field, "color", false);
let mut term = term_from_json_paths(field, ["color"].into_iter(), false);
term.append_type_and_str("red");
assert_eq!(term.serialized_term(), b"\x00\x00\x00\x01jcolor\x00sred")
@@ -362,46 +400,46 @@ mod tests {
#[test]
fn test_i64_term() {
let field = Field::from_field_id(1);
let mut term = Term::from_field_json_path(field, "color", false);
let mut term = term_from_json_paths(field, ["color"].into_iter(), false);
term.append_type_and_fast_value(-4i64);
assert_eq!(
term.serialized_term(),
b"\x00\x00\x00\x01jcolor\x00i\x7f\xff\xff\xff\xff\xff\xff\xfc"
term.value().as_serialized(),
b"jcolor\x00i\x7f\xff\xff\xff\xff\xff\xff\xfc"
)
}
#[test]
fn test_u64_term() {
let field = Field::from_field_id(1);
let mut term = Term::from_field_json_path(field, "color", false);
let mut term = term_from_json_paths(field, ["color"].into_iter(), false);
term.append_type_and_fast_value(4u64);
assert_eq!(
term.serialized_term(),
b"\x00\x00\x00\x01jcolor\x00u\x00\x00\x00\x00\x00\x00\x00\x04"
term.value().as_serialized(),
b"jcolor\x00u\x00\x00\x00\x00\x00\x00\x00\x04"
)
}
#[test]
fn test_f64_term() {
let field = Field::from_field_id(1);
let mut term = Term::from_field_json_path(field, "color", false);
let mut term = term_from_json_paths(field, ["color"].into_iter(), false);
term.append_type_and_fast_value(4.0f64);
assert_eq!(
term.serialized_term(),
b"\x00\x00\x00\x01jcolor\x00f\xc0\x10\x00\x00\x00\x00\x00\x00"
term.value().as_serialized(),
b"jcolor\x00f\xc0\x10\x00\x00\x00\x00\x00\x00"
)
}
#[test]
fn test_bool_term() {
let field = Field::from_field_id(1);
let mut term = Term::from_field_json_path(field, "color", false);
let mut term = term_from_json_paths(field, ["color"].into_iter(), false);
term.append_type_and_fast_value(true);
assert_eq!(
term.serialized_term(),
b"\x00\x00\x00\x01jcolor\x00o\x00\x00\x00\x00\x00\x00\x00\x01"
term.value().as_serialized(),
b"jcolor\x00o\x00\x00\x00\x00\x00\x00\x00\x01"
)
}

View File

@@ -4,13 +4,13 @@ use std::{fmt, io};
use crate::collector::Collector;
use crate::core::Executor;
use crate::index::{SegmentId, SegmentReader};
use crate::index::SegmentReader;
use crate::query::{Bm25StatisticsProvider, EnableScoring, Query};
use crate::schema::document::DocumentDeserialize;
use crate::schema::{Schema, Term};
use crate::space_usage::SearcherSpaceUsage;
use crate::store::{CacheStats, StoreReader};
use crate::{DocAddress, Index, Opstamp, TrackedObject};
use crate::{DocAddress, Index, Opstamp, SegmentId, TrackedObject};
/// Identifies the searcher generation accessed by a [`Searcher`].
///
@@ -109,9 +109,8 @@ impl Searcher {
&self,
doc_address: DocAddress,
) -> crate::Result<D> {
let executor = self.inner.index.search_executor();
let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize];
store_reader.get_async(doc_address.doc_id, executor).await
store_reader.get_async(doc_address.doc_id).await
}
/// Access the schema associated with the index of this searcher.

View File

@@ -1,14 +1,13 @@
use crate::collector::Count;
use crate::directory::{RamDirectory, WatchCallback};
use crate::index::SegmentId;
use crate::indexer::{LogMergePolicy, NoMergePolicy};
use crate::postings::Postings;
use crate::json_utils::term_from_json_paths;
use crate::query::TermQuery;
use crate::schema::{Field, IndexRecordOption, Schema, INDEXED, STRING, TEXT};
use crate::tokenizer::TokenizerManager;
use crate::{
Directory, DocSet, Index, IndexBuilder, IndexReader, IndexSettings, IndexWriter, ReloadPolicy,
TantivyDocument, Term,
Directory, DocSet, Index, IndexBuilder, IndexReader, IndexSettings, IndexWriter, Postings,
ReloadPolicy, SegmentId, TantivyDocument, Term,
};
#[test]
@@ -418,8 +417,8 @@ fn test_non_text_json_term_freq() {
let segment_reader = searcher.segment_reader(0u32);
let inv_idx = segment_reader.inverted_index(field).unwrap();
let mut term = Term::from_field_json_path(field, "tenant_id", false);
term.append_type_and_fast_value(75i64);
let mut term = term_from_json_paths(field, ["tenant_id"].iter().cloned(), false);
term.append_type_and_fast_value(75u64);
let postings = inv_idx
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
@@ -452,8 +451,8 @@ fn test_non_text_json_term_freq_bitpacked() {
let segment_reader = searcher.segment_reader(0u32);
let inv_idx = segment_reader.inverted_index(field).unwrap();
let mut term = Term::from_field_json_path(field, "tenant_id", false);
term.append_type_and_fast_value(75i64);
let mut term = term_from_json_paths(field, ["tenant_id"].iter().cloned(), false);
term.append_type_and_fast_value(75u64);
let mut postings = inv_idx
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)

View File

@@ -566,7 +566,7 @@ mod tests {
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
let num_paths = 10;
let paths: Vec<PathBuf> = (0..num_paths)
.map(|i| PathBuf::from(&*format!("file_{i}")))
.map(|i| PathBuf::from(&*format!("file_{}", i)))
.collect();
{
for path in &paths {

View File

@@ -62,7 +62,8 @@ impl FacetReader {
#[cfg(test)]
mod tests {
use crate::schema::{Facet, FacetOptions, SchemaBuilder, Value, STORED};
use crate::schema::document::Value;
use crate::schema::{Facet, FacetOptions, SchemaBuilder, STORED};
use crate::{DocAddress, Index, IndexWriter, TantivyDocument};
#[test]
@@ -88,9 +89,7 @@ mod tests {
let doc = searcher
.doc::<TantivyDocument>(DocAddress::new(0u32, 0u32))
.unwrap();
let value = doc
.get_first(facet_field)
.and_then(|v| v.as_value().as_facet());
let value = doc.get_first(facet_field).and_then(|v| v.as_facet());
assert_eq!(value, None);
}
@@ -147,11 +146,8 @@ mod tests {
facet_ords.extend(facet_reader.facet_ords(0u32));
assert_eq!(&facet_ords, &[0u64]);
let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0u32, 0u32))?;
let value: Option<Facet> = doc
.get_first(facet_field)
.and_then(|v| v.as_facet())
.map(|facet| Facet::from_encoded_string(facet.to_string()));
assert_eq!(value, Facet::from_text("/a/b").ok());
let value: Option<&Facet> = doc.get_first(facet_field).and_then(|v| v.as_facet());
assert_eq!(value, Facet::from_text("/a/b").ok().as_ref());
Ok(())
}

View File

@@ -80,7 +80,7 @@ mod tests {
use std::path::Path;
use columnar::StrColumn;
use common::{ByteCount, DateTimePrecision, HasLen, TerminatingWrite};
use common::{ByteCount, HasLen, TerminatingWrite};
use once_cell::sync::Lazy;
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
@@ -88,15 +88,14 @@ mod tests {
use super::*;
use crate::directory::{Directory, RamDirectory, WritePtr};
use crate::index::SegmentId;
use crate::merge_policy::NoMergePolicy;
use crate::schema::{
DateOptions, Facet, FacetOptions, Field, JsonObjectOptions, Schema, SchemaBuilder,
TantivyDocument, TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
Facet, FacetOptions, Field, JsonObjectOptions, Schema, SchemaBuilder, TantivyDocument,
TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
};
use crate::time::OffsetDateTime;
use crate::tokenizer::{LowerCaser, RawTokenizer, TextAnalyzer, TokenizerManager};
use crate::{Index, IndexWriter, SegmentReader};
use crate::{DateOptions, DateTimePrecision, Index, IndexWriter, SegmentId, SegmentReader};
pub static SCHEMA: Lazy<Schema> = Lazy::new(|| {
let mut schema_builder = Schema::builder();

View File

@@ -1,14 +1,14 @@
use std::io;
use columnar::{ColumnarWriter, NumericalValue};
use common::{DateTimePrecision, JsonPathWriter};
use common::JsonPathWriter;
use tokenizer_api::Token;
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::schema::document::{Document, ReferenceValue, ReferenceValueLeaf, Value};
use crate::schema::{value_type_to_column_type, Field, FieldType, Schema, Type};
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::{DocId, TantivyError};
use crate::{DateTimePrecision, DocId, TantivyError};
/// Only index JSON down to a depth of 20.
/// This is mostly to guard us from a stack overflow triggered by malicious input.
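A minimal sketch of the guard this limit implies; names and the error message are assumptions, and the real writer threads a depth counter through its recursive visitor rather than pre-scanning the value:

const JSON_DEPTH_LIMIT: usize = 20;

fn check_depth(value: &serde_json::Value, depth: usize) -> Result<(), &'static str> {
    if depth > JSON_DEPTH_LIMIT {
        return Err("JSON value exceeds the maximal supported depth");
    }
    match value {
        serde_json::Value::Array(items) => items
            .iter()
            .try_for_each(|item| check_depth(item, depth + 1)),
        serde_json::Value::Object(map) => map
            .values()
            .try_for_each(|item| check_depth(item, depth + 1)),
        _ => Ok(()),
    }
}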
@@ -183,7 +183,8 @@ impl FastFieldsWriter {
.record_datetime(doc_id, field_name, truncated_datetime);
}
ReferenceValueLeaf::Facet(val) => {
self.columnar_writer.record_str(doc_id, field_name, val);
self.columnar_writer
.record_str(doc_id, field_name, val.encoded_str());
}
ReferenceValueLeaf::Bytes(val) => {
self.columnar_writer.record_bytes(doc_id, field_name, val);

View File

@@ -6,7 +6,6 @@ use rand::{thread_rng, Rng};
use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
use crate::schema::*;
#[allow(deprecated)]
use crate::{doc, schema, Index, IndexSettings, IndexSortByField, IndexWriter, Order, Searcher};
fn check_index_content(searcher: &Searcher, vals: &[u64]) -> crate::Result<()> {

View File

@@ -3,7 +3,7 @@ use std::fmt;
#[cfg(feature = "mmap")]
use std::path::Path;
use std::path::PathBuf;
use std::thread::available_parallelism;
use std::sync::Arc;
use super::segment::Segment;
use super::segment_reader::merge_field_meta_data;
@@ -252,8 +252,9 @@ impl IndexBuilder {
let field_type = entry.field_type().value_type();
if !supported_field_types.contains(&field_type) {
return Err(TantivyError::InvalidArgument(format!(
"Unsupported field type in sort_by_field: {field_type:?}. Supported field \
types: {supported_field_types:?} ",
"Unsupported field type in sort_by_field: {:?}. Supported field types: \
{:?} ",
field_type, supported_field_types,
)));
}
}
@@ -292,7 +293,7 @@ pub struct Index {
directory: ManagedDirectory,
schema: Schema,
settings: IndexSettings,
executor: Executor,
executor: Arc<Executor>,
tokenizers: TokenizerManager,
fast_field_tokenizers: TokenizerManager,
inventory: SegmentMetaInventory,
@@ -317,25 +318,29 @@ impl Index {
///
/// By default the executor is single-threaded and simply runs in the calling thread.
pub fn search_executor(&self) -> &Executor {
&self.executor
self.executor.as_ref()
}
/// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads.
pub fn set_multithread_executor(&mut self, num_threads: usize) -> crate::Result<()> {
self.executor = Executor::multi_thread(num_threads, "tantivy-search-")?;
self.executor = Arc::new(Executor::multi_thread(num_threads, "tantivy-search-")?);
Ok(())
}
/// Set a custom executor provided by an outer thread pool.
pub fn set_executor(&mut self, executor: Executor) {
self.executor = executor;
pub fn set_shared_multithread_executor(
&mut self,
shared_thread_pool: Arc<Executor>,
) -> crate::Result<()> {
self.executor = shared_thread_pool.clone();
Ok(())
}
/// Replace the default single thread search executor pool
/// by a thread pool with as many threads as there are CPUs on the system.
pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
let default_num_threads = available_parallelism()?.get();
let default_num_threads = num_cpus::get();
self.set_multithread_executor(default_num_threads)
}
@@ -413,7 +418,7 @@ impl Index {
schema,
tokenizers: TokenizerManager::default(),
fast_field_tokenizers: TokenizerManager::default(),
executor: Executor::single_thread(),
executor: Arc::new(Executor::single_thread()),
inventory,
}
}
@@ -616,7 +621,7 @@ impl Index {
&self,
memory_budget_in_bytes: usize,
) -> crate::Result<IndexWriter<D>> {
let mut num_threads = std::cmp::min(available_parallelism()?.get(), MAX_NUM_THREAD);
let mut num_threads = std::cmp::min(num_cpus::get(), MAX_NUM_THREAD);
let memory_budget_num_bytes_per_thread = memory_budget_in_bytes / num_threads;
if memory_budget_num_bytes_per_thread < MEMORY_BUDGET_NUM_BYTES_MIN {
num_threads = (memory_budget_in_bytes / MEMORY_BUDGET_NUM_BYTES_MIN).max(1);
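A worked example of the clamping above, with hypothetical numbers (the actual values of MAX_NUM_THREAD and MEMORY_BUDGET_NUM_BYTES_MIN live elsewhere in the crate):

// Suppose 8 CPUs, a 40 MB budget and a 15 MB per-thread minimum:
// num_threads starts at min(8, MAX_NUM_THREAD) = 8, giving 5 MB per thread,
// which is below the minimum, so the count is clamped down.
fn main() {
    let clamped = (40_000_000usize / 15_000_000).max(1);
    assert_eq!(clamped, 2); // 2 threads at 20 MB each
}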

View File

@@ -1,3 +1,5 @@
//! # Index Module
//!
//! The `index` module in Tantivy contains core components to read and write indexes.
//!
//! It contains `Index` and `Segment`, where a `Index` consists of one or more `Segment`s.

View File

@@ -318,14 +318,14 @@ impl SegmentReader {
if create_canonical {
// Without expand_dots enabled, dots need to be escaped.
let escaped_json_path = json_path.replace('.', "\\.");
let full_path = format!("{field_name}.{escaped_json_path}");
let full_path = format!("{}.{}", field_name, escaped_json_path);
let full_path_unescaped = format!("{}.{}", field_name, &json_path);
map_to_canonical.insert(full_path_unescaped, full_path.to_string());
full_path
} else {
// With expand dots enabled, we can use '.' instead of '\u{1}'.
json_path_sep_to_dot(&mut json_path);
format!("{field_name}.{json_path}")
format!("{}.{}", field_name, json_path)
}
};
indexed_fields.extend(

View File

@@ -246,9 +246,8 @@ impl DeleteCursor {
mod tests {
use super::{DeleteOperation, DeleteQueue};
use crate::index::SegmentReader;
use crate::query::{Explanation, Scorer, Weight};
use crate::{DocId, Score};
use crate::{DocId, Score, SegmentReader};
struct DummyWeight;
impl Weight for DummyWeight {

View File

@@ -306,10 +306,12 @@ mod tests_indexsorting {
let my_string_field = index.schema().get_field("string_field").unwrap();
let searcher = index.reader()?.searcher();
{
assert!(searcher
.doc::<TantivyDocument>(DocAddress::new(0, 0))?
.get_first(my_string_field)
.is_none());
assert_eq!(
searcher
.doc::<TantivyDocument>(DocAddress::new(0, 0))?
.get_first(my_string_field),
None
);
assert_eq!(
searcher
.doc::<TantivyDocument>(DocAddress::new(0, 3))?
@@ -342,7 +344,7 @@ mod tests_indexsorting {
Some("blublub")
);
let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 4))?;
assert!(doc.get_first(my_string_field).is_none());
assert_eq!(doc.get_first(my_string_field), None);
}
// sort by field desc
let index = create_test_index(

View File

@@ -814,9 +814,10 @@ mod tests {
use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
use crate::indexer::NoMergePolicy;
use crate::query::{BooleanQuery, Occur, Query, QueryParser, TermQuery};
use crate::schema::document::Value;
use crate::schema::{
self, Facet, FacetOptions, IndexRecordOption, IpAddrOptions, NumericOptions, Schema,
TextFieldIndexing, TextOptions, Value, FAST, INDEXED, STORED, STRING, TEXT,
TextFieldIndexing, TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
};
use crate::store::DOCSTORE_CACHE_CAPACITY;
use crate::{
@@ -1979,13 +1980,7 @@ mod tests {
.unwrap();
// test store iterator
for doc in store_reader.iter::<TantivyDocument>(segment_reader.alive_bitset()) {
let id = doc
.unwrap()
.get_first(id_field)
.unwrap()
.as_value()
.as_u64()
.unwrap();
let id = doc.unwrap().get_first(id_field).unwrap().as_u64().unwrap();
assert!(expected_ids_and_num_occurrences.contains_key(&id));
}
// test store random access
@@ -2018,7 +2013,7 @@ mod tests {
let mut bool2 = doc.get_all(multi_bools);
assert_eq!(bool, bool2.next().unwrap().as_bool().unwrap());
assert_ne!(bool, bool2.next().unwrap().as_bool().unwrap());
assert!(bool2.next().is_none())
assert_eq!(None, bool2.next())
}
}
}

View File

@@ -144,9 +144,9 @@ mod tests {
use once_cell::sync::Lazy;
use super::*;
use crate::index::{SegmentId, SegmentMetaInventory};
use crate::schema;
use crate::index::SegmentMetaInventory;
use crate::schema::INDEXED;
use crate::{schema, SegmentId};
static INVENTORY: Lazy<SegmentMetaInventory> = Lazy::new(SegmentMetaInventory::default);

View File

@@ -1,8 +1,7 @@
use std::collections::HashSet;
use std::ops::Deref;
use crate::index::SegmentId;
use crate::{Inventory, Opstamp, TrackedObject};
use crate::{Inventory, Opstamp, SegmentId, TrackedObject};
#[derive(Default)]
pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>);

View File

@@ -13,7 +13,7 @@ use crate::docset::{DocSet, TERMINATED};
use crate::error::DataCorruption;
use crate::fastfield::{AliveBitSet, FastFieldNotAvailableError};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::index::{Segment, SegmentComponent, SegmentReader};
use crate::index::{Segment, SegmentReader};
use crate::indexer::doc_id_mapping::{MappingType, SegmentDocIdMapping};
use crate::indexer::SegmentSerializer;
use crate::postings::{InvertedIndexSerializer, Postings, SegmentPostings};
@@ -21,7 +21,8 @@ use crate::schema::{value_type_to_column_type, Field, FieldType, Schema};
use crate::store::StoreWriter;
use crate::termdict::{TermMerger, TermOrdinal};
use crate::{
DocAddress, DocId, IndexSettings, IndexSortByField, InvertedIndexReader, Order, SegmentOrdinal,
DocAddress, DocId, IndexSettings, IndexSortByField, InvertedIndexReader, Order,
SegmentComponent, SegmentOrdinal,
};
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
@@ -793,16 +794,17 @@ mod tests {
BytesFastFieldTestCollector, FastFieldTestCollector, TEST_COLLECTOR_WITH_SCORE,
};
use crate::collector::{Count, FacetCollector};
use crate::index::{Index, SegmentId};
use crate::index::Index;
use crate::query::{AllQuery, BooleanQuery, EnableScoring, Scorer, TermQuery};
use crate::schema::document::Value;
use crate::schema::{
Facet, FacetOptions, IndexRecordOption, NumericOptions, TantivyDocument, Term,
TextFieldIndexing, Value, INDEXED, TEXT,
TextFieldIndexing, INDEXED, TEXT,
};
use crate::time::OffsetDateTime;
use crate::{
assert_nearly_equals, schema, DateTime, DocAddress, DocId, DocSet, IndexSettings,
IndexSortByField, IndexWriter, Order, Searcher,
IndexSortByField, IndexWriter, Order, Searcher, SegmentId,
};
#[test]
@@ -909,24 +911,15 @@ mod tests {
}
{
let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 0))?;
assert_eq!(
doc.get_first(text_field).unwrap().as_value().as_str(),
Some("af b")
);
assert_eq!(doc.get_first(text_field).unwrap().as_str(), Some("af b"));
}
{
let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 1))?;
assert_eq!(
doc.get_first(text_field).unwrap().as_value().as_str(),
Some("a b c")
);
assert_eq!(doc.get_first(text_field).unwrap().as_str(), Some("a b c"));
}
{
let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 2))?;
assert_eq!(
doc.get_first(text_field).unwrap().as_value().as_str(),
Some("a b c d")
);
assert_eq!(doc.get_first(text_field).unwrap().as_str(), Some("a b c d"));
}
{
let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 3))?;

View File

@@ -3,15 +3,15 @@ mod tests {
use crate::collector::TopDocs;
use crate::fastfield::AliveBitSet;
use crate::index::Index;
use crate::postings::Postings;
use crate::query::QueryParser;
use crate::schema::document::Value;
use crate::schema::{
self, BytesOptions, Facet, FacetOptions, IndexRecordOption, NumericOptions,
TextFieldIndexing, TextOptions, Value,
TextFieldIndexing, TextOptions,
};
use crate::{
DocAddress, DocSet, IndexSettings, IndexSortByField, IndexWriter, Order, TantivyDocument,
Term,
DocAddress, DocSet, IndexSettings, IndexSortByField, IndexWriter, Order, Postings,
TantivyDocument, Term,
};
fn create_test_index_posting_list_issue(index_settings: Option<IndexSettings>) -> Index {
@@ -280,16 +280,13 @@ mod tests {
.doc::<TantivyDocument>(DocAddress::new(0, blubber_pos))
.unwrap();
assert_eq!(
doc.get_first(my_text_field).unwrap().as_value().as_str(),
doc.get_first(my_text_field).unwrap().as_str(),
Some("blubber")
);
let doc = searcher
.doc::<TantivyDocument>(DocAddress::new(0, 0))
.unwrap();
assert_eq!(
doc.get_first(int_field).unwrap().as_value().as_u64(),
Some(1000)
);
assert_eq!(doc.get_first(int_field).unwrap().as_u64(), Some(1000));
}
}

View File

@@ -182,7 +182,7 @@ mod tests_mmap {
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests().unwrap();
index_writer
.add_document(doc!(field=>json!({format!("{field_name_in}"): "test1", format!("num{field_name_in}"): 10})))
.add_document(doc!(field=>json!({format!("{field_name_in}"): "test1"})))
.unwrap();
index_writer
.add_document(doc!(field=>json!({format!("a{field_name_in}"): "test2"})))
@@ -216,7 +216,7 @@ mod tests_mmap {
let test_query = |query_str: &str| {
let query = parse_query.parse_query(query_str).unwrap();
let num_docs = searcher.search(&query, &Count).unwrap();
assert_eq!(num_docs, 1, "{query_str}");
assert_eq!(num_docs, 1, "{}", query_str);
};
test_query(format!("json.{field_name_out}:test1").as_str());
test_query(format!("json.a{field_name_out}:test2").as_str());
@@ -260,64 +260,6 @@ mod tests_mmap {
"test6",
);
test_agg(format!("json.{field_name_out}a").as_str(), "test7");
// `.` is stored as `\u{0001}` internally in tantivy
let field_name_out_internal = if field_name_out == "." {
"\u{0001}"
} else {
field_name_out
};
let mut fields = reader.searcher().segment_readers()[0]
.inverted_index(field)
.unwrap()
.list_encoded_fields()
.unwrap();
assert_eq!(fields.len(), 8);
fields.sort();
let mut expected_fields = vec![
(format!("a{field_name_out_internal}"), Type::Str),
(format!("a{field_name_out_internal}a"), Type::Str),
(
format!("a{field_name_out_internal}a{field_name_out_internal}"),
Type::Str,
),
(
format!("a{field_name_out_internal}\u{1}ab{field_name_out_internal}"),
Type::Str,
),
(
format!("a{field_name_out_internal}\u{1}a{field_name_out_internal}"),
Type::Str,
),
(format!("{field_name_out_internal}a"), Type::Str),
(format!("{field_name_out_internal}"), Type::Str),
(format!("num{field_name_out_internal}"), Type::I64),
];
expected_fields.sort();
assert_eq!(fields, expected_fields);
// Check columnar reader
let mut columns = reader.searcher().segment_readers()[0]
.fast_fields()
.columnar()
.list_columns()
.unwrap()
.into_iter()
.map(|(name, _)| name)
.collect::<Vec<_>>();
let mut expected_columns = vec![
format!("json\u{1}{field_name_out_internal}"),
format!("json\u{1}{field_name_out_internal}a"),
format!("json\u{1}a{field_name_out_internal}"),
format!("json\u{1}a{field_name_out_internal}a"),
format!("json\u{1}a{field_name_out_internal}a{field_name_out_internal}"),
format!("json\u{1}a{field_name_out_internal}\u{1}ab{field_name_out_internal}"),
format!("json\u{1}a{field_name_out_internal}\u{1}a{field_name_out_internal}"),
format!("json\u{1}num{field_name_out_internal}"),
];
columns.sort();
expected_columns.sort();
assert_eq!(columns, expected_columns);
}
#[test]
@@ -590,10 +532,10 @@ mod tests_mmap {
let query_parser = QueryParser::for_index(&index, vec![]);
// Test if field name can be queried
for (indexed_field, val) in fields_and_vals.iter() {
let query_str = &format!("{indexed_field}:{val}");
let query_str = &format!("{}:{}", indexed_field, val);
let query = query_parser.parse_query(query_str).unwrap();
let count_docs = searcher.search(&*query, &TopDocs::with_limit(2)).unwrap();
assert!(!count_docs.is_empty(), "{indexed_field}:{val}");
assert!(!count_docs.is_empty(), "{}:{}", indexed_field, val);
}
// Test if field name can be used for aggregation
for (field_name, val) in fields_and_vals.iter() {

View File

@@ -1,24 +1,24 @@
use columnar::MonotonicallyMappableToU64;
use common::JsonPathWriter;
use itertools::Itertools;
use tokenizer_api::BoxTokenStream;
use super::doc_id_mapping::{get_doc_id_mapping_from_field, DocIdMapping};
use super::operation::AddOperation;
use crate::core::json_utils::index_json_values;
use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
use crate::index::{Segment, SegmentComponent};
use crate::index::Segment;
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::json_utils::{index_json_value, IndexingPositionsPerPath};
use crate::postings::{
compute_table_memory_size, serialize_postings, IndexingContext, IndexingPosition,
PerFieldPostingsWriter, PostingsWriter,
};
use crate::schema::document::{Document, Value};
use crate::schema::{FieldEntry, FieldType, Schema, Term, DATE_TIME_PRECISION_INDEXED};
use crate::schema::document::{Document, ReferenceValue, Value};
use crate::schema::indexing_term::IndexingTerm;
use crate::schema::{FieldEntry, FieldType, Schema};
use crate::store::{StoreReader, StoreWriter};
use crate::tokenizer::{FacetTokenizer, PreTokenizedStream, TextAnalyzer, Tokenizer};
use crate::{DocId, Opstamp, TantivyError};
use crate::{DocId, Opstamp, SegmentComponent, TantivyError};
/// Computes the initial size of the hash table.
///
@@ -68,10 +68,9 @@ pub struct SegmentWriter {
pub(crate) fast_field_writers: FastFieldsWriter,
pub(crate) fieldnorms_writer: FieldNormsWriter,
pub(crate) json_path_writer: JsonPathWriter,
pub(crate) json_positions_per_path: IndexingPositionsPerPath,
pub(crate) doc_opstamps: Vec<Opstamp>,
per_field_text_analyzers: Vec<TextAnalyzer>,
term_buffer: Term,
term_buffer: IndexingTerm,
schema: Schema,
}
@@ -120,7 +119,6 @@ impl SegmentWriter {
per_field_postings_writers,
fieldnorms_writer: FieldNormsWriter::for_schema(&schema),
json_path_writer: JsonPathWriter::default(),
json_positions_per_path: IndexingPositionsPerPath::default(),
segment_serializer,
fast_field_writers: FastFieldsWriter::from_schema_and_tokenizer_manager(
&schema,
@@ -128,7 +126,7 @@ impl SegmentWriter {
)?,
doc_opstamps: Vec::with_capacity(1_000),
per_field_text_analyzers,
term_buffer: Term::with_capacity(16),
term_buffer: IndexingTerm::new(),
schema,
})
}
@@ -197,7 +195,7 @@ impl SegmentWriter {
let (term_buffer, ctx) = (&mut self.term_buffer, &mut self.ctx);
let postings_writer: &mut dyn PostingsWriter =
self.per_field_postings_writers.get_for_field_mut(field);
term_buffer.clear_with_field_and_type(field_entry.field_type().value_type(), field);
term_buffer.clear_with_field(field);
match field_entry.field_type() {
FieldType::Facet(_) => {
@@ -206,7 +204,8 @@ impl SegmentWriter {
// Used to help with linting and type checking.
let value = value_access as D::Value<'_>;
let facet_str = value.as_facet().ok_or_else(make_schema_error)?;
let facet = value.as_facet().ok_or_else(make_schema_error)?;
let facet_str = facet.encoded_str();
let mut facet_tokenizer = facet_tokenizer.token_stream(facet_str);
let mut indexing_position = IndexingPosition::default();
postings_writer.index_text(
@@ -229,7 +228,7 @@ impl SegmentWriter {
&mut self.per_field_text_analyzers[field.field_id() as usize];
text_analyzer.token_stream(text)
} else if let Some(tok_str) = value.as_pre_tokenized_text() {
BoxTokenStream::new(PreTokenizedStream::from(*tok_str.clone()))
BoxTokenStream::new(PreTokenizedStream::from(tok_str.clone()))
} else {
continue;
};
@@ -272,8 +271,7 @@ impl SegmentWriter {
num_vals += 1;
let date_val = value.as_datetime().ok_or_else(make_schema_error)?;
term_buffer
.set_u64(date_val.truncate(DATE_TIME_PRECISION_INDEXED).to_u64());
term_buffer.set_date(date_val);
postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
}
if field_entry.has_fieldnorms() {
@@ -333,7 +331,7 @@ impl SegmentWriter {
num_vals += 1;
let bytes = value.as_bytes().ok_or_else(make_schema_error)?;
term_buffer.set_bytes(bytes);
term_buffer.set_value_bytes(bytes);
postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
}
if field_entry.has_fieldnorms() {
@@ -343,24 +341,26 @@ impl SegmentWriter {
FieldType::JsonObject(json_options) => {
let text_analyzer =
&mut self.per_field_text_analyzers[field.field_id() as usize];
let json_values_it = values.map(|value_access| {
// Used to help with linting and type checking.
let value_access = value_access as D::Value<'_>;
let value = value_access.as_value();
self.json_positions_per_path.clear();
self.json_path_writer
.set_expand_dots(json_options.is_expand_dots_enabled());
for json_value in values {
self.json_path_writer.clear();
index_json_value(
doc_id,
json_value,
text_analyzer,
term_buffer,
&mut self.json_path_writer,
postings_writer,
ctx,
&mut self.json_positions_per_path,
);
}
match value {
ReferenceValue::Object(object_iter) => Ok(object_iter),
_ => Err(make_schema_error()),
}
});
index_json_values::<D::Value<'_>>(
doc_id,
json_values_it,
text_analyzer,
json_options.is_expand_dots_enabled(),
term_buffer,
postings_writer,
&mut self.json_path_writer,
ctx,
)?;
}
FieldType::IpAddr(_) => {
let mut num_vals = 0;
@@ -497,19 +497,20 @@ mod tests {
use crate::collector::{Count, TopDocs};
use crate::directory::RamDirectory;
use crate::fastfield::FastValue;
use crate::postings::{Postings, TermInfo};
use crate::json_utils::term_from_json_paths;
use crate::postings::TermInfo;
use crate::query::{PhraseQuery, QueryParser};
use crate::schema::document::Value;
use crate::schema::{
Document, IndexRecordOption, OwnedValue, Schema, TextFieldIndexing, TextOptions, Value,
STORED, STRING, TEXT,
Document, IndexRecordOption, Schema, TextFieldIndexing, TextOptions, STORED, STRING, TEXT,
};
use crate::store::{Compressor, StoreReader, StoreWriter};
use crate::time::format_description::well_known::Rfc3339;
use crate::time::OffsetDateTime;
use crate::tokenizer::{PreTokenizedString, Token};
use crate::{
DateTime, Directory, DocAddress, DocSet, Index, IndexWriter, TantivyDocument, Term,
TERMINATED,
DateTime, Directory, DocAddress, DocSet, Index, IndexWriter, Postings, TantivyDocument,
Term, TERMINATED,
};
#[test]
@@ -554,15 +555,9 @@ mod tests {
let reader = StoreReader::open(directory.open_read(path).unwrap(), 0).unwrap();
let doc = reader.get::<TantivyDocument>(0).unwrap();
assert_eq!(doc.field_values().count(), 2);
assert_eq!(
doc.get_all(text_field).next().unwrap().as_value().as_str(),
Some("A")
);
assert_eq!(
doc.get_all(text_field).nth(1).unwrap().as_value().as_str(),
Some("title")
);
assert_eq!(doc.field_values().len(), 2);
assert_eq!(doc.field_values()[0].value().as_str(), Some("A"));
assert_eq!(doc.field_values()[1].value().as_str(), Some("title"));
}
#[test]
fn test_simple_json_indexing() {
@@ -602,51 +597,12 @@ mod tests {
assert_eq!(score_docs.len(), 2);
}
#[test]
fn test_flat_json_indexing() {
// A JSON Object that contains mixed values on the first level
let mut schema_builder = Schema::builder();
let json_field = schema_builder.add_json_field("json", STORED | STRING);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let mut writer = index.writer_for_tests().unwrap();
// Text, i64, u64
writer.add_document(doc!(json_field=>"b")).unwrap();
writer
.add_document(doc!(json_field=>OwnedValue::I64(10i64)))
.unwrap();
writer
.add_document(doc!(json_field=>OwnedValue::U64(55u64)))
.unwrap();
writer
.add_document(doc!(json_field=>json!({"my_field": "a"})))
.unwrap();
writer.commit().unwrap();
let search_and_expect = |query| {
let query_parser = QueryParser::for_index(&index, vec![json_field]);
let text_query = query_parser.parse_query(query).unwrap();
let score_docs: Vec<(_, DocAddress)> = index
.reader()
.unwrap()
.searcher()
.search(&text_query, &TopDocs::with_limit(4))
.unwrap();
assert_eq!(score_docs.len(), 1);
};
search_and_expect("my_field:a");
search_and_expect("b");
search_and_expect("10");
search_and_expect("55");
}
#[test]
fn test_json_indexing() {
let mut schema_builder = Schema::builder();
let json_field = schema_builder.add_json_field("json", STORED | TEXT);
let schema = schema_builder.build();
let json_val: serde_json::Value = serde_json::from_str(
let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
r#"{
"toto": "titi",
"float": -0.2,
@@ -674,10 +630,14 @@ mod tests {
doc_id: 0u32,
})
.unwrap();
let serdeser_json_val = serde_json::from_str::<serde_json::Value>(&doc.to_json(&schema))
let serdeser_json_val = serde_json::from_str::<serde_json::Map<String, serde_json::Value>>(
&doc.to_json(&schema),
)
.unwrap()
.get("json")
.unwrap()[0]
.as_object()
.unwrap()
.get("json")
.unwrap()[0]
.clone();
assert_eq!(json_val, serdeser_json_val);
let segment_reader = searcher.segment_reader(0u32);
@@ -686,8 +646,9 @@ mod tests {
let mut term_stream = term_dict.stream().unwrap();
let term_from_path =
|path: &str| -> Term { Term::from_field_json_path(json_field, path, false) };
let term_from_path = |paths: &[&str]| -> Term {
term_from_json_paths(json_field, paths.iter().cloned(), false)
};
fn set_fast_val<T: FastValue>(val: T, mut term: Term) -> Term {
term.append_type_and_fast_value(val);
@@ -698,14 +659,15 @@ mod tests {
term
}
let term = term_from_path("bool");
let term = term_from_path(&["bool"]);
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
set_fast_val(true, term).serialized_value_bytes()
);
let term = term_from_path("complexobject.field\\.with\\.dot");
let term = term_from_path(&["complexobject", "field.with.dot"]);
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
@@ -713,7 +675,7 @@ mod tests {
);
// Date
let term = term_from_path("date");
let term = term_from_path(&["date"]);
assert!(term_stream.advance());
assert_eq!(
@@ -728,7 +690,7 @@ mod tests {
);
// Float
let term = term_from_path("float");
let term = term_from_path(&["float"]);
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
@@ -736,21 +698,21 @@ mod tests {
);
// Number In Array
let term = term_from_path("my_arr");
let term = term_from_path(&["my_arr"]);
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
set_fast_val(2i64, term).serialized_value_bytes()
);
let term = term_from_path("my_arr");
let term = term_from_path(&["my_arr"]);
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
set_fast_val(3i64, term).serialized_value_bytes()
);
let term = term_from_path("my_arr");
let term = term_from_path(&["my_arr"]);
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
@@ -758,13 +720,13 @@ mod tests {
);
// El in Array
let term = term_from_path("my_arr.my_key");
let term = term_from_path(&["my_arr", "my_key"]);
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
set_str("tokens", term).serialized_value_bytes()
);
let term = term_from_path("my_arr.my_key");
let term = term_from_path(&["my_arr", "my_key"]);
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
@@ -772,21 +734,21 @@ mod tests {
);
// Signed
let term = term_from_path("signed");
let term = term_from_path(&["signed"]);
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
set_fast_val(-2i64, term).serialized_value_bytes()
);
let term = term_from_path("toto");
let term = term_from_path(&["toto"]);
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
set_str("titi", term).serialized_value_bytes()
);
// Unsigned
let term = term_from_path("unsigned");
let term = term_from_path(&["unsigned"]);
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
@@ -813,7 +775,7 @@ mod tests {
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0u32);
let inv_index = segment_reader.inverted_index(json_field).unwrap();
let mut term = Term::from_field_json_path(json_field, "mykey", false);
let mut term = term_from_json_paths(json_field, ["mykey"].into_iter(), false);
term.append_type_and_str("token");
let term_info = inv_index.get_term_info(&term).unwrap().unwrap();
assert_eq!(
@@ -841,7 +803,7 @@ mod tests {
let mut schema_builder = Schema::builder();
let json_field = schema_builder.add_json_field("json", STRING);
let schema = schema_builder.build();
let json_val: serde_json::Value =
let json_val: serde_json::Map<String, serde_json::Value> =
serde_json::from_str(r#"{"mykey": "two tokens"}"#).unwrap();
let doc = doc!(json_field=>json_val);
let index = Index::create_in_ram(schema);
@@ -852,7 +814,7 @@ mod tests {
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0u32);
let inv_index = segment_reader.inverted_index(json_field).unwrap();
let mut term = Term::from_field_json_path(json_field, "mykey", false);
let mut term = term_from_json_paths(json_field, ["mykey"].into_iter(), false);
term.append_type_and_str("two tokens");
let term_info = inv_index.get_term_info(&term).unwrap().unwrap();
assert_eq!(
@@ -881,7 +843,7 @@ mod tests {
let mut schema_builder = Schema::builder();
let json_field = schema_builder.add_json_field("json", TEXT);
let schema = schema_builder.build();
let json_val: serde_json::Value = serde_json::from_str(
let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
r#"{"mykey": [{"field": "hello happy tax payer"}, {"field": "nothello"}]}"#,
)
.unwrap();
@@ -893,7 +855,7 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let term = Term::from_field_json_path(json_field, "mykey.field", false);
let term = term_from_json_paths(json_field, ["mykey", "field"].into_iter(), false);
let mut hello_term = term.clone();
hello_term.append_type_and_str("hello");

View File

@@ -216,6 +216,11 @@ use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
pub use self::docset::{DocSet, COLLECT_BLOCK_BUFFER_LEN, TERMINATED};
#[deprecated(
since = "0.22.0",
note = "Will be removed in tantivy 0.23. Use export from snippet module instead"
)]
pub use self::snippet::{Snippet, SnippetGenerator};
#[doc(hidden)]
pub use crate::core::json_utils;
pub use crate::core::{Executor, Searcher, SearcherGeneration};
@@ -223,10 +228,16 @@ pub use crate::directory::Directory;
#[allow(deprecated)] // Remove with index sorting
pub use crate::index::{
Index, IndexBuilder, IndexMeta, IndexSettings, IndexSortByField, InvertedIndexReader, Order,
Segment, SegmentMeta, SegmentReader,
Segment, SegmentComponent, SegmentId, SegmentMeta, SegmentReader,
};
#[deprecated(
since = "0.22.0",
note = "Will be removed in tantivy 0.23. Use export from indexer module instead"
)]
pub use crate::indexer::PreparedCommit;
pub use crate::indexer::{IndexWriter, SingleSegmentIndexWriter};
pub use crate::schema::{Document, TantivyDocument, Term};
pub use crate::postings::Postings;
pub use crate::schema::{DateOptions, DateTimePrecision, Document, TantivyDocument, Term};
/// Index format version.
const INDEX_FORMAT_VERSION: u32 = 6;
@@ -381,10 +392,9 @@ pub mod tests {
use crate::docset::{DocSet, TERMINATED};
use crate::index::SegmentReader;
use crate::merge_policy::NoMergePolicy;
use crate::postings::Postings;
use crate::query::BooleanQuery;
use crate::schema::*;
use crate::{DateTime, DocAddress, Index, IndexWriter, ReloadPolicy};
use crate::{DateTime, DocAddress, Index, IndexWriter, Postings, ReloadPolicy};
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new();
@@ -436,6 +446,7 @@ pub mod tests {
}
#[test]
#[cfg(not(feature = "lz4"))]
fn test_version_string() {
use regex::Regex;
let regex_ptn = Regex::new(
@@ -935,7 +946,7 @@ pub mod tests {
let mut schema_builder = Schema::builder();
let json_field = schema_builder.add_json_field("json", STORED | TEXT);
let schema = schema_builder.build();
let json_val: serde_json::Value = serde_json::from_str(
let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
r#"{
"signed": 2,
"float": 2.0,
@@ -1025,16 +1036,13 @@ pub mod tests {
text_field => "some other value",
other_text_field => "short");
assert_eq!(document.len(), 3);
let values: Vec<OwnedValue> = document.get_all(text_field).map(OwnedValue::from).collect();
let values: Vec<&OwnedValue> = document.get_all(text_field).collect();
assert_eq!(values.len(), 2);
assert_eq!(values[0].as_ref().as_str(), Some("tantivy"));
assert_eq!(values[1].as_ref().as_str(), Some("some other value"));
let values: Vec<OwnedValue> = document
.get_all(other_text_field)
.map(OwnedValue::from)
.collect();
assert_eq!(values[0].as_str(), Some("tantivy"));
assert_eq!(values[1].as_str(), Some("some other value"));
let values: Vec<&OwnedValue> = document.get_all(other_text_field).collect();
assert_eq!(values.len(), 1);
assert_eq!(values[0].as_ref().as_str(), Some("short"));
assert_eq!(values[0].as_str(), Some("short"));
}
#[test]
@@ -1101,9 +1109,9 @@ pub mod tests {
#[test]
fn test_update_via_delete_insert() -> crate::Result<()> {
use crate::collector::Count;
use crate::index::SegmentId;
use crate::indexer::NoMergePolicy;
use crate::query::AllQuery;
use crate::SegmentId;
const DOC_COUNT: u64 = 2u64;

View File

@@ -41,7 +41,6 @@
/// );
/// # }
/// ```
#[macro_export]
macro_rules! doc(
() => {
@@ -53,7 +52,7 @@ macro_rules! doc(
{
let mut document = $crate::TantivyDocument::default();
$(
document.add_field_value($field, &$value);
document.add_field_value($field, $value);
)*
document
}
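For reference, a usage sketch of the macro whose expansion changes above (schema setup assumed):

let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT);
// On this branch the macro passes $value by value rather than by reference.
let document = doc!(title => "The Old Man and the Sea");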

View File

@@ -8,9 +8,10 @@ use crate::indexer::path_to_unordered_id::OrderedPathId;
use crate::postings::postings_writer::SpecializedPostingsWriter;
use crate::postings::recorder::{BufferLender, DocIdRecorder, Recorder};
use crate::postings::{FieldSerializer, IndexingContext, IndexingPosition, PostingsWriter};
use crate::schema::{Field, Type};
use crate::schema::indexing_term::IndexingTerm;
use crate::schema::{Field, Type, ValueBytes};
use crate::tokenizer::TokenStream;
use crate::{DocId, Term};
use crate::DocId;
/// The `JsonPostingsWriter` is odd in that it relies on a hidden contract:
///
@@ -34,7 +35,7 @@ impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
&mut self,
doc: crate::DocId,
pos: u32,
term: &crate::Term,
term: &IndexingTerm,
ctx: &mut IndexingContext,
) {
self.non_str_posting_writer.subscribe(doc, pos, term, ctx);
@@ -44,7 +45,7 @@ impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
&mut self,
doc_id: DocId,
token_stream: &mut dyn TokenStream,
term_buffer: &mut Term,
term_buffer: &mut IndexingTerm,
ctx: &mut IndexingContext,
indexing_position: &mut IndexingPosition,
) {
@@ -66,42 +67,40 @@ impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
ctx: &IndexingContext,
serializer: &mut FieldSerializer,
) -> io::Result<()> {
let mut term_buffer = Term::with_capacity(48);
let mut term_buffer = JsonTermSerializer(Vec::with_capacity(48));
let mut buffer_lender = BufferLender::default();
term_buffer.clear_with_field_and_type(Type::Json, Field::from_field_id(0));
let mut prev_term_id = u32::MAX;
let mut term_path_len = 0; // this will be set in the first iteration
for (_field, path_id, term, addr) in term_addrs {
if prev_term_id != path_id.path_id() {
term_buffer.truncate_value_bytes(0);
term_buffer.clear();
term_buffer.append_path(ordered_id_to_path[path_id.path_id() as usize].as_bytes());
term_buffer.append_bytes(&[JSON_END_OF_PATH]);
term_path_len = term_buffer.len_bytes();
term_path_len = term_buffer.len();
prev_term_id = path_id.path_id();
}
term_buffer.truncate_value_bytes(term_path_len);
term_buffer.truncate(term_path_len);
term_buffer.append_bytes(term);
if let Some(json_value) = term_buffer.value().as_json_value_bytes() {
let typ = json_value.typ();
if typ == Type::Str {
SpecializedPostingsWriter::<Rec>::serialize_one_term(
term_buffer.serialized_value_bytes(),
*addr,
doc_id_map,
&mut buffer_lender,
ctx,
serializer,
)?;
} else {
SpecializedPostingsWriter::<DocIdRecorder>::serialize_one_term(
term_buffer.serialized_value_bytes(),
*addr,
doc_id_map,
&mut buffer_lender,
ctx,
serializer,
)?;
}
let json_value = ValueBytes::wrap(term);
let typ = json_value.typ();
if typ == Type::Str {
SpecializedPostingsWriter::<Rec>::serialize_one_term(
term_buffer.as_bytes(),
*addr,
doc_id_map,
&mut buffer_lender,
ctx,
serializer,
)?;
} else {
SpecializedPostingsWriter::<DocIdRecorder>::serialize_one_term(
term_buffer.as_bytes(),
*addr,
doc_id_map,
&mut buffer_lender,
ctx,
serializer,
)?;
}
}
Ok(())
@@ -111,3 +110,40 @@ impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
self.str_posting_writer.total_num_tokens() + self.non_str_posting_writer.total_num_tokens()
}
}
struct JsonTermSerializer(Vec<u8>);
impl JsonTermSerializer {
#[inline]
pub fn append_path(&mut self, bytes: &[u8]) {
// A 0 byte inside the path would collide with the JSON_END_OF_PATH marker
// appended right after it, so it is replaced by the ASCII character '0'.
if bytes.contains(&0u8) {
self.0
.extend(bytes.iter().map(|&b| if b == 0 { b'0' } else { b }));
} else {
self.0.extend_from_slice(bytes);
}
}
/// Appends value bytes to the buffer.
///
/// Returns the slice of bytes that has just been appended.
#[inline]
pub fn append_bytes(&mut self, bytes: &[u8]) -> &mut [u8] {
let len_before = self.0.len();
self.0.extend_from_slice(bytes);
&mut self.0[len_before..]
}
fn clear(&mut self) {
self.0.clear();
}
fn truncate(&mut self, len: usize) {
self.0.truncate(len);
}
fn len(&self) -> usize {
self.0.len()
}
fn as_bytes(&self) -> &[u8] {
&self.0
}
}
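A sketch of how this buffer is used during serialization; the byte values mirror the serialized-term tests earlier in this diff:

let mut buf = JsonTermSerializer(Vec::with_capacity(48));
buf.append_path(b"attributes\x01color"); // '\x01' separates path segments
buf.append_bytes(&[0u8]);                // JSON_END_OF_PATH
let path_len = buf.len();
buf.append_bytes(b"sred");               // 's' = Type::Str code, then the token
// The next term sharing the path only rewrites the value suffix:
buf.truncate(path_len);
buf.append_bytes(b"sblue");
assert_eq!(buf.as_bytes(), b"attributes\x01color\x00sblue");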

View File

@@ -11,7 +11,8 @@ use crate::postings::recorder::{BufferLender, Recorder};
use crate::postings::{
FieldSerializer, IndexingContext, InvertedIndexSerializer, PerFieldPostingsWriter,
};
use crate::schema::{Field, Schema, Term, Type};
use crate::schema::indexing_term::{get_field_from_indexing_term, IndexingTerm};
use crate::schema::{Field, Schema, Type};
use crate::tokenizer::{Token, TokenStream, MAX_TOKEN_LEN};
use crate::DocId;
@@ -60,14 +61,14 @@ pub(crate) fn serialize_postings(
let mut term_offsets: Vec<(Field, OrderedPathId, &[u8], Addr)> =
Vec::with_capacity(ctx.term_index.len());
term_offsets.extend(ctx.term_index.iter().map(|(key, addr)| {
let field = Term::wrap(key).field();
let field = get_field_from_indexing_term(key);
if schema.get_field_entry(field).field_type().value_type() == Type::Json {
let byte_range_path = 5..5 + 4;
let byte_range_path = 4..4 + 4;
let unordered_id = u32::from_be_bytes(key[byte_range_path.clone()].try_into().unwrap());
let path_id = unordered_id_to_ordered_id[unordered_id as usize];
(field, path_id, &key[byte_range_path.end..], addr)
} else {
(field, 0.into(), &key[5..], addr)
(field, 0.into(), &key[4..], addr)
}
}));
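The `4..4 + 4` range reflects this branch's `IndexingTerm` key layout, while master keeps an extra type byte after the field id (hence its `5..5 + 4`). A decoding sketch under that assumed layout:

fn decode_json_key(key: &[u8]) -> (u32, u32, &[u8]) {
    let field_id = u32::from_be_bytes(key[0..4].try_into().unwrap());
    let unordered_path_id = u32::from_be_bytes(key[4..8].try_into().unwrap());
    (field_id, unordered_path_id, &key[8..]) // remainder: type code + value bytes
}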
// Sort by field, path, and term
@@ -114,7 +115,7 @@ pub(crate) trait PostingsWriter: Send + Sync {
/// * term - the term
/// * ctx - Contains a term hashmap and a memory arena to store all necessary posting list
/// information.
fn subscribe(&mut self, doc: DocId, pos: u32, term: &Term, ctx: &mut IndexingContext);
fn subscribe(&mut self, doc: DocId, pos: u32, term: &IndexingTerm, ctx: &mut IndexingContext);
/// Serializes the postings on disk.
/// The actual serialization format is handled by the `PostingsSerializer`.
@@ -132,7 +133,7 @@ pub(crate) trait PostingsWriter: Send + Sync {
&mut self,
doc_id: DocId,
token_stream: &mut dyn TokenStream,
term_buffer: &mut Term,
term_buffer: &mut IndexingTerm,
ctx: &mut IndexingContext,
indexing_position: &mut IndexingPosition,
) {
@@ -203,26 +204,35 @@ impl<Rec: Recorder> SpecializedPostingsWriter<Rec> {
impl<Rec: Recorder> PostingsWriter for SpecializedPostingsWriter<Rec> {
#[inline]
fn subscribe(&mut self, doc: DocId, position: u32, term: &Term, ctx: &mut IndexingContext) {
debug_assert!(term.serialized_term().len() >= 4);
fn subscribe(
&mut self,
doc: DocId,
position: u32,
term: &IndexingTerm,
ctx: &mut IndexingContext,
) {
debug_assert!(term.serialized_for_hashmap().len() >= 4);
self.total_num_tokens += 1;
let (term_index, arena) = (&mut ctx.term_index, &mut ctx.arena);
term_index.mutate_or_create(term.serialized_term(), |opt_recorder: Option<Rec>| {
if let Some(mut recorder) = opt_recorder {
let current_doc = recorder.current_doc();
if current_doc != doc {
recorder.close_doc(arena);
term_index.mutate_or_create(
term.serialized_for_hashmap(),
|opt_recorder: Option<Rec>| {
if let Some(mut recorder) = opt_recorder {
let current_doc = recorder.current_doc();
if current_doc != doc {
recorder.close_doc(arena);
recorder.new_doc(doc, arena);
}
recorder.record_position(position, arena);
recorder
} else {
let mut recorder = Rec::default();
recorder.new_doc(doc, arena);
recorder.record_position(position, arena);
recorder
}
recorder.record_position(position, arena);
recorder
} else {
let mut recorder = Rec::default();
recorder.new_doc(doc, arena);
recorder.record_position(position, arena);
recorder
}
});
},
);
}
fn serialize(

View File

@@ -56,7 +56,7 @@ pub struct InvertedIndexSerializer {
impl InvertedIndexSerializer {
/// Open a new `InvertedIndexSerializer` for the given segment
pub fn open(segment: &mut Segment) -> crate::Result<InvertedIndexSerializer> {
use crate::index::SegmentComponent::{Positions, Postings, Terms};
use crate::SegmentComponent::{Positions, Postings, Terms};
let inv_index_serializer = InvertedIndexSerializer {
terms_write: CompositeWrite::wrap(segment.open_write(Terms)?),
postings_write: CompositeWrite::wrap(segment.open_write(Postings)?),

View File

@@ -1,9 +1,8 @@
use super::Scorer;
use crate::docset::TERMINATED;
use crate::index::SegmentReader;
use crate::query::explanation::does_not_match;
use crate::query::{EnableScoring, Explanation, Query, Weight};
use crate::{DocId, DocSet, Score, Searcher};
use crate::{DocId, DocSet, Score, Searcher, SegmentReader};
/// `EmptyQuery` is a dummy `Query` in which no document matches.
///

View File

@@ -3,7 +3,7 @@ use once_cell::sync::OnceCell;
use tantivy_fst::Automaton;
use crate::query::{AutomatonWeight, EnableScoring, Query, Weight};
use crate::schema::{Term, Type};
use crate::schema::Term;
use crate::TantivyError::InvalidArgument;
pub(crate) struct DfaWrapper(pub DFA);
@@ -133,39 +133,33 @@ impl FuzzyTermQuery {
let term_value = self.term.value();
let term_text = if term_value.typ() == Type::Json {
if let Some(json_path_type) = term_value.json_path_type() {
if json_path_type != Type::Str {
return Err(InvalidArgument(format!(
"The fuzzy term query requires a string path type for a json term. Found \
{json_path_type:?}"
)));
}
let get_automaton = |term_text: &str| {
if self.prefix {
automaton_builder.build_prefix_dfa(term_text)
} else {
automaton_builder.build_dfa(term_text)
}
std::str::from_utf8(self.term.serialized_value_bytes()).map_err(|_| {
InvalidArgument(
"Failed to convert json term value bytes to utf8 string.".to_string(),
)
})?
} else {
term_value.as_str().ok_or_else(|| {
InvalidArgument("The fuzzy term query requires a string term.".to_string())
})?
};
let automaton = if self.prefix {
automaton_builder.build_prefix_dfa(term_text)
} else {
automaton_builder.build_dfa(term_text)
};
if let Some((json_path_bytes, _)) = term_value.as_json() {
if let Some((json_path_bytes, _term_value)) = term_value.as_json() {
let term_text =
std::str::from_utf8(self.term.serialized_value_bytes()).map_err(|_| {
InvalidArgument(
"Failed to convert json term value bytes to utf8 string.".to_string(),
)
})?;
let automaton = get_automaton(term_text);
Ok(AutomatonWeight::new_for_json_path(
self.term.field(),
DfaWrapper(automaton),
json_path_bytes,
))
} else {
let term_text = term_value.as_str().ok_or_else(|| {
InvalidArgument("The fuzzy term query requires a string term.".to_string())
})?;
let automaton = get_automaton(term_text);
Ok(AutomatonWeight::new(
self.term.field(),
DfaWrapper(automaton),
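For orientation, a usage sketch of the query being reworked above; `title_field` is an assumed schema handle:

let term = Term::from_field_text(title_field, "ipod");
// distance 1, and a transposition counts as a single edit
let query = FuzzyTermQuery::new(term, 1, true);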

View File

@@ -180,7 +180,7 @@ impl MoreLikeThis {
let facets: Vec<&str> = values
.iter()
.map(|value| {
value.as_facet().ok_or_else(|| {
value.as_facet().map(|f| f.encoded_str()).ok_or_else(|| {
TantivyError::InvalidArgument("invalid field value".to_string())
})
})
@@ -220,7 +220,7 @@ impl MoreLikeThis {
let mut token_stream = tokenizer.token_stream(text);
token_stream.process(sink);
} else if let Some(tok_str) = value.as_pre_tokenized_text() {
let mut token_stream = PreTokenizedStream::from(*tok_str.clone());
let mut token_stream = PreTokenizedStream::from(tok_str.clone());
token_stream.process(sink);
}
}

View File

@@ -137,7 +137,7 @@ impl Query for PhrasePrefixQuery {
// There are no prefix. Let's just match the suffix.
let end_term =
if let Some(end_value) = prefix_end(self.prefix.1.serialized_value_bytes()) {
let mut end_term = Term::with_capacity(end_value.len());
let mut end_term = Term::new();
end_term.set_field_and_type(self.field, self.prefix.1.typ());
end_term.append_bytes(&end_value);
Bound::Excluded(end_term)

View File

@@ -11,7 +11,9 @@ use rustc_hash::FxHashMap;
use super::logical_ast::*;
use crate::index::Index;
use crate::json_utils::convert_to_fast_value_and_append_to_json_term;
use crate::json_utils::{
convert_to_fast_value_and_append_to_json_term, split_json_path, term_from_json_paths,
};
use crate::query::range_query::{is_type_valid_for_fastfield_range_query, RangeQuery};
use crate::query::{
AllQuery, BooleanQuery, BoostQuery, EmptyQuery, FuzzyTermQuery, Occur, PhrasePrefixQuery,
@@ -964,8 +966,14 @@ fn generate_literals_for_json_object(
let index_record_option = text_options.index_option();
let mut logical_literals = Vec::new();
let get_term_with_path =
|| Term::from_field_json_path(field, json_path, json_options.is_expand_dots_enabled());
let paths = split_json_path(json_path);
let get_term_with_path = || {
term_from_json_paths(
field,
paths.iter().map(|el| el.as_str()),
json_options.is_expand_dots_enabled(),
)
};
// Try to convert the phrase to a fast value
if let Some(term) = convert_to_fast_value_and_append_to_json_term(get_term_with_path(), phrase)
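The change routes the user-supplied path through `split_json_path` before building the term, so that escaped dots survive. A sketch of what that splitting presumably looks like (`split_json_path` here is a hypothetical reimplementation, not the crate's code); `term_from_json_paths` then assembles a term from the resulting segments, which is what the closure above captures:

```rust
/// Split a json path on unescaped dots: `k8s\.node.name` -> ["k8s.node", "name"].
fn split_json_path(path: &str) -> Vec<String> {
    let mut segments = Vec::new();
    let mut current = String::new();
    let mut chars = path.chars();
    while let Some(c) = chars.next() {
        match c {
            // A backslash escapes the next character (typically a dot).
            '\\' => current.extend(chars.next()),
            '.' => segments.push(std::mem::take(&mut current)),
            _ => current.push(c),
        }
    }
    segments.push(current);
    segments
}

fn main() {
    assert_eq!(split_json_path(r"k8s\.node.name"), vec!["k8s.node", "name"]);
    assert_eq!(split_json_path("a.b.c"), vec!["a", "b", "c"]);
}
```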

View File

@@ -174,7 +174,7 @@ impl<T: Send + Sync + PartialOrd + Copy + Debug + 'static> DocSet for RangeDocSe
}
fn size_hint(&self) -> u32 {
self.column.num_docs()
0 // a heuristic would be possible by checking the number of hits when fetching a block
}
}

View File

@@ -185,7 +185,7 @@ mod test {
Err(crate::TantivyError::InvalidArgument(msg)) => {
assert!(msg.contains("error: unclosed group"))
}
res => panic!("unexpected result: {res:?}"),
res => panic!("unexpected result: {:?}", res),
}
}
}

View File

@@ -127,7 +127,6 @@ impl Scorer for TermScorer {
mod tests {
use proptest::prelude::*;
use crate::index::SegmentId;
use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
use crate::merge_policy::NoMergePolicy;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
@@ -135,7 +134,8 @@ mod tests {
use crate::query::{Bm25Weight, EnableScoring, Scorer, TermQuery};
use crate::schema::{IndexRecordOption, Schema, TEXT};
use crate::{
assert_nearly_equals, DocId, DocSet, Index, IndexWriter, Score, Searcher, Term, TERMINATED,
assert_nearly_equals, DocId, DocSet, Index, IndexWriter, Score, Searcher, SegmentId, Term,
TERMINATED,
};
#[test]

View File

@@ -179,10 +179,9 @@ mod tests {
use super::Warmer;
use crate::core::searcher::SearcherGeneration;
use crate::directory::RamDirectory;
use crate::index::SegmentId;
use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
use crate::schema::{Schema, INDEXED};
use crate::{Index, IndexSettings, ReloadPolicy, Searcher};
use crate::{Index, IndexSettings, ReloadPolicy, Searcher, SegmentId};
#[derive(Default)]
struct TestWarmer {

View File

@@ -873,7 +873,7 @@ mod tests {
);
let facet = Facet::from_text("/hello/world").unwrap();
let result = serialize_value(ReferenceValueLeaf::Facet(facet.encoded_str()).into());
let result = serialize_value(ReferenceValueLeaf::Facet(&facet).into());
let value = deserialize_value(result);
assert_eq!(value, crate::schema::OwnedValue::Facet(facet));
@@ -881,8 +881,7 @@ mod tests {
text: "hello, world".to_string(),
tokens: vec![Token::default(), Token::default()],
};
let result =
serialize_value(ReferenceValueLeaf::PreTokStr(pre_tok_str.clone().into()).into());
let result = serialize_value(ReferenceValueLeaf::PreTokStr(&pre_tok_str).into());
let value = deserialize_value(result);
assert_eq!(value, crate::schema::OwnedValue::PreTokStr(pre_tok_str));
}
@@ -961,19 +960,13 @@ mod tests {
"my-third-key".to_string(),
crate::schema::OwnedValue::F64(123.0),
);
assert_eq!(
value,
crate::schema::OwnedValue::Object(expected_object.into_iter().collect())
);
assert_eq!(value, crate::schema::OwnedValue::Object(expected_object));
let object = serde_json::Map::new();
let result = serialize_value(ReferenceValue::Object(JsonObjectIter(object.iter())));
let value = deserialize_value(result);
let expected_object = BTreeMap::new();
assert_eq!(
value,
crate::schema::OwnedValue::Object(expected_object.into_iter().collect())
);
assert_eq!(value, crate::schema::OwnedValue::Object(expected_object));
let mut object = serde_json::Map::new();
object.insert("my-first-key".into(), serde_json::Value::Null);
@@ -985,10 +978,7 @@ mod tests {
expected_object.insert("my-first-key".to_string(), crate::schema::OwnedValue::Null);
expected_object.insert("my-second-key".to_string(), crate::schema::OwnedValue::Null);
expected_object.insert("my-third-key".to_string(), crate::schema::OwnedValue::Null);
assert_eq!(
value,
crate::schema::OwnedValue::Object(expected_object.into_iter().collect())
);
assert_eq!(value, crate::schema::OwnedValue::Object(expected_object));
}
#[test]
@@ -1065,10 +1055,7 @@ mod tests {
.collect(),
),
);
assert_eq!(
value,
crate::schema::OwnedValue::Object(expected_object.into_iter().collect())
);
assert_eq!(value, crate::schema::OwnedValue::Object(expected_object));
// Some more extreme nesting that might behave weirdly
let mut object = serde_json::Map::new();
@@ -1090,9 +1077,6 @@ mod tests {
OwnedValue::Array(vec![OwnedValue::Null]),
])]),
);
assert_eq!(
value,
OwnedValue::Object(expected_object.into_iter().collect())
);
assert_eq!(value, OwnedValue::Object(expected_object));
}
}

View File

@@ -1,64 +1,93 @@
use std::collections::{BTreeMap, HashMap, HashSet};
use std::io::{self, Read, Write};
use std::net::Ipv6Addr;
use columnar::MonotonicallyMappableToU128;
use common::{read_u32_vint_no_advance, serialize_vint_u32, BinarySerializable, DateTime, VInt};
use common::DateTime;
use serde_json::Map;
pub use CompactDoc as TantivyDocument;
use super::{ReferenceValue, ReferenceValueLeaf, Value};
use crate::schema::document::{
DeserializeError, Document, DocumentDeserialize, DocumentDeserializer,
};
use crate::schema::field_type::ValueParsingError;
use crate::schema::{Facet, Field, NamedFieldDocument, OwnedValue, Schema};
use crate::schema::field_value::FieldValueIter;
use crate::schema::{Facet, Field, FieldValue, NamedFieldDocument, OwnedValue, Schema};
use crate::tokenizer::PreTokenizedString;
#[repr(packed)]
#[derive(Debug, Clone)]
/// A field value pair in the compact tantivy document
struct FieldValueAddr {
pub field: u16,
pub value_addr: ValueAddr,
/// TantivyDocument provides a default implementation of the `Document` trait.
/// It is the object that can be indexed and then searched for.
///
/// Documents are fundamentally a collection of unordered `(field, value)` pairs.
/// In this list, one field may appear more than once.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, Default)]
pub struct TantivyDocument {
field_values: Vec<FieldValue>,
}
#[derive(Debug, Clone)]
/// The default document in tantivy. It encodes data in a compact form.
pub struct CompactDoc {
/// `node_data` is a vec of bytes, where each value is serialized into bytes and stored. It
/// includes all the data of the document and also metadata like where the nodes are located
/// in an object or array.
pub node_data: Vec<u8>,
/// The root (Field, Value) pairs
field_values: Vec<FieldValueAddr>,
}
impl Document for TantivyDocument {
type Value<'a> = &'a OwnedValue;
type FieldsValuesIter<'a> = FieldValueIter<'a>;
impl Default for CompactDoc {
fn default() -> Self {
Self::new()
fn iter_fields_and_values(&self) -> Self::FieldsValuesIter<'_> {
FieldValueIter(self.field_values.iter())
}
}
impl CompactDoc {
/// Creates a new, empty document object
/// The reserved capacity is for the total serialized data
pub fn with_capacity(bytes: usize) -> CompactDoc {
CompactDoc {
node_data: Vec::with_capacity(bytes),
field_values: Vec::with_capacity(4),
impl DocumentDeserialize for TantivyDocument {
fn deserialize<'de, D>(mut deserializer: D) -> Result<Self, DeserializeError>
where D: DocumentDeserializer<'de> {
let mut field_values = Vec::with_capacity(deserializer.size_hint());
while let Some((field, value)) = deserializer.next_field()? {
field_values.push(FieldValue::new(field, value));
}
}
Ok(Self { field_values })
}
}
impl From<Vec<FieldValue>> for TantivyDocument {
fn from(field_values: Vec<FieldValue>) -> Self {
Self { field_values }
}
}
impl PartialEq for TantivyDocument {
fn eq(&self, other: &Self) -> bool {
// super slow, but only here for tests
let convert_to_comparable_map = |field_values: &[FieldValue]| {
let mut field_value_set: HashMap<Field, HashSet<String>> = Default::default();
for field_value in field_values.iter() {
let value = serde_json::to_string(field_value.value()).unwrap();
field_value_set
.entry(field_value.field())
.or_default()
.insert(value);
}
field_value_set
};
let self_field_values: HashMap<Field, HashSet<String>> =
convert_to_comparable_map(&self.field_values);
let other_field_values: HashMap<Field, HashSet<String>> =
convert_to_comparable_map(&other.field_values);
self_field_values.eq(&other_field_values)
}
}
impl Eq for TantivyDocument {}
impl IntoIterator for TantivyDocument {
type Item = FieldValue;
type IntoIter = std::vec::IntoIter<FieldValue>;
fn into_iter(self) -> Self::IntoIter {
self.field_values.into_iter()
}
}
impl TantivyDocument {
/// Creates a new, empty document object
pub fn new() -> CompactDoc {
CompactDoc::with_capacity(1024)
}
/// Shrinks the capacity of the document to fit the data
pub fn shrink_to_fit(&mut self) {
self.node_data.shrink_to_fit();
self.field_values.shrink_to_fit();
pub fn new() -> TantivyDocument {
TantivyDocument::default()
}
/// Returns the length of the document.
@@ -70,111 +99,83 @@ impl CompactDoc {
pub fn add_facet<F>(&mut self, field: Field, path: F)
where Facet: From<F> {
let facet = Facet::from(path);
self.add_leaf_field_value(field, ReferenceValueLeaf::Facet(facet.encoded_str()));
let value = OwnedValue::Facet(facet);
self.add_field_value(field, value);
}
/// Add a text field.
pub fn add_text<S: AsRef<str>>(&mut self, field: Field, text: S) {
self.add_leaf_field_value(field, ReferenceValueLeaf::Str(text.as_ref()));
pub fn add_text<S: ToString>(&mut self, field: Field, text: S) {
let value = OwnedValue::Str(text.to_string());
self.add_field_value(field, value);
}
/// Add a pre-tokenized text field.
pub fn add_pre_tokenized_text(&mut self, field: Field, pre_tokenized_text: PreTokenizedString) {
self.add_leaf_field_value(field, pre_tokenized_text);
self.add_field_value(field, pre_tokenized_text);
}
/// Add a u64 field
pub fn add_u64(&mut self, field: Field, value: u64) {
self.add_leaf_field_value(field, value);
self.add_field_value(field, value);
}
/// Add a IP address field. Internally only Ipv6Addr is used.
pub fn add_ip_addr(&mut self, field: Field, value: Ipv6Addr) {
self.add_leaf_field_value(field, value);
self.add_field_value(field, value);
}
/// Add a i64 field
pub fn add_i64(&mut self, field: Field, value: i64) {
self.add_leaf_field_value(field, value);
self.add_field_value(field, value);
}
/// Add a f64 field
pub fn add_f64(&mut self, field: Field, value: f64) {
self.add_leaf_field_value(field, value);
self.add_field_value(field, value);
}
/// Add a bool field
pub fn add_bool(&mut self, field: Field, value: bool) {
self.add_leaf_field_value(field, value);
self.add_field_value(field, value);
}
/// Add a date field with unspecified time zone offset
pub fn add_date(&mut self, field: Field, value: DateTime) {
self.add_leaf_field_value(field, value);
self.add_field_value(field, value);
}
/// Add a bytes field
pub fn add_bytes(&mut self, field: Field, value: &[u8]) {
self.add_leaf_field_value(field, value);
pub fn add_bytes<T: Into<Vec<u8>>>(&mut self, field: Field, value: T) {
self.add_field_value(field, value.into());
}
/// Add a dynamic object field
pub fn add_object(&mut self, field: Field, object: BTreeMap<String, OwnedValue>) {
self.add_field_value(field, &OwnedValue::from(object));
self.add_field_value(field, object);
}
/// Add a (field, value) to the document.
///
/// `OwnedValue` implements Value, which should be easiest to use, but is not the most
/// performant.
pub fn add_field_value<'a, V: Value<'a>>(&mut self, field: Field, value: V) {
let field_value = FieldValueAddr {
field: field
.field_id()
.try_into()
.expect("support only up to u16::MAX field ids"),
value_addr: self.add_value(value),
};
self.field_values.push(field_value);
}
/// Add a (field, leaf value) to the document.
/// Leaf values don't have nested values.
pub fn add_leaf_field_value<'a, T: Into<ReferenceValueLeaf<'a>>>(
&mut self,
field: Field,
typed_val: T,
) {
pub fn add_field_value<T: Into<OwnedValue>>(&mut self, field: Field, typed_val: T) {
let value = typed_val.into();
let field_value = FieldValueAddr {
field: field
.field_id()
.try_into()
.expect("support only up to u16::MAX field ids"),
value_addr: self.add_value_leaf(value),
};
let field_value = FieldValue { field, value };
self.field_values.push(field_value);
}
/// field_values accessor
pub fn field_values(&self) -> impl Iterator<Item = (Field, CompactDocValue<'_>)> {
self.field_values.iter().map(|field_val| {
let field = Field::from_field_id(field_val.field as u32);
let val = self.get_compact_doc_value(field_val.value_addr);
(field, val)
})
pub fn field_values(&self) -> &[FieldValue] {
&self.field_values
}
/// Returns all of the `ReferenceValue`s associated the given field
pub fn get_all(&self, field: Field) -> impl Iterator<Item = CompactDocValue<'_>> + '_ {
/// Returns all of the `FieldValue`s associated the given field
pub fn get_all(&self, field: Field) -> impl Iterator<Item = &OwnedValue> {
self.field_values
.iter()
.filter(move |field_value| Field::from_field_id(field_value.field as u32) == field)
.map(|val| self.get_compact_doc_value(val.value_addr))
.filter(move |field_value| field_value.field() == field)
.map(FieldValue::value)
}
/// Returns the first `ReferenceValue` associated the given field
pub fn get_first(&self, field: Field) -> Option<CompactDocValue<'_>> {
/// Returns the first `FieldValue` associated the given field
pub fn get_first(&self, field: Field) -> Option<&OwnedValue> {
self.get_all(field).next()
}
@@ -182,12 +183,12 @@ impl CompactDoc {
pub fn convert_named_doc(
schema: &Schema,
named_doc: NamedFieldDocument,
) -> Result<Self, DocParsingError> {
let mut document = Self::new();
) -> Result<TantivyDocument, DocParsingError> {
let mut document = TantivyDocument::new();
for (field_name, values) in named_doc.0 {
if let Ok(field) = schema.get_field(&field_name) {
for value in values {
document.add_field_value(field, &value);
document.add_field_value(field, value);
}
}
}
@@ -195,7 +196,7 @@ impl CompactDoc {
}
/// Build a document object from a json-object.
pub fn parse_json(schema: &Schema, doc_json: &str) -> Result<Self, DocParsingError> {
pub fn parse_json(schema: &Schema, doc_json: &str) -> Result<TantivyDocument, DocParsingError> {
let json_obj: Map<String, serde_json::Value> =
serde_json::from_str(doc_json).map_err(|_| DocParsingError::invalid_json(doc_json))?;
Self::from_json_object(schema, json_obj)
@@ -205,8 +206,8 @@ impl CompactDoc {
pub fn from_json_object(
schema: &Schema,
json_obj: Map<String, serde_json::Value>,
) -> Result<Self, DocParsingError> {
let mut doc = Self::default();
) -> Result<TantivyDocument, DocParsingError> {
let mut doc = TantivyDocument::default();
for (field_name, json_value) in json_obj {
if let Ok(field) = schema.get_field(&field_name) {
let field_entry = schema.get_field_entry(field);
@@ -217,482 +218,20 @@ impl CompactDoc {
let value = field_type
.value_from_json(json_item)
.map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
doc.add_field_value(field, &value);
doc.add_field_value(field, value);
}
}
_ => {
let value = field_type
.value_from_json(json_value)
.map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
doc.add_field_value(field, &value);
doc.add_field_value(field, value);
}
}
}
}
Ok(doc)
}
fn add_value_leaf(&mut self, leaf: ReferenceValueLeaf) -> ValueAddr {
let type_id = ValueType::from(&leaf);
// Write into `node_data` and return u32 position as its address
// Null and bool are inlined into the address
let val_addr = match leaf {
ReferenceValueLeaf::Null => 0,
ReferenceValueLeaf::Str(bytes) => {
write_bytes_into(&mut self.node_data, bytes.as_bytes())
}
ReferenceValueLeaf::Facet(bytes) => {
write_bytes_into(&mut self.node_data, bytes.as_bytes())
}
ReferenceValueLeaf::Bytes(bytes) => write_bytes_into(&mut self.node_data, bytes),
ReferenceValueLeaf::U64(num) => write_into(&mut self.node_data, num),
ReferenceValueLeaf::I64(num) => write_into(&mut self.node_data, num),
ReferenceValueLeaf::F64(num) => write_into(&mut self.node_data, num),
ReferenceValueLeaf::Bool(b) => b as u32,
ReferenceValueLeaf::Date(date) => {
write_into(&mut self.node_data, date.into_timestamp_nanos())
}
ReferenceValueLeaf::IpAddr(num) => write_into(&mut self.node_data, num.to_u128()),
ReferenceValueLeaf::PreTokStr(pre_tok) => write_into(&mut self.node_data, *pre_tok),
};
ValueAddr { type_id, val_addr }
}
/// Adds a value and returns its address in the container
fn add_value<'a, V: Value<'a>>(&mut self, value: V) -> ValueAddr {
let value = value.as_value();
let type_id = ValueType::from(&value);
match value {
ReferenceValue::Leaf(leaf) => self.add_value_leaf(leaf),
ReferenceValue::Array(elements) => {
// addresses of the elements in node_data
// Reusing a vec would be nicer, but it's not easy because of the recursion
// A global vec would work if every writer got its own discriminator
let mut addresses = Vec::new();
for elem in elements {
let value_addr = self.add_value(elem);
write_into(&mut addresses, value_addr);
}
ValueAddr {
type_id,
val_addr: write_bytes_into(&mut self.node_data, &addresses),
}
}
ReferenceValue::Object(entries) => {
// addresses of the elements in node_data
let mut addresses = Vec::new();
for (key, value) in entries {
let key_addr = self.add_value_leaf(ReferenceValueLeaf::Str(key));
let value_addr = self.add_value(value);
write_into(&mut addresses, key_addr);
write_into(&mut addresses, value_addr);
}
ValueAddr {
type_id,
val_addr: write_bytes_into(&mut self.node_data, &addresses),
}
}
}
}
/// Get CompactDocValue for address
fn get_compact_doc_value(&self, value_addr: ValueAddr) -> CompactDocValue<'_> {
CompactDocValue {
container: self,
value_addr,
}
}
/// get &[u8] reference from node_data
fn extract_bytes(&self, addr: Addr) -> &[u8] {
binary_deserialize_bytes(self.get_slice(addr))
}
/// get &str reference from node_data
fn extract_str(&self, addr: Addr) -> &str {
let data = self.extract_bytes(addr);
// Utf-8 checks would have a noticeable performance overhead here
unsafe { std::str::from_utf8_unchecked(data) }
}
/// deserialized owned value from node_data
fn read_from<T: BinarySerializable>(&self, addr: Addr) -> io::Result<T> {
let data_slice = &self.node_data[addr as usize..];
let mut cursor = std::io::Cursor::new(data_slice);
T::deserialize(&mut cursor)
}
/// get slice from address. The returned slice is open ended
fn get_slice(&self, addr: Addr) -> &[u8] {
&self.node_data[addr as usize..]
}
}
/// BinarySerializable alternative to read references
fn binary_deserialize_bytes(data: &[u8]) -> &[u8] {
let (len, bytes_read) = read_u32_vint_no_advance(data);
&data[bytes_read..bytes_read + len as usize]
}
/// Write bytes and return the position of the written data.
///
/// BinarySerializable alternative to write references
fn write_bytes_into(vec: &mut Vec<u8>, data: &[u8]) -> u32 {
let pos = vec.len() as u32;
let mut buf = [0u8; 8];
let len_vint_bytes = serialize_vint_u32(data.len() as u32, &mut buf);
vec.extend_from_slice(len_vint_bytes);
vec.extend_from_slice(data);
pos
}
/// Serialize and return the position
fn write_into<T: BinarySerializable>(vec: &mut Vec<u8>, value: T) -> u32 {
let pos = vec.len() as u32;
value.serialize(vec).unwrap();
pos
}
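`write_bytes_into` length-prefixes each payload with a vint and returns its offset into `node_data`. A self-contained sketch of that round trip using a minimal LEB128-style vint (the crate's `serialize_vint_u32` / `read_u32_vint_no_advance` differ in detail):

```rust
/// Minimal unsigned LEB128 vint writer.
fn write_vint(out: &mut Vec<u8>, mut v: u32) {
    loop {
        let byte = (v & 0x7F) as u8;
        v >>= 7;
        if v == 0 {
            out.push(byte);
            return;
        }
        out.push(byte | 0x80); // high bit set: more bytes follow
    }
}

/// Returns (value, number of bytes read).
fn read_vint(data: &[u8]) -> (u32, usize) {
    let (mut value, mut shift, mut read) = (0u32, 0, 0);
    for &byte in data {
        value |= u32::from(byte & 0x7F) << shift;
        read += 1;
        if byte & 0x80 == 0 {
            break;
        }
        shift += 7;
    }
    (value, read)
}

fn main() {
    let mut arena = Vec::new();
    let payload = b"hello";
    let addr = arena.len(); // the offset handed back to the caller
    write_vint(&mut arena, payload.len() as u32);
    arena.extend_from_slice(payload);

    let (len, header) = read_vint(&arena[addr..]);
    assert_eq!(&arena[addr + header..addr + header + len as usize], payload);
}
```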
impl PartialEq for CompactDoc {
fn eq(&self, other: &Self) -> bool {
// super slow, but only here for tests
let convert_to_comparable_map = |doc: &CompactDoc| {
let mut field_value_set: HashMap<Field, HashSet<String>> = Default::default();
for field_value in doc.field_values.iter() {
let value: OwnedValue = doc.get_compact_doc_value(field_value.value_addr).into();
let value = serde_json::to_string(&value).unwrap();
field_value_set
.entry(Field::from_field_id(field_value.field as u32))
.or_default()
.insert(value);
}
field_value_set
};
let self_field_values: HashMap<Field, HashSet<String>> = convert_to_comparable_map(self);
let other_field_values: HashMap<Field, HashSet<String>> = convert_to_comparable_map(other);
self_field_values.eq(&other_field_values)
}
}
impl Eq for CompactDoc {}
impl DocumentDeserialize for CompactDoc {
fn deserialize<'de, D>(mut deserializer: D) -> Result<Self, DeserializeError>
where D: DocumentDeserializer<'de> {
let mut doc = CompactDoc::default();
// TODO: Deserializing into OwnedValue is wasteful. The deserializer should be able to work
// on slices and referenced data.
while let Some((field, value)) = deserializer.next_field::<OwnedValue>()? {
doc.add_field_value(field, &value);
}
Ok(doc)
}
}
/// A value of a CompactDoc needs a reference to its container to extract its payload
#[derive(Debug, Clone, Copy)]
pub struct CompactDocValue<'a> {
container: &'a CompactDoc,
value_addr: ValueAddr,
}
impl PartialEq for CompactDocValue<'_> {
fn eq(&self, other: &Self) -> bool {
let value1: OwnedValue = (*self).into();
let value2: OwnedValue = (*other).into();
value1 == value2
}
}
impl<'a> From<CompactDocValue<'a>> for OwnedValue {
fn from(value: CompactDocValue) -> Self {
value.as_value().into()
}
}
impl<'a> Value<'a> for CompactDocValue<'a> {
type ArrayIter = CompactDocArrayIter<'a>;
type ObjectIter = CompactDocObjectIter<'a>;
fn as_value(&self) -> ReferenceValue<'a, Self> {
self.get_ref_value().unwrap()
}
}
impl<'a> CompactDocValue<'a> {
fn get_ref_value(&self) -> io::Result<ReferenceValue<'a, CompactDocValue<'a>>> {
let addr = self.value_addr.val_addr;
match self.value_addr.type_id {
ValueType::Null => Ok(ReferenceValueLeaf::Null.into()),
ValueType::Str => {
let str_ref = self.container.extract_str(addr);
Ok(ReferenceValueLeaf::Str(str_ref).into())
}
ValueType::Facet => {
let str_ref = self.container.extract_str(addr);
Ok(ReferenceValueLeaf::Facet(str_ref).into())
}
ValueType::Bytes => {
let data = self.container.extract_bytes(addr);
Ok(ReferenceValueLeaf::Bytes(data).into())
}
ValueType::U64 => self
.container
.read_from::<u64>(addr)
.map(ReferenceValueLeaf::U64)
.map(Into::into),
ValueType::I64 => self
.container
.read_from::<i64>(addr)
.map(ReferenceValueLeaf::I64)
.map(Into::into),
ValueType::F64 => self
.container
.read_from::<f64>(addr)
.map(ReferenceValueLeaf::F64)
.map(Into::into),
ValueType::Bool => Ok(ReferenceValueLeaf::Bool(addr != 0).into()),
ValueType::Date => self
.container
.read_from::<i64>(addr)
.map(|ts| ReferenceValueLeaf::Date(DateTime::from_timestamp_nanos(ts)))
.map(Into::into),
ValueType::IpAddr => self
.container
.read_from::<u128>(addr)
.map(|num| ReferenceValueLeaf::IpAddr(Ipv6Addr::from_u128(num)))
.map(Into::into),
ValueType::PreTokStr => self
.container
.read_from::<PreTokenizedString>(addr)
.map(Into::into)
.map(ReferenceValueLeaf::PreTokStr)
.map(Into::into),
ValueType::Object => Ok(ReferenceValue::Object(CompactDocObjectIter::new(
self.container,
addr,
)?)),
ValueType::Array => Ok(ReferenceValue::Array(CompactDocArrayIter::new(
self.container,
addr,
)?)),
}
}
}
/// The address in the vec
type Addr = u32;
#[derive(Clone, Copy, Default)]
#[repr(packed)]
/// The value type and the address to its payload in the container.
struct ValueAddr {
type_id: ValueType,
/// This is the address to the value in the vec, except for bool and null, which are inlined
val_addr: Addr,
}
impl BinarySerializable for ValueAddr {
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
self.type_id.serialize(writer)?;
VInt(self.val_addr as u64).serialize(writer)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
let type_id = ValueType::deserialize(reader)?;
let val_addr = VInt::deserialize(reader)?.0 as u32;
Ok(ValueAddr { type_id, val_addr })
}
}
impl std::fmt::Debug for ValueAddr {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let val_addr = self.val_addr;
f.write_fmt(format_args!("{:?} at {:?}", self.type_id, val_addr))
}
}
/// An enum representing a value for tantivy to index.
///
/// Any changes need to be reflected in `BinarySerializable` for `ValueType`
///
/// We can't use [schema::Type] or [columnar::ColumnType] here, because they are missing
/// some items like Array and PreTokStr.
#[derive(Default, Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum ValueType {
/// A null value.
#[default]
Null = 0,
/// The str type is used for any text information.
Str = 1,
/// Unsigned 64-bits Integer `u64`
U64 = 2,
/// Signed 64-bits Integer `i64`
I64 = 3,
/// 64-bits Float `f64`
F64 = 4,
/// Date/time with nanoseconds precision
Date = 5,
/// Facet
Facet = 6,
/// Arbitrarily sized byte array
Bytes = 7,
/// IpV6 Address. Internally there is no IpV4, it needs to be converted to `Ipv6Addr`.
IpAddr = 8,
/// Bool value
Bool = 9,
/// Pre-tokenized str type,
PreTokStr = 10,
/// Object
Object = 11,
/// Array
Array = 12,
}
impl BinarySerializable for ValueType {
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
(*self as u8).serialize(writer)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
let num = u8::deserialize(reader)?;
let type_id = if (0..=12).contains(&num) {
unsafe { std::mem::transmute(num) }
} else {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid value type id: {num}"),
));
};
Ok(type_id)
}
}
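The guarded transmute above is sound only because the `(0..=12)` range check covers exactly the discriminants of the `repr(u8)` enum; if a variant is ever added or removed, the guard must move with it. The same pattern in miniature:

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(u8)]
enum Tag {
    A = 0,
    B = 1,
    C = 2,
}

fn tag_from_u8(num: u8) -> Option<Tag> {
    if (0..=2).contains(&num) {
        // Sound only because 0..=2 are exactly Tag's discriminants.
        Some(unsafe { std::mem::transmute::<u8, Tag>(num) })
    } else {
        None
    }
}

fn main() {
    assert_eq!(tag_from_u8(1), Some(Tag::B));
    assert_eq!(tag_from_u8(9), None);
}
```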
impl<'a, V: Value<'a>> From<&ReferenceValue<'a, V>> for ValueType {
fn from(value: &ReferenceValue<'a, V>) -> Self {
match value {
ReferenceValue::Leaf(leaf) => leaf.into(),
ReferenceValue::Array(_) => ValueType::Array,
ReferenceValue::Object(_) => ValueType::Object,
}
}
}
impl<'a> From<&ReferenceValueLeaf<'a>> for ValueType {
fn from(value: &ReferenceValueLeaf<'a>) -> Self {
match value {
ReferenceValueLeaf::Null => ValueType::Null,
ReferenceValueLeaf::Str(_) => ValueType::Str,
ReferenceValueLeaf::U64(_) => ValueType::U64,
ReferenceValueLeaf::I64(_) => ValueType::I64,
ReferenceValueLeaf::F64(_) => ValueType::F64,
ReferenceValueLeaf::Bool(_) => ValueType::Bool,
ReferenceValueLeaf::Date(_) => ValueType::Date,
ReferenceValueLeaf::IpAddr(_) => ValueType::IpAddr,
ReferenceValueLeaf::PreTokStr(_) => ValueType::PreTokStr,
ReferenceValueLeaf::Facet(_) => ValueType::Facet,
ReferenceValueLeaf::Bytes(_) => ValueType::Bytes,
}
}
}
#[derive(Debug, Clone)]
/// The Iterator for the object values in the compact document
pub struct CompactDocObjectIter<'a> {
container: &'a CompactDoc,
node_addresses_slice: &'a [u8],
}
impl<'a> CompactDocObjectIter<'a> {
fn new(container: &'a CompactDoc, addr: Addr) -> io::Result<Self> {
// Objects are `&[ValueAddr]` serialized into bytes
let node_addresses_slice = container.extract_bytes(addr);
Ok(Self {
container,
node_addresses_slice,
})
}
}
impl<'a> Iterator for CompactDocObjectIter<'a> {
type Item = (&'a str, CompactDocValue<'a>);
fn next(&mut self) -> Option<Self::Item> {
if self.node_addresses_slice.is_empty() {
return None;
}
let key_addr = ValueAddr::deserialize(&mut self.node_addresses_slice).ok()?;
let key = self.container.extract_str(key_addr.val_addr);
let value = ValueAddr::deserialize(&mut self.node_addresses_slice).ok()?;
let value = CompactDocValue {
container: self.container,
value_addr: value,
};
Some((key, value))
}
}
#[derive(Debug, Clone)]
/// The Iterator for the array values in the compact document
pub struct CompactDocArrayIter<'a> {
container: &'a CompactDoc,
node_addresses_slice: &'a [u8],
}
impl<'a> CompactDocArrayIter<'a> {
fn new(container: &'a CompactDoc, addr: Addr) -> io::Result<Self> {
// Arrays are &[ValueAddr] serialized into bytes
let node_addresses_slice = container.extract_bytes(addr);
Ok(Self {
container,
node_addresses_slice,
})
}
}
impl<'a> Iterator for CompactDocArrayIter<'a> {
type Item = CompactDocValue<'a>;
fn next(&mut self) -> Option<Self::Item> {
if self.node_addresses_slice.is_empty() {
return None;
}
let value = ValueAddr::deserialize(&mut self.node_addresses_slice).ok()?;
let value = CompactDocValue {
container: self.container,
value_addr: value,
};
Some(value)
}
}
impl Document for CompactDoc {
type Value<'a> = CompactDocValue<'a>;
type FieldsValuesIter<'a> = FieldValueIterRef<'a>;
fn iter_fields_and_values(&self) -> Self::FieldsValuesIter<'_> {
FieldValueIterRef {
slice: self.field_values.iter(),
container: self,
}
}
}
/// A helper wrapper for creating an iterator over the field values
pub struct FieldValueIterRef<'a> {
slice: std::slice::Iter<'a, FieldValueAddr>,
container: &'a CompactDoc,
}
impl<'a> Iterator for FieldValueIterRef<'a> {
type Item = (Field, CompactDocValue<'a>);
fn next(&mut self) -> Option<Self::Item> {
self.slice.next().map(|field_value| {
(
Field::from_field_id(field_value.field as u32),
CompactDocValue::<'a> {
container: self.container,
value_addr: field_value.value_addr,
},
)
})
}
}
/// Error that may happen when deserializing
@@ -725,40 +264,7 @@ mod tests {
let text_field = schema_builder.add_text_field("title", TEXT);
let mut doc = TantivyDocument::default();
doc.add_text(text_field, "My title");
assert_eq!(doc.field_values().count(), 1);
let schema = schema_builder.build();
let _val = doc.get_first(text_field).unwrap();
let _json = doc.to_named_doc(&schema);
}
#[test]
fn test_json_value() {
let json_str = r#"{
"toto": "titi",
"float": -0.2,
"bool": true,
"unsigned": 1,
"signed": -2,
"complexobject": {
"field.with.dot": 1
},
"date": "1985-04-12T23:20:50.52Z",
"my_arr": [2, 3, {"my_key": "two tokens"}, 4, {"nested_array": [2, 5, 6, [7, 8, {"a": [{"d": {"e":[99]}}, 9000]}, 9, 10], [5, 5]]}]
}"#;
let json_val: std::collections::BTreeMap<String, OwnedValue> =
serde_json::from_str(json_str).unwrap();
let mut schema_builder = Schema::builder();
let json_field = schema_builder.add_json_field("json", TEXT);
let mut doc = TantivyDocument::default();
doc.add_object(json_field, json_val);
let schema = schema_builder.build();
let json = doc.to_json(&schema);
let actual_json: serde_json::Value = serde_json::from_str(&json).unwrap();
let expected_json: serde_json::Value = serde_json::from_str(json_str).unwrap();
assert_eq!(actual_json["json"][0], expected_json);
assert_eq!(doc.field_values().len(), 1);
}
// TODO: Should this be re-added with the serialize method
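To summarise the `CompactDoc` side of this diff: values are serialized into one shared byte arena and the per-field list only stores `(field id, address)` pairs. A toy illustration of the layout (single-byte length prefix and strings only, unlike the real vint-based, type-tagged encoding):

```rust
#[derive(Clone, Copy)]
struct ValueAddr {
    addr: u32, // offset into node_data (the toy only stores strings)
}

struct TinyDoc {
    node_data: Vec<u8>,
    field_values: Vec<(u16, ValueAddr)>,
}

impl TinyDoc {
    fn add_str(&mut self, field: u16, s: &str) {
        let addr = self.node_data.len() as u32;
        self.node_data.push(s.len() as u8); // 1-byte length prefix for the toy
        self.node_data.extend_from_slice(s.as_bytes());
        self.field_values.push((field, ValueAddr { addr }));
    }

    fn get_str(&self, field: u16) -> Option<&str> {
        let addr = self.field_values.iter().find(|(f, _)| *f == field)?.1.addr as usize;
        let len = self.node_data[addr] as usize;
        std::str::from_utf8(&self.node_data[addr + 1..addr + 1 + len]).ok()
    }
}

fn main() {
    let mut doc = TinyDoc { node_data: Vec::new(), field_values: Vec::new() };
    doc.add_str(0, "My title");
    assert_eq!(doc.get_str(0), Some("My title"));
}
```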

View File

@@ -5,39 +5,21 @@
//! and don't care about some of the more specialised types or only want to customise
//! part of the document structure.
use std::collections::{btree_map, hash_map, BTreeMap, HashMap};
use std::iter::Empty;
use std::net::Ipv6Addr;
use common::DateTime;
use serde_json::Number;
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use super::facet::Facet;
use super::ReferenceValueLeaf;
use crate::schema::document::{
ArrayAccess, DeserializeError, Document, DocumentDeserialize, DocumentDeserializer,
ObjectAccess, ReferenceValue, Value, ValueDeserialize, ValueDeserializer, ValueVisitor,
};
use crate::schema::Field;
use crate::tokenizer::PreTokenizedString;
// Serde compatibility support.
pub fn can_be_rfc3339_date_time(text: &str) -> bool {
if let Some(&first_byte) = text.as_bytes().first() {
if first_byte.is_ascii_digit() {
return true;
}
}
false
}
impl<'a> Value<'a> for &'a serde_json::Value {
type ArrayIter = std::slice::Iter<'a, serde_json::Value>;
type ObjectIter = JsonObjectIter<'a>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
match self {
serde_json::Value::Null => ReferenceValueLeaf::Null.into(),
@@ -53,19 +35,7 @@ impl<'a> Value<'a> for &'a serde_json::Value {
panic!("Unsupported serde_json number {number}");
}
}
serde_json::Value::String(text) => {
if can_be_rfc3339_date_time(text) {
match OffsetDateTime::parse(text, &Rfc3339) {
Ok(dt) => {
let dt_utc = dt.to_offset(time::UtcOffset::UTC);
ReferenceValueLeaf::Date(DateTime::from_utc(dt_utc)).into()
}
Err(_) => ReferenceValueLeaf::Str(text).into(),
}
} else {
ReferenceValueLeaf::Str(text).into()
}
}
serde_json::Value::String(val) => ReferenceValueLeaf::Str(val).into(),
serde_json::Value::Array(elements) => ReferenceValue::Array(elements.iter()),
serde_json::Value::Object(object) => {
ReferenceValue::Object(JsonObjectIter(object.iter()))
@@ -74,126 +44,6 @@ impl<'a> Value<'a> for &'a serde_json::Value {
}
}
impl<'a> Value<'a> for &'a String {
type ArrayIter = Empty<&'a String>;
type ObjectIter = Empty<(&'a str, &'a String)>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::Str(self))
}
}
impl<'a> Value<'a> for &'a Facet {
type ArrayIter = Empty<&'a Facet>;
type ObjectIter = Empty<(&'a str, &'a Facet)>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::Facet(self.encoded_str()))
}
}
impl<'a> Value<'a> for &'a u64 {
type ArrayIter = Empty<&'a u64>;
type ObjectIter = Empty<(&'a str, &'a u64)>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::U64(**self))
}
}
impl<'a> Value<'a> for &'a i64 {
type ArrayIter = Empty<&'a i64>;
type ObjectIter = Empty<(&'a str, &'a i64)>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::I64(**self))
}
}
impl<'a> Value<'a> for &'a f64 {
type ArrayIter = Empty<&'a f64>;
type ObjectIter = Empty<(&'a str, &'a f64)>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::F64(**self))
}
}
impl<'a> Value<'a> for &'a bool {
type ArrayIter = Empty<&'a bool>;
type ObjectIter = Empty<(&'a str, &'a bool)>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::Bool(**self))
}
}
impl<'a> Value<'a> for &'a str {
type ArrayIter = Empty<&'a str>;
type ObjectIter = Empty<(&'a str, &'a str)>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::Str(self))
}
}
impl<'a> Value<'a> for &'a &'a str {
type ArrayIter = Empty<&'a &'a str>;
type ObjectIter = Empty<(&'a str, &'a &'a str)>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::Str(self))
}
}
impl<'a> Value<'a> for &'a [u8] {
type ArrayIter = Empty<&'a [u8]>;
type ObjectIter = Empty<(&'a str, &'a [u8])>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::Bytes(self))
}
}
impl<'a> Value<'a> for &'a &'a [u8] {
type ArrayIter = Empty<&'a &'a [u8]>;
type ObjectIter = Empty<(&'a str, &'a &'a [u8])>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::Bytes(self))
}
}
impl<'a> Value<'a> for &'a Vec<u8> {
type ArrayIter = Empty<&'a Vec<u8>>;
type ObjectIter = Empty<(&'a str, &'a Vec<u8>)>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::Bytes(self))
}
}
impl<'a> Value<'a> for &'a DateTime {
type ArrayIter = Empty<&'a DateTime>;
type ObjectIter = Empty<(&'a str, &'a DateTime)>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::Date(**self))
}
}
impl<'a> Value<'a> for &'a Ipv6Addr {
type ArrayIter = Empty<&'a Ipv6Addr>;
type ObjectIter = Empty<(&'a str, &'a Ipv6Addr)>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::IpAddr(**self))
}
}
impl<'a> Value<'a> for &'a PreTokenizedString {
type ArrayIter = Empty<&'a PreTokenizedString>;
type ObjectIter = Empty<(&'a str, &'a PreTokenizedString)>;
#[inline]
fn as_value(&self) -> ReferenceValue<'a, Self> {
ReferenceValue::Leaf(ReferenceValueLeaf::PreTokStr(Box::new((*self).clone())))
}
}
impl ValueDeserialize for serde_json::Value {
fn deserialize<'de, D>(deserializer: D) -> Result<Self, DeserializeError>
where D: ValueDeserializer<'de> {
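The date-detection logic being moved in this hunk combines a cheap first-byte pre-filter with a full RFC 3339 parse. A standalone sketch of the same flow, assuming only the `time` crate:

```rust
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;

/// Cheap pre-filter: RFC 3339 timestamps always start with a digit, so most
/// plain strings never reach the expensive parse.
fn can_be_rfc3339_date_time(text: &str) -> bool {
    matches!(text.as_bytes().first(), Some(b) if b.is_ascii_digit())
}

fn main() {
    for text in ["1985-04-12T23:20:50.52Z", "hello world", "12 monkeys"] {
        if can_be_rfc3339_date_time(text) {
            match OffsetDateTime::parse(text, &Rfc3339) {
                Ok(dt) => {
                    // Normalise to UTC, like the hunk above does.
                    println!("{text}: date {}", dt.to_offset(time::UtcOffset::UTC));
                    continue;
                }
                Err(_) => {} // first byte was a digit, but not a timestamp
            }
        }
        println!("{text}: plain string");
    }
}
```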

View File

@@ -5,24 +5,22 @@
//! - [Value] which provides tantivy with a way to access the document's values in a common way
//! without performing any additional allocations.
//! - [DocumentDeserialize] which implements the necessary code to deserialize the document from the
//! doc store. If you are fine with fetching [TantivyDocument] from the doc store, you can skip
//! implementing this trait for your type.
//! doc store.
//!
//! Tantivy provides a few out-of-box implementations of these core traits to provide
//! some simple usage if you don't want to implement these traits on a custom type yourself.
//!
//! # Out-of-box document implementations
//! - [TantivyDocument] the old document type used by Tantivy before the trait based approach was
//! - [Document] the old document type used by Tantivy before the trait based approach was
//! implemented. This type is still valid and provides all of the original behaviour you might
//! expect.
//! - `BTreeMap<Field, OwnedValue>` a mapping of field_ids to their relevant schema value using a
//! - `BTreeMap<Field, Value>` a mapping of field_ids to their relevant schema value using a
//! BTreeMap.
//! - `HashMap<Field, OwnedValue>` a mapping of field_ids to their relevant schema value using a
//! HashMap.
//! - `HashMap<Field, Value>` a mapping of field_ids to their relevant schema value using a HashMap.
//!
//! # Implementing your custom documents
//! Often in larger projects or higher performance applications you want to avoid the extra overhead
//! of converting your own types to the [TantivyDocument] type, this can often save you a
//! of converting your own types to the Tantivy [Document] type, this can often save you a
//! significant amount of time when indexing by avoiding the additional allocations.
//!
//! ### Important Note
@@ -48,7 +46,6 @@
//!
//! impl Document for MyCustomDocument {
//! // The value type produced by the `iter_fields_and_values` iterator.
//! // tantivy already implements the Value trait for serde_json::Value.
//! type Value<'a> = &'a serde_json::Value;
//! // The iterator which is produced by `iter_fields_and_values`.
//! // Often this is a simple new-type wrapper unless you like super long generics.
@@ -97,11 +94,10 @@
//! implementation for.
//!
//! ## Implementing custom values
//! In order to allow documents to return custom types, they must implement
//! the [Value] trait which provides a way for Tantivy to get a `ReferenceValue` that it can then
//! index and store.
//! Internally, Tantivy only works with `ReferenceValue` which is an enum that tries to borrow
//! as much data as it can
//! as much data as it can. In order to allow documents to return custom types, they must implement
//! the `Value` trait which provides a way for Tantivy to get a `ReferenceValue` that it can then
//! index and store.
//!
//! Values can just as easily be customised as documents by implementing the `Value` trait.
//!
@@ -109,9 +105,9 @@
//! hold references of the data held by the parent [Document] which can then be passed
//! on to the [ReferenceValue].
//!
//! This is why [Value] is implemented for `&'a serde_json::Value` and
//! [&'a tantivy::schema::document::OwnedValue](OwnedValue) but not for their owned counterparts, as
//! we cannot satisfy the lifetime bounds necessary when indexing the documents.
//! This is why `Value` is implemented for `&'a serde_json::Value` and `&'a
//! tantivy::schema::Value` but not for their owned counterparts, as we cannot satisfy the lifetime
//! bounds necessary when indexing the documents.
//!
//! ### A note about returning values
//! The custom value type does not have to be the type stored by the document, instead the
@@ -172,9 +168,7 @@ pub use self::de::{
ArrayAccess, DeserializeError, DocumentDeserialize, DocumentDeserializer, ObjectAccess,
ValueDeserialize, ValueDeserializer, ValueType, ValueVisitor,
};
pub use self::default_document::{
CompactDocArrayIter, CompactDocObjectIter, CompactDocValue, DocParsingError, TantivyDocument,
};
pub use self::default_document::{DocParsingError, TantivyDocument};
pub use self::owned_value::OwnedValue;
pub(crate) use self::se::BinaryDocumentSerializer;
pub use self::value::{ReferenceValue, ReferenceValueLeaf, Value};
@@ -235,7 +229,7 @@ pub trait Document: Send + Sync + 'static {
let field_name = schema.get_field_name(field);
let values: Vec<OwnedValue> = field_values
.into_iter()
.map(|val| OwnedValue::from(val.as_value()))
.map(|val| val.as_value().into())
.collect();
field_map.insert(field_name.to_string(), values);
}
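For readers implementing the trait themselves, a minimal sketch along the lines these module docs describe might look as follows. `MyCustomDocument` comes from the docs above; the `FieldsIter` new-type is a hypothetical name, and depending on the tantivy version you may also need a `DocumentDeserialize` impl to fetch documents back from the doc store:

```rust
use tantivy::schema::document::Document;
use tantivy::schema::Field;

// A custom document backed by a flat list of (field, serde_json value) pairs.
struct MyCustomDocument {
    fields: Vec<(Field, serde_json::Value)>,
}

// New-type iterator wrapper, as suggested above, to keep the generics readable.
struct FieldsIter<'a>(std::slice::Iter<'a, (Field, serde_json::Value)>);

impl<'a> Iterator for FieldsIter<'a> {
    type Item = (Field, &'a serde_json::Value);

    fn next(&mut self) -> Option<Self::Item> {
        self.0.next().map(|(field, value)| (*field, value))
    }
}

impl Document for MyCustomDocument {
    // tantivy already implements `Value` for `&serde_json::Value`.
    type Value<'a> = &'a serde_json::Value;
    type FieldsValuesIter<'a> = FieldsIter<'a>;

    fn iter_fields_and_values(&self) -> Self::FieldsValuesIter<'_> {
        FieldsIter(self.fields.iter())
    }
}
```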

View File

@@ -1,4 +1,4 @@
use std::collections::BTreeMap;
use std::collections::{btree_map, BTreeMap};
use std::fmt;
use std::net::Ipv6Addr;
@@ -8,7 +8,6 @@ use serde::de::{MapAccess, SeqAccess};
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use super::existing_type_impls::can_be_rfc3339_date_time;
use super::ReferenceValueLeaf;
use crate::schema::document::{
ArrayAccess, DeserializeError, ObjectAccess, ReferenceValue, Value, ValueDeserialize,
@@ -46,7 +45,7 @@ pub enum OwnedValue {
/// A set of values.
Array(Vec<Self>),
/// Dynamic object value.
Object(Vec<(String, Self)>),
Object(BTreeMap<String, Self>),
/// IpV6 Address. Internally there is no IpV4, it needs to be converted to `Ipv6Addr`.
IpAddr(Ipv6Addr),
}
@@ -66,13 +65,13 @@ impl<'a> Value<'a> for &'a OwnedValue {
match self {
OwnedValue::Null => ReferenceValueLeaf::Null.into(),
OwnedValue::Str(val) => ReferenceValueLeaf::Str(val).into(),
OwnedValue::PreTokStr(val) => ReferenceValueLeaf::PreTokStr(val.clone().into()).into(),
OwnedValue::PreTokStr(val) => ReferenceValueLeaf::PreTokStr(val).into(),
OwnedValue::U64(val) => ReferenceValueLeaf::U64(*val).into(),
OwnedValue::I64(val) => ReferenceValueLeaf::I64(*val).into(),
OwnedValue::F64(val) => ReferenceValueLeaf::F64(*val).into(),
OwnedValue::Bool(val) => ReferenceValueLeaf::Bool(*val).into(),
OwnedValue::Date(val) => ReferenceValueLeaf::Date(*val).into(),
OwnedValue::Facet(val) => ReferenceValueLeaf::Facet(val.encoded_str()).into(),
OwnedValue::Facet(val) => ReferenceValueLeaf::Facet(val).into(),
OwnedValue::Bytes(val) => ReferenceValueLeaf::Bytes(val).into(),
OwnedValue::IpAddr(val) => ReferenceValueLeaf::IpAddr(*val).into(),
OwnedValue::Array(array) => ReferenceValue::Array(array.iter()),
@@ -149,10 +148,10 @@ impl ValueDeserialize for OwnedValue {
fn visit_object<'de, A>(&self, mut access: A) -> Result<Self::Value, DeserializeError>
where A: ObjectAccess<'de> {
let mut elements = Vec::with_capacity(access.size_hint());
let mut elements = BTreeMap::new();
while let Some((key, value)) = access.next_entry()? {
elements.push((key, value));
elements.insert(key, value);
}
Ok(OwnedValue::Object(elements))
@@ -168,7 +167,6 @@ impl Eq for OwnedValue {}
impl serde::Serialize for OwnedValue {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer {
use serde::ser::SerializeMap;
match *self {
OwnedValue::Null => serializer.serialize_unit(),
OwnedValue::Str(ref v) => serializer.serialize_str(v),
@@ -182,13 +180,7 @@ impl serde::Serialize for OwnedValue {
}
OwnedValue::Facet(ref facet) => facet.serialize(serializer),
OwnedValue::Bytes(ref bytes) => serializer.serialize_str(&BASE64.encode(bytes)),
OwnedValue::Object(ref obj) => {
let mut map = serializer.serialize_map(Some(obj.len()))?;
for (k, v) in obj {
map.serialize_entry(k, v)?;
}
map.end()
}
OwnedValue::Object(ref obj) => obj.serialize(serializer),
OwnedValue::IpAddr(ref ip_v6) => {
// Ensure IpV4 addresses get serialized as IpV4, but excluding IpV6 loopback.
if let Some(ip_v4) = ip_v6.to_ipv4_mapped() {
@@ -256,10 +248,12 @@ impl<'de> serde::Deserialize<'de> for OwnedValue {
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where A: MapAccess<'de> {
let mut object = map.size_hint().map(Vec::with_capacity).unwrap_or_default();
let mut object = BTreeMap::new();
while let Some((key, value)) = map.next_entry()? {
object.push((key, value));
object.insert(key, value);
}
Ok(OwnedValue::Object(object))
}
}
@@ -278,13 +272,11 @@ impl<'a, V: Value<'a>> From<ReferenceValue<'a, V>> for OwnedValue {
ReferenceValueLeaf::I64(val) => OwnedValue::I64(val),
ReferenceValueLeaf::F64(val) => OwnedValue::F64(val),
ReferenceValueLeaf::Date(val) => OwnedValue::Date(val),
ReferenceValueLeaf::Facet(val) => {
OwnedValue::Facet(Facet::from_encoded_string(val.to_string()))
}
ReferenceValueLeaf::Facet(val) => OwnedValue::Facet(val.clone()),
ReferenceValueLeaf::Bytes(val) => OwnedValue::Bytes(val.to_vec()),
ReferenceValueLeaf::IpAddr(val) => OwnedValue::IpAddr(val),
ReferenceValueLeaf::Bool(val) => OwnedValue::Bool(val),
ReferenceValueLeaf::PreTokStr(val) => OwnedValue::PreTokStr(*val.clone()),
ReferenceValueLeaf::PreTokStr(val) => OwnedValue::PreTokStr(val.clone()),
},
ReferenceValue::Array(val) => {
OwnedValue::Array(val.map(|v| v.as_value().into()).collect())
@@ -371,11 +363,20 @@ impl From<PreTokenizedString> for OwnedValue {
impl From<BTreeMap<String, OwnedValue>> for OwnedValue {
fn from(object: BTreeMap<String, OwnedValue>) -> OwnedValue {
let key_values = object.into_iter().collect();
OwnedValue::Object(key_values)
OwnedValue::Object(object)
}
}
fn can_be_rfc3339_date_time(text: &str) -> bool {
if let Some(&first_byte) = text.as_bytes().first() {
if first_byte.is_ascii_digit() {
return true;
}
}
false
}
impl From<serde_json::Value> for OwnedValue {
fn from(value: serde_json::Value) -> Self {
match value {
@@ -416,16 +417,18 @@ impl From<serde_json::Value> for OwnedValue {
impl From<serde_json::Map<String, serde_json::Value>> for OwnedValue {
fn from(map: serde_json::Map<String, serde_json::Value>) -> Self {
let object: Vec<(String, OwnedValue)> = map
.into_iter()
.map(|(key, value)| (key, OwnedValue::from(value)))
.collect();
let mut object = BTreeMap::new();
for (key, value) in map {
object.insert(key, OwnedValue::from(value));
}
OwnedValue::Object(object)
}
}
/// A wrapper type for iterating over an object's entries, producing reference values.
pub struct ObjectMapIter<'a>(std::slice::Iter<'a, (String, OwnedValue)>);
pub struct ObjectMapIter<'a>(btree_map::Iter<'a, String, OwnedValue>);
impl<'a> Iterator for ObjectMapIter<'a> {
type Item = (&'a str, &'a OwnedValue);
@@ -463,7 +466,6 @@ mod tests {
let mut doc = TantivyDocument::default();
doc.add_bytes(bytes_field, "".as_bytes());
let json_string = doc.to_json(&schema);
assert_eq!(json_string, r#"{"my_bytes":[""]}"#);
}
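Most churn in this file comes from `OwnedValue::Object` switching between `BTreeMap<String, OwnedValue>` and `Vec<(String, OwnedValue)>`. Both sides convert via `into_iter().collect()`, which is why the tests only change at their construction sites. In miniature, with `i32` standing in for `OwnedValue`:

```rust
use std::collections::BTreeMap;

fn main() {
    let map: BTreeMap<String, i32> = [("a".to_string(), 1), ("b".to_string(), 2)].into();
    // collect() bridges the two Object representations in either direction.
    let pairs: Vec<(String, i32)> = map.clone().into_iter().collect();
    let back: BTreeMap<String, i32> = pairs.into_iter().collect();
    assert_eq!(map, back);
}
```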

View File

@@ -25,7 +25,6 @@ where W: Write
/// Attempts to serialize a given document and write the output
/// to the writer.
#[inline]
pub(crate) fn serialize_doc<D>(&mut self, doc: &D) -> io::Result<()>
where D: Document {
let stored_field_values = || {
@@ -58,8 +57,9 @@ where W: Write
return Err(io::Error::new(
io::ErrorKind::Other,
format!(
"Unexpected number of entries written to serializer, expected \
{num_field_values} entries, got {actual_length} entries",
"Unexpected number of entries written to serializer, expected {} entries, got \
{} entries",
num_field_values, actual_length,
),
));
}
@@ -121,7 +121,7 @@ where W: Write
ReferenceValueLeaf::Facet(val) => {
self.write_type_code(type_codes::HIERARCHICAL_FACET_CODE)?;
Cow::Borrowed(val).serialize(self.writer)
val.serialize(self.writer)
}
ReferenceValueLeaf::Bytes(val) => {
self.write_type_code(type_codes::BYTES_CODE)?;
@@ -428,7 +428,7 @@ mod tests {
);
let facet = Facet::from_text("/hello/world").unwrap();
let result = serialize_value(ReferenceValueLeaf::Facet(facet.encoded_str()).into());
let result = serialize_value(ReferenceValueLeaf::Facet(&facet).into());
let expected = binary_repr!(
type_codes::HIERARCHICAL_FACET_CODE => Facet::from_text("/hello/world").unwrap(),
);
@@ -441,8 +441,7 @@ mod tests {
text: "hello, world".to_string(),
tokens: vec![Token::default(), Token::default()],
};
let result =
serialize_value(ReferenceValueLeaf::PreTokStr(pre_tok_str.clone().into()).into());
let result = serialize_value(ReferenceValueLeaf::PreTokStr(&pre_tok_str).into());
let expected = binary_repr!(
type_codes::EXT_CODE, type_codes::TOK_STR_EXT_CODE => pre_tok_str,
);
@@ -679,7 +678,6 @@ mod tests {
);
}
#[inline]
fn serialize_doc<D: Document>(doc: &D, schema: &Schema) -> Vec<u8> {
let mut writer = Vec::new();

View File

@@ -3,6 +3,7 @@ use std::net::Ipv6Addr;
use common::DateTime;
use crate::schema::Facet;
use crate::tokenizer::PreTokenizedString;
/// A single field value.
@@ -27,7 +28,7 @@ pub trait Value<'a>: Send + Sync + Debug {
}
#[inline]
/// If the Value is a leaf, returns the associated leaf. Returns None otherwise.
/// If the Value is a String, returns the associated str. Returns None otherwise.
fn as_leaf(&self) -> Option<ReferenceValueLeaf<'a>> {
if let ReferenceValue::Leaf(val) = self.as_value() {
Some(val)
@@ -81,9 +82,8 @@ pub trait Value<'a>: Send + Sync + Debug {
#[inline]
/// If the Value is a pre-tokenized string, returns the associated string. Returns None
/// otherwise.
fn as_pre_tokenized_text(&self) -> Option<Box<PreTokenizedString>> {
self.as_leaf()
.and_then(|leaf| leaf.into_pre_tokenized_text())
fn as_pre_tokenized_text(&self) -> Option<&'a PreTokenizedString> {
self.as_leaf().and_then(|leaf| leaf.as_pre_tokenized_text())
}
#[inline]
@@ -94,7 +94,7 @@ pub trait Value<'a>: Send + Sync + Debug {
#[inline]
/// If the Value is a facet, returns the associated facet. Returns None otherwise.
fn as_facet(&self) -> Option<&'a str> {
fn as_facet(&self) -> Option<&'a Facet> {
self.as_leaf().and_then(|leaf| leaf.as_facet())
}
@@ -132,7 +132,7 @@ pub trait Value<'a>: Send + Sync + Debug {
}
/// An enum representing a leaf value for tantivy to index.
#[derive(Clone, Debug, PartialEq)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ReferenceValueLeaf<'a> {
/// A null value.
Null,
@@ -146,9 +146,8 @@ pub enum ReferenceValueLeaf<'a> {
F64(f64),
/// Date/time with nanoseconds precision
Date(DateTime),
/// Facet string needs to match the format of
/// [Facet::encoded_str](crate::schema::Facet::encoded_str).
Facet(&'a str),
/// Facet
Facet(&'a Facet),
/// Arbitrarily sized byte array
Bytes(&'a [u8]),
/// IpV6 Address. Internally there is no IpV4, it needs to be converted to `Ipv6Addr`.
@@ -156,70 +155,7 @@ pub enum ReferenceValueLeaf<'a> {
/// Bool value
Bool(bool),
/// Pre-tokenized str type,
PreTokStr(Box<PreTokenizedString>),
}
impl From<u64> for ReferenceValueLeaf<'_> {
#[inline]
fn from(value: u64) -> Self {
ReferenceValueLeaf::U64(value)
}
}
impl From<i64> for ReferenceValueLeaf<'_> {
#[inline]
fn from(value: i64) -> Self {
ReferenceValueLeaf::I64(value)
}
}
impl From<f64> for ReferenceValueLeaf<'_> {
#[inline]
fn from(value: f64) -> Self {
ReferenceValueLeaf::F64(value)
}
}
impl From<bool> for ReferenceValueLeaf<'_> {
#[inline]
fn from(value: bool) -> Self {
ReferenceValueLeaf::Bool(value)
}
}
impl<'a> From<&'a str> for ReferenceValueLeaf<'a> {
#[inline]
fn from(value: &'a str) -> Self {
ReferenceValueLeaf::Str(value)
}
}
impl<'a> From<&'a [u8]> for ReferenceValueLeaf<'a> {
#[inline]
fn from(value: &'a [u8]) -> Self {
ReferenceValueLeaf::Bytes(value)
}
}
impl From<DateTime> for ReferenceValueLeaf<'_> {
#[inline]
fn from(value: DateTime) -> Self {
ReferenceValueLeaf::Date(value)
}
}
impl From<Ipv6Addr> for ReferenceValueLeaf<'_> {
#[inline]
fn from(value: Ipv6Addr) -> Self {
ReferenceValueLeaf::IpAddr(value)
}
}
impl From<PreTokenizedString> for ReferenceValueLeaf<'_> {
#[inline]
fn from(val: PreTokenizedString) -> Self {
ReferenceValueLeaf::PreTokStr(Box::new(val))
}
PreTokStr(&'a PreTokenizedString),
}
impl<'a, T: Value<'a> + ?Sized> From<ReferenceValueLeaf<'a>> for ReferenceValue<'a, T> {
@@ -323,9 +259,9 @@ impl<'a> ReferenceValueLeaf<'a> {
}
#[inline]
/// If the Value is a pre-tokenized string, consumes it and returns the string.
/// Returns None otherwise.
pub fn into_pre_tokenized_text(self) -> Option<Box<PreTokenizedString>> {
/// If the Value is a pre-tokenized string, returns the associated string. Returns None
/// otherwise.
pub fn as_pre_tokenized_text(&self) -> Option<&'a PreTokenizedString> {
if let Self::PreTokStr(val) = self {
Some(val)
} else {
@@ -345,7 +281,7 @@ impl<'a> ReferenceValueLeaf<'a> {
#[inline]
/// If the Value is a facet, returns the associated facet. Returns None otherwise.
pub fn as_facet(&self) -> Option<&'a str> {
pub fn as_facet(&self) -> Option<&'a Facet> {
if let Self::Facet(val) = self {
Some(val)
} else {
@@ -386,16 +322,6 @@ where V: Value<'a>
}
}
#[inline]
/// If the Value is a leaf, consume it and return the leaf. Returns None otherwise.
pub fn into_leaf(self) -> Option<ReferenceValueLeaf<'a>> {
if let Self::Leaf(val) = self {
Some(val)
} else {
None
}
}
#[inline]
/// If the Value is a String, returns the associated str. Returns None otherwise.
pub fn as_str(&self) -> Option<&'a str> {
@@ -439,11 +365,10 @@ where V: Value<'a>
}
#[inline]
/// If the Value is a pre-tokenized string, consumes it and returns the string.
/// Returns None otherwise.
pub fn into_pre_tokenized_text(self) -> Option<Box<PreTokenizedString>> {
self.into_leaf()
.and_then(|leaf| leaf.into_pre_tokenized_text())
/// If the Value is a pre-tokenized string, returns the associated string. Returns None
/// otherwise.
pub fn as_pre_tokenized_text(&self) -> Option<&'a PreTokenizedString> {
self.as_leaf().and_then(|leaf| leaf.as_pre_tokenized_text())
}
#[inline]
@@ -454,7 +379,7 @@ where V: Value<'a>
#[inline]
/// If the Value is a facet, returns the associated facet. Returns None otherwise.
pub fn as_facet(&self) -> Option<&'a str> {
pub fn as_facet(&self) -> Option<&'a Facet> {
self.as_leaf().and_then(|leaf| leaf.as_facet())
}
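The `From` impls added and removed in this hunk exist for ergonomics: any primitive convertible into a leaf can be passed straight to an `add_leaf_field_value`-style API. A toy mirror of that pattern (all names here are hypothetical):

```rust
#[derive(Debug, PartialEq)]
enum Leaf<'a> {
    Str(&'a str),
    U64(u64),
    Bool(bool),
}

impl<'a> From<&'a str> for Leaf<'a> {
    fn from(v: &'a str) -> Self {
        Leaf::Str(v)
    }
}

impl From<u64> for Leaf<'_> {
    fn from(v: u64) -> Self {
        Leaf::U64(v)
    }
}

impl From<bool> for Leaf<'_> {
    fn from(v: bool) -> Self {
        Leaf::Bool(v)
    }
}

fn add_leaf<'a, T: Into<Leaf<'a>>>(doc: &mut Vec<Leaf<'a>>, value: T) {
    doc.push(value.into());
}

fn main() {
    let mut doc = Vec::new();
    add_leaf(&mut doc, "My title"); // &str -> Leaf::Str
    add_leaf(&mut doc, 42u64); //      u64  -> Leaf::U64
    add_leaf(&mut doc, true); //       bool -> Leaf::Bool
    assert_eq!(doc[1], Leaf::U64(42));
}
```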

View File

@@ -568,21 +568,21 @@ mod tests {
let schema = schema_builder.build();
let doc = TantivyDocument::parse_json(&schema, r#"{"id": 100}"#).unwrap();
assert_eq!(
OwnedValue::Str("100".to_string()),
doc.get_first(text_field).unwrap().into()
&OwnedValue::Str("100".to_string()),
doc.get_first(text_field).unwrap()
);
let doc = TantivyDocument::parse_json(&schema, r#"{"id": true}"#).unwrap();
assert_eq!(
OwnedValue::Str("true".to_string()),
doc.get_first(text_field).unwrap().into()
&OwnedValue::Str("true".to_string()),
doc.get_first(text_field).unwrap()
);
// Not sure if this null coercion is the best approach
let doc = TantivyDocument::parse_json(&schema, r#"{"id": null}"#).unwrap();
assert_eq!(
OwnedValue::Str("null".to_string()),
doc.get_first(text_field).unwrap().into()
&OwnedValue::Str("null".to_string()),
doc.get_first(text_field).unwrap()
);
}
@@ -595,18 +595,9 @@ mod tests {
let schema = schema_builder.build();
let doc_json = r#"{"i64": "100", "u64": "100", "f64": "100"}"#;
let doc = TantivyDocument::parse_json(&schema, doc_json).unwrap();
assert_eq!(
OwnedValue::I64(100),
doc.get_first(i64_field).unwrap().into()
);
assert_eq!(
OwnedValue::U64(100),
doc.get_first(u64_field).unwrap().into()
);
assert_eq!(
OwnedValue::F64(100.0),
doc.get_first(f64_field).unwrap().into()
);
assert_eq!(&OwnedValue::I64(100), doc.get_first(i64_field).unwrap());
assert_eq!(&OwnedValue::U64(100), doc.get_first(u64_field).unwrap());
assert_eq!(&OwnedValue::F64(100.0), doc.get_first(f64_field).unwrap());
}
#[test]
@@ -616,17 +607,11 @@ mod tests {
let schema = schema_builder.build();
let doc_json = r#"{"bool": "true"}"#;
let doc = TantivyDocument::parse_json(&schema, doc_json).unwrap();
assert_eq!(
OwnedValue::Bool(true),
doc.get_first(bool_field).unwrap().into()
);
assert_eq!(&OwnedValue::Bool(true), doc.get_first(bool_field).unwrap());
let doc_json = r#"{"bool": "false"}"#;
let doc = TantivyDocument::parse_json(&schema, doc_json).unwrap();
assert_eq!(
OwnedValue::Bool(false),
doc.get_first(bool_field).unwrap().into()
);
assert_eq!(&OwnedValue::Bool(false), doc.get_first(bool_field).unwrap());
}
#[test]
@@ -659,7 +644,7 @@ mod tests {
let schema = schema_builder.build();
let doc_json = r#"{"date": "2019-10-12T07:20:50.52+02:00"}"#;
let doc = TantivyDocument::parse_json(&schema, doc_json).unwrap();
let date = OwnedValue::from(doc.get_first(date_field).unwrap());
let date = doc.get_first(date_field).unwrap();
// Time zone is converted to UTC
assert_eq!("Date(2019-10-12T05:20:50.52Z)", format!("{date:?}"));
}

src/schema/field_value.rs (new file, 46 lines)
View File

@@ -0,0 +1,46 @@
use crate::schema::{Field, OwnedValue};
/// `FieldValue` holds together a `Field` and its `Value`.
#[allow(missing_docs)]
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct FieldValue {
pub field: Field,
pub value: OwnedValue,
}
impl FieldValue {
/// Constructor
pub fn new(field: Field, value: OwnedValue) -> FieldValue {
FieldValue { field, value }
}
/// Field accessor
pub fn field(&self) -> Field {
self.field
}
/// Value accessor
pub fn value(&self) -> &OwnedValue {
&self.value
}
}
impl From<FieldValue> for OwnedValue {
fn from(field_value: FieldValue) -> Self {
field_value.value
}
}
/// A helper wrapper for creating standard iterators
/// out of the fields iterator trait.
pub struct FieldValueIter<'a>(pub(crate) std::slice::Iter<'a, FieldValue>);
impl<'a> Iterator for FieldValueIter<'a> {
type Item = (Field, &'a OwnedValue);
fn next(&mut self) -> Option<Self::Item> {
self.0
.next()
.map(|field_value| (field_value.field, &field_value.value))
}
}
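A short usage sketch for the restored type, assuming `FieldValue` is re-exported from `tantivy::schema` as in pre-0.22 releases:

```rust
use tantivy::schema::{Field, FieldValue, OwnedValue};

fn main() {
    let field = Field::from_field_id(0);
    let fv = FieldValue::new(field, OwnedValue::Str("My title".to_string()));
    assert_eq!(fv.field(), field);
    assert_eq!(fv.value(), &OwnedValue::Str("My title".to_string()));

    // FieldValue -> OwnedValue via the From impl above.
    let owned: OwnedValue = fv.into();
    assert_eq!(owned, OwnedValue::Str("My title".to_string()));
}
```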

View File

@@ -1,6 +1,7 @@
use std::ops::BitOr;
use crate::schema::{DateOptions, NumericOptions, TextOptions};
use crate::schema::{NumericOptions, TextOptions};
use crate::DateOptions;
#[derive(Clone)]
pub struct StoredFlag;

src/schema/indexing_term.rs (new file, 147 lines)
View File

@@ -0,0 +1,147 @@
use std::net::Ipv6Addr;
use columnar::{MonotonicallyMappableToU128, MonotonicallyMappableToU64};
use super::date_time_options::DATE_TIME_PRECISION_INDEXED;
use super::Field;
use crate::fastfield::FastValue;
use crate::schema::Type;
use crate::DateTime;
/// IndexingTerm is the serialized representation of a term during indexing.
/// It can carry values of different types in serialized form.
///
/// It actually wraps a `Vec<u8>`.
///
/// The format is as follows:
/// `[field id: u32][serialized value]`
///
/// For JSON it is:
/// `[field id: u32][path id: u32][type code: u8][serialized value]`
///
/// The format is chosen to easily partition the terms by field during serialization, as all terms
/// are stored in one hashmap.
#[derive(Clone)]
pub(crate) struct IndexingTerm(Vec<u8>);
/// The number of bytes used for the field id by `IndexingTerm`.
const FIELD_ID_LENGTH: usize = 4;
impl IndexingTerm {
/// Create a new IndexingTerm.
pub fn new() -> IndexingTerm {
let mut data = Vec::with_capacity(FIELD_ID_LENGTH + 32);
data.resize(FIELD_ID_LENGTH, 0u8);
IndexingTerm(data)
}
/// Returns true if there are no value bytes.
pub fn is_empty(&self) -> bool {
self.0.len() == FIELD_ID_LENGTH
}
/// Removes the value bytes and sets the field.
pub(crate) fn clear_with_field(&mut self, field: Field) {
self.truncate_value_bytes(0);
self.0[0..4].clone_from_slice(field.field_id().to_be_bytes().as_ref());
}
/// Sets a u64 value in the term.
///
/// `u64` values are serialized using an (8-byte) big-endian
/// representation.
/// Big-endian encoding has the benefit of preserving
/// the natural order of the values.
pub fn set_u64(&mut self, val: u64) {
self.set_fast_value(val);
}
/// Sets a `DateTime` value in the term.
pub fn set_date(&mut self, val: DateTime) {
self.set_fast_value(val);
}
/// Sets a `i64` value in the term.
pub fn set_i64(&mut self, val: i64) {
self.set_fast_value(val);
}
/// Sets a `f64` value in the term.
pub fn set_f64(&mut self, val: f64) {
self.set_fast_value(val);
}
/// Sets a `bool` value in the term.
pub fn set_bool(&mut self, val: bool) {
self.set_fast_value(val);
}
fn set_fast_value<T: FastValue>(&mut self, val: T) {
self.truncate_value_bytes(0);
self.append_fast_value(val);
}
/// Sets an `Ipv6Addr` value in the term.
pub fn set_ip_addr(&mut self, val: Ipv6Addr) {
self.set_value_bytes(val.to_u128().to_be_bytes().as_ref());
}
/// Sets the value bytes of the term.
pub fn set_value_bytes(&mut self, bytes: &[u8]) {
self.truncate_value_bytes(0);
self.0.extend(bytes);
}
/// Append a type marker + fast value to a term.
/// This is used for the JSON type to append a fast value after the path.
///
/// It will not clear existing bytes.
pub(crate) fn append_type_and_fast_value<T: FastValue>(&mut self, val: T) {
self.0.push(T::to_type().to_code());
self.append_fast_value(val)
}
/// Append a fast value to a term.
///
/// It will not clear existing bytes.
pub fn append_fast_value<T: FastValue>(&mut self, val: T) {
let value = if T::to_type() == Type::Date {
DateTime::from_u64(val.to_u64())
.truncate(DATE_TIME_PRECISION_INDEXED)
.to_u64()
} else {
val.to_u64()
};
self.0.extend(value.to_be_bytes().as_ref());
}
/// Truncates the value bytes of the term; the field stays the same.
pub fn truncate_value_bytes(&mut self, len: usize) {
self.0.truncate(len + FIELD_ID_LENGTH);
}
/// The length of the value bytes.
pub fn len_bytes(&self) -> usize {
self.0.len() - FIELD_ID_LENGTH
}
/// Appends bytes to the term.
#[inline]
pub fn append_bytes(&mut self, bytes: &[u8]) {
self.0.extend_from_slice(bytes);
}
/// Returns the serialized representation of the term.
/// This includes the field id and the value bytes.
#[inline]
pub fn serialized_for_hashmap(&self) -> &[u8] {
self.0.as_ref()
}
}
pub fn get_field_from_indexing_term(bytes: &[u8]) -> Field {
let field_id_bytes: [u8; 4] = bytes[..4].try_into().unwrap();
Field::from_field_id(u32::from_be_bytes(field_id_bytes))
}
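Since `IndexingTerm` is crate-private, the documented layout can be illustrated with a standalone sketch (all names here are illustrative, not part of the API); it shows why big-endian encoding keeps the byte order of `[field id][value]` aligned with the numeric order:
fn serialize_indexing_term(field_id: u32, value: u64) -> Vec<u8> {
    // [field id: u32 BE][serialized value: u64 BE]
    let mut bytes = Vec::with_capacity(12);
    bytes.extend_from_slice(&field_id.to_be_bytes());
    bytes.extend_from_slice(&value.to_be_bytes());
    bytes
}
fn main() {
    let a = serialize_indexing_term(1, 100);
    let b = serialize_indexing_term(1, 200);
    // Lexicographic byte order matches numeric order thanks to big-endian.
    assert!(a < b);
    // The 4-byte field id prefix is what lets terms be partitioned by field.
    assert_eq!(&a[..4], &1u32.to_be_bytes());
}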

View File

@@ -109,11 +109,13 @@
pub mod document;
mod facet;
mod facet_options;
pub(crate) mod indexing_term;
mod schema;
pub(crate) mod term;
mod field_entry;
mod field_type;
mod field_value;
mod bytes_options;
mod date_time_options;
@@ -137,6 +139,7 @@ pub use self::facet_options::FacetOptions;
pub use self::field::Field;
pub use self::field_entry::FieldEntry;
pub use self::field_type::{FieldType, Type};
pub use self::field_value::FieldValue;
pub use self::flags::{COERCE, FAST, INDEXED, STORED};
pub use self::index_record_option::IndexRecordOption;
pub use self::ip_options::{IntoIpv6Addr, IpAddrOptions};

View File

@@ -645,15 +645,15 @@ mod tests {
let doc =
TantivyDocument::convert_named_doc(&schema, NamedFieldDocument(named_doc_map)).unwrap();
assert_eq!(
doc.get_all(title).map(OwnedValue::from).collect::<Vec<_>>(),
doc.get_all(title).collect::<Vec<_>>(),
vec![
OwnedValue::from("title1".to_string()),
OwnedValue::from("title2".to_string())
&OwnedValue::from("title1".to_string()),
&OwnedValue::from("title2".to_string())
]
);
assert_eq!(
doc.get_all(val).map(OwnedValue::from).collect::<Vec<_>>(),
vec![OwnedValue::from(14u64), OwnedValue::from(-1i64)]
doc.get_all(val).collect::<Vec<_>>(),
vec![&OwnedValue::from(14u64), &OwnedValue::from(-1i64)]
);
}
@@ -682,7 +682,7 @@ mod tests {
let schema = schema_builder.build();
{
let doc = TantivyDocument::parse_json(&schema, "{}").unwrap();
assert!(doc.field_values().next().is_none());
assert!(doc.field_values().is_empty());
}
{
let doc = TantivyDocument::parse_json(

View File

@@ -1,15 +1,13 @@
use std::hash::{Hash, Hasher};
use std::hash::Hash;
use std::net::Ipv6Addr;
use std::{fmt, str};
use columnar::{MonotonicallyMappableToU128, MonotonicallyMappableToU64};
use common::json_path_writer::{JSON_END_OF_PATH, JSON_PATH_SEGMENT_SEP_STR};
use common::JsonPathWriter;
use super::date_time_options::DATE_TIME_PRECISION_INDEXED;
use super::Field;
use crate::fastfield::FastValue;
use crate::json_utils::split_json_path;
use crate::schema::{Facet, Type};
use crate::DateTime;
@@ -20,59 +18,45 @@ use crate::DateTime;
/// 4 bytes are the field id, and the last byte is the type.
///
/// The serialized value `ValueBytes` is everything after the first 4 bytes (the field id).
#[derive(Clone)]
pub struct Term<B = Vec<u8>>(B)
where B: AsRef<[u8]>;
#[derive(Clone, Hash, PartialEq, Ord, PartialOrd, Eq)]
pub struct Term(Vec<u8>);
impl Default for Term {
fn default() -> Self {
Self::new()
}
}
/// The number of bytes used as metadata by `Term`.
const TERM_METADATA_LENGTH: usize = 5;
impl Term {
/// Create a new Term with a buffer with a given capacity.
pub fn with_capacity(capacity: usize) -> Term {
let mut data = Vec::with_capacity(TERM_METADATA_LENGTH + capacity);
/// Creates a new `Term`.
pub fn new() -> Term {
let mut data = Vec::with_capacity(TERM_METADATA_LENGTH + 32);
data.resize(TERM_METADATA_LENGTH, 0u8);
Term(data)
}
/// Creates a term from a json path.
///
/// The json path can address a nested value in a JSON object.
/// e.g. `{"k8s": {"node": {"id": 5}}}` can be addressed via `k8s.node.id`.
///
/// If a path segment contains dots and the `expand_dots_enabled` parameter is not
/// set, they need to be escaped with a backslash.
/// e.g. `{"k8s.node": {"id": 5}}` can be addressed via `k8s\.node.id`.
pub fn from_field_json_path(field: Field, json_path: &str, expand_dots_enabled: bool) -> Term {
let paths = split_json_path(json_path);
let mut json_path = JsonPathWriter::with_expand_dots(expand_dots_enabled);
for path in paths {
json_path.push(&path);
}
json_path.set_end();
let mut term = Term::with_type_and_field(Type::Json, field);
term.append_bytes(json_path.as_str().as_bytes());
term
}
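A usage sketch for this constructor, assuming a schema with a JSON field named "attributes" (the field name and the path are hypothetical):
use tantivy::schema::{Schema, TEXT};
use tantivy::Term;
fn json_term_sketch() {
    let mut schema_builder = Schema::builder();
    let json_field = schema_builder.add_json_field("attributes", TEXT);
    let _schema = schema_builder.build();
    // Addresses the nested value {"k8s": {"node": {"id": ...}}}.
    // With expand_dots disabled, a literal dot in a key must be escaped: "k8s\\.node.id".
    let term = Term::from_field_json_path(json_field, "k8s.node.id", false);
    assert_eq!(term.field(), json_field);
}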
pub(crate) fn with_type_and_field(typ: Type, field: Field) -> Term {
let mut term = Self::with_capacity(8);
term.set_field_and_type(field, typ);
term
Self::with_bytes_and_field_and_payload(typ, field, &[])
}
fn with_bytes_and_field_and_payload(typ: Type, field: Field, bytes: &[u8]) -> Term {
let mut term = Self::with_capacity(bytes.len());
let mut term = Self::new();
term.set_field_and_type(field, typ);
term.0.extend_from_slice(bytes);
term
}
/// Sets a fast value in the term.
///
/// Fast values are converted to u64 and then serialized using an (8-byte)
/// big-endian representation.
/// Big-endian encoding has the benefit of preserving
/// the natural order of the values.
fn from_fast_value<T: FastValue>(field: Field, val: &T) -> Term {
let mut term = Self::with_type_and_field(T::to_type(), field);
term.set_u64(val.to_u64());
term.set_bytes(val.to_u64().to_be_bytes().as_ref());
term
}
@@ -94,7 +78,7 @@ impl Term {
/// Builds a term given a field and an `Ipv6Addr` value
pub fn from_field_ip_addr(field: Field, ip_addr: Ipv6Addr) -> Term {
let mut term = Self::with_type_and_field(Type::IpAddr, field);
term.set_ip_addr(ip_addr);
term.set_bytes(ip_addr.to_u128().to_be_bytes().as_ref());
term
}
@@ -139,57 +123,17 @@ impl Term {
Term::with_bytes_and_field_and_payload(Type::Bytes, field, bytes)
}
/// Removes the value bytes and sets the field and type code.
pub(crate) fn clear_with_field_and_type(&mut self, typ: Type, field: Field) {
self.truncate_value_bytes(0);
self.set_field_and_type(field, typ);
}
/// Removes the value bytes and sets the type code.
pub fn clear_with_type(&mut self, typ: Type) {
self.truncate_value_bytes(0);
self.0[4] = typ.to_code();
}
/// Sets a u64 value in the term.
///
/// `u64` values are serialized using an (8-byte) big-endian
/// representation.
/// Big-endian encoding has the benefit of preserving
/// the natural order of the values.
pub fn set_u64(&mut self, val: u64) {
self.set_fast_value(val);
}
/// Sets a `i64` value in the term.
pub fn set_i64(&mut self, val: i64) {
self.set_fast_value(val);
}
/// Sets a `DateTime` value in the term.
pub fn set_date(&mut self, date: DateTime) {
self.set_fast_value(date);
}
/// Sets a `f64` value in the term.
pub fn set_f64(&mut self, val: f64) {
self.set_fast_value(val);
}
/// Sets a `bool` value in the term.
pub fn set_bool(&mut self, val: bool) {
self.set_fast_value(val);
}
fn set_fast_value<T: FastValue>(&mut self, val: T) {
self.set_bytes(val.to_u64().to_be_bytes().as_ref());
}
/// Append a type marker + fast value to a term.
/// This is used for the JSON type to append a fast value after the path.
///
/// It will not clear existing bytes.
pub fn append_type_and_fast_value<T: FastValue>(&mut self, val: T) {
pub(crate) fn append_type_and_fast_value<T: FastValue>(&mut self, val: T) {
self.0.push(T::to_type().to_code());
let value = if T::to_type() == Type::Date {
DateTime::from_u64(val.to_u64())
@@ -205,18 +149,13 @@ impl Term {
/// This is used for the JSON type to append a str after the path.
///
/// It will not clear existing bytes.
pub fn append_type_and_str(&mut self, val: &str) {
pub(crate) fn append_type_and_str(&mut self, val: &str) {
self.0.push(Type::Str.to_code());
self.0.extend(val.as_bytes().as_ref());
}
/// Sets an `Ipv6Addr` value in the term.
pub fn set_ip_addr(&mut self, val: Ipv6Addr) {
self.set_bytes(val.to_u128().to_be_bytes().as_ref());
}
/// Sets the value of a `Bytes` field.
pub fn set_bytes(&mut self, bytes: &[u8]) {
fn set_bytes(&mut self, bytes: &[u8]) {
self.truncate_value_bytes(0);
self.0.extend(bytes);
}
@@ -226,11 +165,6 @@ impl Term {
self.0.truncate(len + TERM_METADATA_LENGTH);
}
/// The length of the value bytes.
pub fn len_bytes(&self) -> usize {
self.0.len() - TERM_METADATA_LENGTH
}
/// Appends value bytes to the Term.
///
/// This function returns the segment that has just been added.
@@ -241,57 +175,19 @@ impl Term {
&mut self.0[len_before..]
}
/// Appends json path bytes to the Term.
/// If the path contains 0 bytes, they are replaced by the byte `b'0'`,
/// since the 0 byte is used to mark the end of the path.
///
/// This function returns the segment that has just been added.
#[inline]
pub fn append_path(&mut self, bytes: &[u8]) -> &mut [u8] {
let len_before = self.0.len();
if bytes.contains(&0u8) {
self.0
.extend(bytes.iter().map(|&b| if b == 0 { b'0' } else { b }));
} else {
self.0.extend_from_slice(bytes);
}
&mut self.0[len_before..]
}
}
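The 0-byte handling in `append_path` can be shown as a standalone sketch of the same replacement logic (an illustrative helper, not the method itself):
fn sanitize_path_bytes(bytes: &[u8]) -> Vec<u8> {
    // A 0 byte terminates the path, so 0 bytes inside the path itself
    // are rewritten to b'0' to keep the terminator unambiguous.
    bytes.iter().map(|&b| if b == 0 { b'0' } else { b }).collect()
}
fn main() {
    assert_eq!(sanitize_path_bytes(b"k8s\0node"), b"k8s0node".to_vec());
    assert_eq!(sanitize_path_bytes(b"plain"), b"plain".to_vec());
}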
impl<B> Term<B>
where B: AsRef<[u8]>
{
/// Wraps an object holding bytes.
pub fn wrap(data: B) -> Term<B> {
Term(data)
}
/// Return the type of the term.
pub fn typ(&self) -> Type {
self.value().typ()
}
/// Returns the field.
pub fn field(&self) -> Field {
let field_id_bytes: [u8; 4] = (&self.0.as_ref()[..4]).try_into().unwrap();
Field::from_field_id(u32::from_be_bytes(field_id_bytes))
}
/// Returns the serialized representation of the value.
/// (This includes neither the field id nor the value type.)
///
/// If the term is a string, its value is utf-8 encoded.
/// If the term is a u64, its value is encoded according
/// to `byteorder::BigEndian`.
pub fn serialized_value_bytes(&self) -> &[u8] {
&self.0.as_ref()[TERM_METADATA_LENGTH..]
}
/// Returns the value of the term: the value bytes,
/// or, for JSON, the path plus value. (This does not include the field.)
pub fn value(&self) -> ValueBytes<&[u8]> {
ValueBytes::wrap(&self.0.as_ref()[4..])
pub(crate) fn serialized_value_bytes(&self) -> &[u8] {
&self.0[TERM_METADATA_LENGTH..]
}
/// Returns the serialized representation of the term.
@@ -300,9 +196,22 @@ where B: AsRef<[u8]>
/// Do NOT rely on this byte representation in the index.
/// This value is likely to change in the future.
#[inline]
#[cfg(test)]
pub fn serialized_term(&self) -> &[u8] {
self.0.as_ref()
}
/// Returns the field.
pub fn field(&self) -> Field {
let field_id_bytes: [u8; 4] = (&self.0[..4]).try_into().unwrap();
Field::from_field_id(u32::from_be_bytes(field_id_bytes))
}
/// Returns the value of the term: the value bytes,
/// or, for JSON, the path plus value. (This does not include the field.)
pub fn value(&self) -> ValueBytes<&[u8]> {
ValueBytes::wrap(&self.0[4..])
}
}
/// ValueBytes represents a serialized value.
@@ -333,18 +242,10 @@ where B: AsRef<[u8]>
}
/// Return the type of the term.
pub fn typ(&self) -> Type {
pub(crate) fn typ(&self) -> Type {
Type::from_code(self.typ_code()).expect("The term has an invalid type code")
}
/// Returns the `u64` value stored in a term.
///
/// Returns `None` if the term is not of the u64 type, or if the term byte representation
/// is invalid.
pub fn as_u64(&self) -> Option<u64> {
self.get_fast_type::<u64>()
}
fn get_fast_type<T: FastValue>(&self) -> Option<T> {
if self.typ() != T::to_type() {
return None;
@@ -354,38 +255,6 @@ where B: AsRef<[u8]>
Some(T::from_u64(value_u64))
}
/// Returns the `i64` value stored in a term.
///
/// Returns `None` if the term is not of the i64 type, or if the term byte representation
/// is invalid.
pub fn as_i64(&self) -> Option<i64> {
self.get_fast_type::<i64>()
}
/// Returns the `f64` value stored in a term.
///
/// Returns `None` if the term is not of the f64 type, or if the term byte representation
/// is invalid.
pub fn as_f64(&self) -> Option<f64> {
self.get_fast_type::<f64>()
}
/// Returns the `bool` value stored in a term.
///
/// Returns `None` if the term is not of the bool type, or if the term byte representation
/// is invalid.
pub fn as_bool(&self) -> Option<bool> {
self.get_fast_type::<bool>()
}
/// Returns the `Date` value stored in a term.
///
/// Returns `None` if the term is not of the Date type, or if the term byte representation
/// is invalid.
pub fn as_date(&self) -> Option<DateTime> {
self.get_fast_type::<DateTime>()
}
/// Returns the text associated with the term.
///
/// Returns `None` if the field is not of string type
@@ -401,7 +270,7 @@ where B: AsRef<[u8]>
///
/// Returns `None` if the field is not of facet type
/// or if the bytes are not valid utf-8.
pub fn as_facet(&self) -> Option<Facet> {
pub(crate) fn as_facet(&self) -> Option<Facet> {
if self.typ() != Type::Facet {
return None;
}
@@ -412,7 +281,7 @@ where B: AsRef<[u8]>
/// Returns the bytes associated with the term.
///
/// Returns `None` if the field is not of bytes type.
pub fn as_bytes(&self) -> Option<&[u8]> {
pub(crate) fn as_bytes(&self) -> Option<&[u8]> {
if self.typ() != Type::Bytes {
return None;
}
@@ -420,7 +289,7 @@ where B: AsRef<[u8]>
}
/// Returns an `Ipv6Addr` value from the term.
pub fn as_ip_addr(&self) -> Option<Ipv6Addr> {
pub(crate) fn as_ip_addr(&self) -> Option<Ipv6Addr> {
if self.typ() != Type::IpAddr {
return None;
}
@@ -428,15 +297,6 @@ where B: AsRef<[u8]>
Some(Ipv6Addr::from_u128(ip_u128))
}
/// Returns the json path type.
///
/// Returns `None` if the value is not JSON.
pub fn json_path_type(&self) -> Option<Type> {
let json_value_bytes = self.as_json_value_bytes()?;
Some(json_value_bytes.typ())
}
/// Returns the json path bytes (including the JSON_END_OF_PATH byte),
/// and the encoded ValueBytes after the json path.
///
@@ -453,18 +313,6 @@ where B: AsRef<[u8]>
Some((json_path_bytes, ValueBytes::wrap(term)))
}
/// Returns the encoded ValueBytes after the json path.
///
/// Returns `None` if the value is not JSON.
pub(crate) fn as_json_value_bytes(&self) -> Option<ValueBytes<&[u8]>> {
if self.typ() != Type::Json {
return None;
}
let bytes = self.value_bytes();
let pos = bytes.iter().cloned().position(|b| b == JSON_END_OF_PATH)?;
Some(ValueBytes::wrap(&bytes[pos + 1..]))
}
/// Returns the serialized value of ValueBytes without the type.
fn value_bytes(&self) -> &[u8] {
&self.0.as_ref()[1..]
@@ -487,20 +335,20 @@ where B: AsRef<[u8]>
write_opt(f, s)?;
}
Type::U64 => {
write_opt(f, self.as_u64())?;
write_opt(f, self.get_fast_type::<u64>())?;
}
Type::I64 => {
write_opt(f, self.as_i64())?;
write_opt(f, self.get_fast_type::<i64>())?;
}
Type::F64 => {
write_opt(f, self.as_f64())?;
write_opt(f, self.get_fast_type::<f64>())?;
}
Type::Bool => {
write_opt(f, self.as_bool())?;
write_opt(f, self.get_fast_type::<bool>())?;
}
// TODO pretty print these types too.
Type::Date => {
write_opt(f, self.as_date())?;
write_opt(f, self.get_fast_type::<DateTime>())?;
}
Type::Facet => {
write_opt(f, self.as_facet())?;
@@ -526,40 +374,6 @@ where B: AsRef<[u8]>
}
}
impl<B> Ord for Term<B>
where B: AsRef<[u8]>
{
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.serialized_term().cmp(other.serialized_term())
}
}
impl<B> PartialOrd for Term<B>
where B: AsRef<[u8]>
{
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl<B> PartialEq for Term<B>
where B: AsRef<[u8]>
{
fn eq(&self, other: &Self) -> bool {
self.serialized_term() == other.serialized_term()
}
}
impl<B> Eq for Term<B> where B: AsRef<[u8]> {}
impl<B> Hash for Term<B>
where B: AsRef<[u8]>
{
fn hash<H: Hasher>(&self, state: &mut H) {
self.0.as_ref().hash(state)
}
}
fn write_opt<T: std::fmt::Debug>(f: &mut fmt::Formatter, val_opt: Option<T>) -> fmt::Result {
if let Some(val) = val_opt {
write!(f, "{val:?}")?;
@@ -567,13 +381,11 @@ fn write_opt<T: std::fmt::Debug>(f: &mut fmt::Formatter, val_opt: Option<T>) ->
Ok(())
}
impl<B> fmt::Debug for Term<B>
where B: AsRef<[u8]>
{
impl fmt::Debug for Term {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let field_id = self.field().field_id();
write!(f, "Term(field={field_id}, ")?;
let value_bytes = ValueBytes::wrap(&self.0.as_ref()[4..]);
let value_bytes = ValueBytes::wrap(&self.0[4..]);
value_bytes.debug_value_bytes(f)?;
write!(f, ")",)?;
Ok(())
@@ -595,38 +407,4 @@ mod tests {
assert_eq!(term.typ(), Type::Str);
assert_eq!(term.value().as_str(), Some("test"))
}
/// Size (in bytes) of the buffer of a fast value (u64, i64, f64, or date) term.
/// <field> + <type byte> + <value>
///
/// - <field> is a big-endian encoded u32 field id.
/// - <type byte>'s most significant bit expresses whether the term is a JSON term or not.
///   The remaining 7 bits are used to encode the type of the value.
///   If this is a JSON term, the type is the type of the leaf of the JSON.
///
/// - <value> is, if this is not a JSON term, a binary representation specific to the type.
///   If it is a JSON term, it is prefixed with the path that leads to this leaf value.
const FAST_VALUE_TERM_LEN: usize = 4 + 1 + 8;
#[test]
pub fn test_term_u64() {
let mut schema_builder = Schema::builder();
let count_field = schema_builder.add_u64_field("count", INDEXED);
let term = Term::from_field_u64(count_field, 983u64);
assert_eq!(term.field(), count_field);
assert_eq!(term.typ(), Type::U64);
assert_eq!(term.serialized_term().len(), FAST_VALUE_TERM_LEN);
assert_eq!(term.value().as_u64(), Some(983u64))
}
#[test]
pub fn test_term_bool() {
let mut schema_builder = Schema::builder();
let bool_field = schema_builder.add_bool_field("bool", INDEXED);
let term = Term::from_field_bool(bool_field, true);
assert_eq!(term.field(), bool_field);
assert_eq!(term.typ(), Type::Bool);
assert_eq!(term.serialized_term().len(), FAST_VALUE_TERM_LEN);
assert_eq!(term.value().as_bool(), Some(true))
}
}

View File

@@ -12,8 +12,8 @@ use std::collections::HashMap;
use common::ByteCount;
use serde::{Deserialize, Serialize};
use crate::index::SegmentComponent;
use crate::schema::Field;
use crate::SegmentComponent;
/// Enum containing any of the possible space usage results for segment components.
pub enum ComponentSpaceUsage {
@@ -115,7 +115,7 @@ impl SegmentSpaceUsage {
/// Use the components directly if this is somehow in performance critical code.
pub fn component(&self, component: SegmentComponent) -> ComponentSpaceUsage {
use self::ComponentSpaceUsage::*;
use crate::index::SegmentComponent::*;
use crate::SegmentComponent::*;
match component {
Postings => PerField(self.postings().clone()),
Positions => PerField(self.positions().clone()),

View File

@@ -59,8 +59,9 @@ pub mod tests {
use super::*;
use crate::directory::{Directory, RamDirectory, WritePtr};
use crate::fastfield::AliveBitSet;
use crate::schema::document::Value;
use crate::schema::{
self, Schema, TantivyDocument, TextFieldIndexing, TextOptions, Value, STORED, TEXT,
self, Schema, TantivyDocument, TextFieldIndexing, TextOptions, STORED, TEXT,
};
use crate::{Index, IndexWriter, Term};
@@ -91,8 +92,8 @@ pub mod tests {
StoreWriter::new(writer, compressor, blocksize, separate_thread).unwrap();
for i in 0..num_docs {
let mut doc = TantivyDocument::default();
doc.add_text(field_body, LOREM);
doc.add_text(field_title, format!("Doc {i}"));
doc.add_field_value(field_body, LOREM.to_string());
doc.add_field_value(field_title, format!("Doc {i}"));
store_writer.store(&doc, &schema).unwrap();
}
store_writer.close().unwrap();
@@ -118,11 +119,10 @@ pub mod tests {
let store = StoreReader::open(store_file, 10)?;
for i in 0..NUM_DOCS as u32 {
assert_eq!(
store
*store
.get::<TantivyDocument>(i)?
.get_first(field_title)
.unwrap()
.as_value()
.as_str()
.unwrap(),
format!("Doc {i}")
@@ -131,13 +131,7 @@ pub mod tests {
for doc in store.iter::<TantivyDocument>(Some(&alive_bitset)) {
let doc = doc?;
let title_content = doc
.get_first(field_title)
.unwrap()
.as_value()
.as_str()
.unwrap()
.to_string();
let title_content = doc.get_first(field_title).unwrap().as_str().unwrap();
if !title_content.starts_with("Doc ") {
panic!("unexpected title_content {title_content}");
}

View File

@@ -18,8 +18,6 @@ use crate::schema::document::{BinaryDocumentDeserializer, DocumentDeserialize};
use crate::space_usage::StoreSpaceUsage;
use crate::store::index::Checkpoint;
use crate::DocId;
#[cfg(feature = "quickwit")]
use crate::Executor;
pub(crate) const DOCSTORE_CACHE_CAPACITY: usize = 100;
@@ -343,11 +341,7 @@ impl StoreReader {
/// In most cases use [`get_async`](Self::get_async)
///
/// Loads and decompresses a block asynchronously.
async fn read_block_async(
&self,
checkpoint: &Checkpoint,
executor: &Executor,
) -> io::Result<Block> {
async fn read_block_async(&self, checkpoint: &Checkpoint) -> io::Result<Block> {
let cache_key = checkpoint.byte_range.start;
if let Some(block) = self.cache.get_from_cache(checkpoint.byte_range.start) {
return Ok(block);
@@ -359,12 +353,8 @@ impl StoreReader {
.read_bytes_async()
.await?;
let decompressor = self.decompressor;
let maybe_decompressed_block = executor
.spawn_blocking(move || decompressor.decompress(compressed_block.as_ref()))
.await
.expect("decompression panicked");
let decompressed_block = OwnedBytes::new(maybe_decompressed_block?);
let decompressed_block =
OwnedBytes::new(self.decompressor.decompress(compressed_block.as_ref())?);
self.cache
.put_into_cache(cache_key, decompressed_block.clone());
@@ -373,23 +363,15 @@ impl StoreReader {
}
/// Reads raw bytes of a given document asynchronously.
pub async fn get_document_bytes_async(
&self,
doc_id: DocId,
executor: &Executor,
) -> crate::Result<OwnedBytes> {
pub async fn get_document_bytes_async(&self, doc_id: DocId) -> crate::Result<OwnedBytes> {
let checkpoint = self.block_checkpoint(doc_id)?;
let block = self.read_block_async(&checkpoint, executor).await?;
let block = self.read_block_async(&checkpoint).await?;
Self::get_document_bytes_from_block(block, doc_id, &checkpoint)
}
/// Fetches a document asynchronously. Async version of [`get`](Self::get).
pub async fn get_async<D: DocumentDeserialize>(
&self,
doc_id: DocId,
executor: &Executor,
) -> crate::Result<D> {
let mut doc_bytes = self.get_document_bytes_async(doc_id, executor).await?;
pub async fn get_async<D: DocumentDeserialize>(&self, doc_id: DocId) -> crate::Result<D> {
let mut doc_bytes = self.get_document_bytes_async(doc_id).await?;
let deserializer = BinaryDocumentDeserializer::from_reader(&mut doc_bytes)
.map_err(crate::TantivyError::from)?;
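With the `Executor` parameter gone, the async accessors only need the doc id. A hedged usage sketch, assuming a build with the quickwit feature (which gates these async methods) and an existing `StoreReader`:
use tantivy::schema::TantivyDocument;
use tantivy::store::StoreReader;
use tantivy::DocId;
async fn fetch_doc(store: &StoreReader, doc_id: DocId) -> tantivy::Result<TantivyDocument> {
    // Decompression now runs inline on the calling task instead of
    // being spawned onto a blocking executor.
    store.get_async::<TantivyDocument>(doc_id).await
}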
@@ -403,7 +385,8 @@ mod tests {
use super::*;
use crate::directory::RamDirectory;
use crate::schema::{Field, TantivyDocument, Value};
use crate::schema::document::Value;
use crate::schema::{Field, TantivyDocument};
use crate::store::tests::write_lorem_ipsum_store;
use crate::store::Compressor;
use crate::Directory;
@@ -411,7 +394,7 @@ mod tests {
const BLOCK_SIZE: usize = 16_384;
fn get_text_field<'a>(doc: &'a TantivyDocument, field: &'a Field) -> Option<&'a str> {
doc.get_first(*field).and_then(|f| f.as_value().as_str())
doc.get_first(*field).and_then(|f| f.as_str())
}
#[test]

View File

@@ -93,7 +93,7 @@ fn open_fst_index(fst_file: FileSlice) -> io::Result<tantivy_fst::Map<OwnedBytes
let fst = Fst::new(bytes).map_err(|err| {
io::Error::new(
io::ErrorKind::InvalidData,
format!("Fst data is corrupted: {err:?}"),
format!("Fst data is corrupted: {:?}", err),
)
})?;
Ok(tantivy_fst::Map::from(fst))

View File

@@ -95,7 +95,7 @@ fn test_term_dictionary_simple() -> crate::Result<()> {
#[test]
fn test_term_dictionary_stream() -> crate::Result<()> {
let ids: Vec<_> = (0u32..10_000u32)
.map(|i| (format!("doc{i:0>6}"), i))
.map(|i| (format!("doc{:0>6}", i), i))
.collect();
let buffer: Vec<u8> = {
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
@@ -156,7 +156,7 @@ fn test_stream_high_range_prefix_suffix() -> crate::Result<()> {
#[test]
fn test_stream_range() -> crate::Result<()> {
let ids: Vec<_> = (0u32..10_000u32)
.map(|i| (format!("doc{i:0>6}"), i))
.map(|i| (format!("doc{:0>6}", i), i))
.collect();
let buffer: Vec<u8> = {
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();

View File

@@ -96,7 +96,7 @@ mod tests {
{
let mut add_token = |token: &Token| {
let facet = Facet::from_encoded(token.text.as_bytes().to_owned()).unwrap();
tokens.push(format!("{facet}"));
tokens.push(format!("{}", facet));
};
FacetTokenizer::default()
.token_stream(facet.encoded_str())
@@ -116,7 +116,7 @@ mod tests {
{
let mut add_token = |token: &Token| {
let facet = Facet::from_encoded(token.text.as_bytes().to_owned()).unwrap(); // ok test
tokens.push(format!("{facet}"));
tokens.push(format!("{}", facet));
};
FacetTokenizer::default()
.token_stream(facet.encoded_str()) // ok test