Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2026-01-07 17:42:55 +00:00

Compare commits: 1 commit, prefix-phr... against quickwit-r...

Commit f820d42151

@@ -15,6 +15,7 @@ rust-version = "1.63"
 exclude = ["benches/*.json", "benches/*.txt"]
 
 [dependencies]
+# Switch back to the non-forked oneshot crate once https://github.com/faern/oneshot/pull/35 is merged
 oneshot = "0.1.7"
 base64 = "0.22.0"
 byteorder = "1.4.3"
@@ -52,7 +53,7 @@ smallvec = "1.8.0"
 rayon = "1.5.2"
 lru = "0.12.0"
 fastdivide = "0.4.0"
-itertools = "0.13.0"
+itertools = "0.12.0"
 measure_time = "0.8.2"
 arc-swap = "1.5.0"
 
@@ -63,7 +64,7 @@ query-grammar = { version = "0.22.0", path = "./query-grammar", package = "tanti
 tantivy-bitpacker = { version = "0.6", path = "./bitpacker" }
 common = { version = "0.7", path = "./common/", package = "tantivy-common" }
 tokenizer-api = { version = "0.3", path = "./tokenizer-api", package = "tantivy-tokenizer-api" }
-sketches-ddsketch = { version = "0.3.0", features = ["use_serde"] }
+sketches-ddsketch = { version = "0.2.1", features = ["use_serde"] }
 futures-util = { version = "0.3.28", optional = true }
 fnv = "1.0.7"
 
@@ -71,7 +72,7 @@ fnv = "1.0.7"
 winapi = "0.3.9"
 
 [dev-dependencies]
-binggan = "0.8.0"
+binggan = "0.6.2"
 rand = "0.8.5"
 maplit = "1.0.2"
 matches = "0.1.9"

@@ -47,7 +47,6 @@ fn bench_agg(mut group: InputGroup<Index>) {
     register!(group, average_f64);
     register!(group, average_f64_u64);
     register!(group, stats_f64);
-    register!(group, extendedstats_f64);
     register!(group, percentiles_f64);
     register!(group, terms_few);
     register!(group, terms_many);
@@ -106,12 +105,7 @@ fn stats_f64(index: &Index) {
     });
     exec_term_with_agg(index, agg_req)
 }
-fn extendedstats_f64(index: &Index) {
-    let agg_req = json!({
-        "extendedstats_f64": { "extended_stats": { "field": "score_f64", } }
-    });
-    exec_term_with_agg(index, agg_req)
-}
 fn percentiles_f64(index: &Index) {
     let agg_req = json!({
         "mypercentiles": {
@@ -355,7 +349,7 @@ fn get_test_index_bench(cardinality: Cardinality) -> tantivy::Result<Index> {
     let lg_norm = rand_distr::LogNormal::new(2.996f64, 0.979f64).unwrap();
 
     let many_terms_data = (0..150_000)
-        .map(|num| format!("author{num}"))
+        .map(|num| format!("author{}", num))
        .collect::<Vec<_>>();
     {
         let mut rng = StdRng::from_seed([1u8; 32]);

@@ -18,7 +18,7 @@ fn benchmark(
         benchmark_dynamic_json(b, input, schema, commit, parse_json)
     } else {
         _benchmark(b, input, schema, commit, parse_json, |schema, doc_json| {
-            TantivyDocument::parse_json(schema, doc_json).unwrap()
+            TantivyDocument::parse_json(&schema, doc_json).unwrap()
         })
     }
 }
@@ -90,7 +90,8 @@ fn benchmark_dynamic_json(
 ) {
     let json_field = schema.get_field("json").unwrap();
     _benchmark(b, input, schema, commit, parse_json, |_schema, doc_json| {
-        let json_val: serde_json::Value = serde_json::from_str(doc_json).unwrap();
+        let json_val: serde_json::Map<String, serde_json::Value> =
+            serde_json::from_str(doc_json).unwrap();
         tantivy::doc!(json_field=>json_val)
     })
 }
@@ -137,16 +138,15 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
     for (prefix, schema, is_dynamic) in benches {
         for commit in [false, true] {
             let suffix = if commit { "with-commit" } else { "no-commit" };
-            {
-                let parse_json = false;
+            for parse_json in [false] {
                 // for parse_json in [false, true] {
                 let suffix = if parse_json {
-                    format!("{suffix}-with-json-parsing")
+                    format!("{}-with-json-parsing", suffix)
                 } else {
-                    suffix.to_string()
+                    format!("{}", suffix)
                 };
 
-                let bench_name = format!("{prefix}{suffix}");
+                let bench_name = format!("{}{}", prefix, suffix);
                 group.bench_function(bench_name, |b| {
                     benchmark(b, HDFS_LOGS, schema.clone(), commit, parse_json, is_dynamic)
                 });

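Note on the `format!` churn in this hunk (and in several hunks below): the left-hand side uses Rust's inline format arguments, stabilized in Rust 1.58, where `{suffix}` captures the variable from the enclosing scope; the right-hand side spells the same thing with positional arguments, which also compiles on pre-1.58 toolchains. Both produce identical output. A minimal illustration:

fn main() {
    let suffix = "no-commit";
    // Inline (captured-identifier) form, Rust 1.58+:
    let a = format!("{suffix}-with-json-parsing");
    // Equivalent positional form, as used on the right-hand side of this diff:
    let b = format!("{}-with-json-parsing", suffix);
    assert_eq!(a, b);
}
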
@@ -9,7 +9,7 @@ description = "column oriented storage for tantivy"
 categories = ["database-implementations", "data-structures", "compression"]
 
 [dependencies]
-itertools = "0.13.0"
+itertools = "0.12.0"
 fastdivide = "0.4.0"
 
 stacker = { version= "0.3", path = "../stacker", package="tantivy-stacker"}
@@ -23,12 +23,6 @@ downcast-rs = "1.2.0"
 proptest = "1"
 more-asserts = "0.3.1"
 rand = "0.8"
-binggan = "0.8.1"
-
-[[bench]]
-name = "bench_merge"
-harness = false
-
 
 [features]
 unstable = []

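Note: the `[[bench]]` table removed above is what registers the `bench_merge` file (deleted in the next hunk) with Cargo. `harness = false` tells Cargo not to link the default libtest bench harness, so the bench file is built as a plain binary and supplies its own `fn main` (here driven by binggan's runner). A hypothetical minimal custom-harness bench, purely for illustration:

// benches/my_bench.rs, built as a plain binary because of `harness = false`.
use std::time::Instant;

fn main() {
    let start = Instant::now();
    let total: u64 = (0..1_000_000u64).sum();
    println!("sum={total}, elapsed={:?}", start.elapsed());
}
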
@@ -1,101 +0,0 @@
-#![feature(test)]
-extern crate test;
-
-use core::fmt;
-use std::fmt::{Display, Formatter};
-
-use binggan::{black_box, BenchRunner};
-use tantivy_columnar::*;
-
-enum Card {
-    Multi,
-    Sparse,
-    Dense,
-}
-impl Display for Card {
-    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
-        match self {
-            Card::Multi => write!(f, "multi"),
-            Card::Sparse => write!(f, "sparse"),
-            Card::Dense => write!(f, "dense"),
-        }
-    }
-}
-
-const NUM_DOCS: u32 = 100_000;
-
-fn generate_columnar(card: Card, num_docs: u32) -> ColumnarReader {
-    use tantivy_columnar::ColumnarWriter;
-
-    let mut columnar_writer = ColumnarWriter::default();
-
-    match card {
-        Card::Multi => {
-            columnar_writer.record_numerical(0, "price", 10u64);
-            columnar_writer.record_numerical(0, "price", 10u64);
-        }
-        _ => {}
-    }
-
-    for i in 0..num_docs {
-        match card {
-            Card::Multi | Card::Sparse => {
-                if i % 8 == 0 {
-                    columnar_writer.record_numerical(i, "price", i as u64);
-                }
-            }
-            Card::Dense => {
-                if i % 6 == 0 {
-                    columnar_writer.record_numerical(i, "price", i as u64);
-                }
-            }
-        }
-    }
-
-    let mut wrt: Vec<u8> = Vec::new();
-    columnar_writer.serialize(num_docs, None, &mut wrt).unwrap();
-
-    ColumnarReader::open(wrt).unwrap()
-}
-fn main() {
-    let mut inputs = Vec::new();
-
-    let mut add_combo = |card1: Card, card2: Card| {
-        inputs.push((
-            format!("merge_{card1}_and_{card2}"),
-            vec![
-                generate_columnar(card1, NUM_DOCS),
-                generate_columnar(card2, NUM_DOCS),
-            ],
-        ));
-    };
-
-    add_combo(Card::Multi, Card::Multi);
-    add_combo(Card::Dense, Card::Dense);
-    add_combo(Card::Sparse, Card::Sparse);
-    add_combo(Card::Sparse, Card::Dense);
-    add_combo(Card::Multi, Card::Dense);
-    add_combo(Card::Multi, Card::Sparse);
-
-    let runner: BenchRunner = BenchRunner::new();
-    let mut group = runner.new_group();
-    for (input_name, columnar_readers) in inputs.iter() {
-        group.register_with_input(
-            input_name,
-            columnar_readers,
-            move |columnar_readers: &Vec<ColumnarReader>| {
-                let mut out = vec![];
-                let columnar_readers = columnar_readers.iter().collect::<Vec<_>>();
-                let merge_row_order = StackMergeOrder::stack(&columnar_readers[..]);
-
-                let _ = black_box(merge_columnar(
-                    &columnar_readers,
-                    &[],
-                    merge_row_order.into(),
-                    &mut out,
-                ));
-            },
-        );
-    }
-    group.run();
-}

@@ -196,7 +196,6 @@ impl Set<RowId> for OptionalIndex {
         } = row_addr_from_row_id(doc_id);
         let block_meta = self.block_metas[block_id as usize];
         let block = self.block(block_meta);
 
         let block_offset_row_id = match block {
             Block::Dense(dense_block) => dense_block.rank(in_block_row_id),
             Block::Sparse(sparse_block) => sparse_block.rank(in_block_row_id),

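Note: `rank(i)` in the context lines above is the classic succinct-structure operation, the number of set positions strictly before `i` within a block, which is what maps a row id to its slot in the packed value column. A minimal sketch over a plain bitmap of 64-bit words (an illustrative layout, not the actual dense/sparse block encoding):

// Count the 1-bits strictly before `pos` in a bitmap of 64-bit words.
fn rank(words: &[u64], pos: u32) -> u32 {
    let word = (pos / 64) as usize;
    let bit = pos % 64; // 0..=63, so the shift below cannot overflow
    let full: u32 = words[..word].iter().map(|w| w.count_ones()).sum();
    full + (words[word] & ((1u64 << bit) - 1)).count_ones()
}

fn main() {
    let words = [0b1011u64]; // rows 0, 1 and 3 are present
    assert_eq!(rank(&words, 3), 2); // two present rows precede row 3
}
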
@@ -151,7 +151,7 @@ pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
     (result, vlen)
 }
 /// Write a `u32` as a vint payload.
-pub fn write_u32_vint<W: io::Write + ?Sized>(val: u32, writer: &mut W) -> io::Result<()> {
+pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
     let mut buf = [0u8; 8];
     let data = serialize_vint_u32(val, &mut buf);
     writer.write_all(data)

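Note: the only change in this hunk is dropping the `?Sized` relaxation from the writer bound. With `W: io::Write + ?Sized`, `W` may be the unsized type `dyn io::Write` itself, so a `&mut dyn io::Write` can be passed directly; with the plain bound, `W` must be `Sized`, and a trait object only fits behind an extra reference. A minimal sketch of the difference:

use std::io::{self, Write};

fn write_relaxed<W: Write + ?Sized>(w: &mut W) -> io::Result<()> {
    w.write_all(b"x") // accepts W = dyn Write
}

fn write_strict<W: Write>(w: &mut W) -> io::Result<()> {
    w.write_all(b"x") // W must be Sized
}

fn main() -> io::Result<()> {
    let mut buf: Vec<u8> = Vec::new();
    let w: &mut dyn Write = &mut buf;
    write_relaxed(w)?; // W = dyn Write, fine
    let mut w2: &mut dyn Write = &mut buf;
    write_strict(&mut w2)?; // needs &mut &mut dyn Write: W = &mut dyn Write
    Ok(())
}
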
@@ -4,7 +4,7 @@
 
 use tantivy::collector::TopDocs;
 use tantivy::query::QueryParser;
-use tantivy::schema::{DateOptions, Document, Schema, Value, INDEXED, STORED, STRING};
+use tantivy::schema::{DateOptions, Document, OwnedValue, Schema, INDEXED, STORED, STRING};
 use tantivy::{Index, IndexWriter, TantivyDocument};
 
 fn main() -> tantivy::Result<()> {
@@ -61,12 +61,10 @@ fn main() -> tantivy::Result<()> {
     assert_eq!(count_docs.len(), 1);
     for (_score, doc_address) in count_docs {
         let retrieved_doc = searcher.doc::<TantivyDocument>(doc_address)?;
-        assert!(retrieved_doc
-            .get_first(occurred_at)
-            .unwrap()
-            .as_value()
-            .as_datetime()
-            .is_some(),);
+        assert!(matches!(
+            retrieved_doc.get_first(occurred_at),
+            Some(OwnedValue::Date(_))
+        ));
         assert_eq!(
             retrieved_doc.to_json(&schema),
             r#"{"event":["comment"],"occurred_at":["2022-06-22T13:00:00.22Z"]}"#

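Note: the added lines replace a chain of accessor calls with the std `matches!` macro, which tests an expression against a pattern and yields a bool. A minimal illustration with a simplified stand-in type (not tantivy's actual `OwnedValue`):

// Simplified stand-in for tantivy's owned document value, illustration only.
enum OwnedValue {
    Date(i64),
    Text(String),
}

fn main() {
    let v = Some(OwnedValue::Date(1_655_902_800));
    // `matches!` expands to a match that returns true when the pattern fits:
    assert!(matches!(v, Some(OwnedValue::Date(_))));
    let t = Some(OwnedValue::Text("comment".to_string()));
    assert!(!matches!(t, Some(OwnedValue::Date(_))));
}
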
@@ -51,7 +51,7 @@ fn main() -> tantivy::Result<()> {
     let reader = index.reader()?;
     let searcher = reader.searcher();
     {
-        let facets = [
+        let facets = vec![
             Facet::from("/ingredient/egg"),
             Facet::from("/ingredient/oil"),
             Facet::from("/ingredient/garlic"),
@@ -94,8 +94,9 @@ fn main() -> tantivy::Result<()> {
                 .doc::<TantivyDocument>(*doc_id)
                 .unwrap()
                 .get_first(title)
-                .and_then(|v| v.as_str().map(|el| el.to_string()))
+                .and_then(|v| v.as_str())
                 .unwrap()
+                .to_owned()
             })
             .collect();
         assert_eq!(titles, vec!["Fried egg", "Egg rolls"]);

@@ -61,7 +61,7 @@ fn main() -> tantivy::Result<()> {
                 debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
                 limbs and branches that arch over the pool"
             ))?;
-            println!("add doc {i} from thread 1 - opstamp {opstamp}");
+            println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
             thread::sleep(Duration::from_millis(20));
         }
         Result::<(), TantivyError>::Ok(())
@@ -82,7 +82,7 @@ fn main() -> tantivy::Result<()> {
                     body => "Some great book description..."
                 ))?
             };
-            println!("add doc {i} from thread 2 - opstamp {opstamp}");
+            println!("add doc {} from thread 2 - opstamp {}", i, opstamp);
             thread::sleep(Duration::from_millis(10));
         }
         Result::<(), TantivyError>::Ok(())

@@ -1,4 +1,3 @@
-use std::borrow::Cow;
 use std::iter::once;
 
 use nom::branch::alt;
@@ -20,7 +19,7 @@ use crate::Occur;
 // Note: '-' char is only forbidden at the beginning of a field name, would be clearer to add it to
 // special characters.
 const SPECIAL_CHARS: &[char] = &[
-    '+', '^', '`', ':', '{', '}', '"', '\'', '[', ']', '(', ')', '!', '\\', '*', ' ',
+    '+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '!', '\\', '*', ' ',
 ];
 
 /// consume a field name followed by colon. Return the field name with escape sequence
@@ -42,92 +41,36 @@ fn field_name(inp: &str) -> IResult<&str, String> {
     )(inp)
 }
 
-const ESCAPE_IN_WORD: &[char] = &['^', '`', ':', '{', '}', '"', '\'', '[', ']', '(', ')', '\\'];
-
-fn interpret_escape(source: &str) -> String {
-    let mut res = String::with_capacity(source.len());
-    let mut in_escape = false;
-    let require_escape = |c: char| c.is_whitespace() || ESCAPE_IN_WORD.contains(&c) || c == '-';
-
-    for c in source.chars() {
-        if in_escape {
-            if !require_escape(c) {
-                // we re-add the escape sequence
-                res.push('\\');
-            }
-            res.push(c);
-            in_escape = false;
-        } else if c == '\\' {
-            in_escape = true;
-        } else {
-            res.push(c);
-        }
-    }
-    res
-}
-
 /// Consume a word outside of any context.
 // TODO should support escape sequences
-fn word(inp: &str) -> IResult<&str, Cow<str>> {
+fn word(inp: &str) -> IResult<&str, &str> {
     map_res(
         recognize(tuple((
-            alt((
-                preceded(char('\\'), anychar),
-                satisfy(|c| !c.is_whitespace() && !ESCAPE_IN_WORD.contains(&c) && c != '-'),
-            )),
-            many0(alt((
-                preceded(char('\\'), anychar),
-                satisfy(|c: char| !c.is_whitespace() && !ESCAPE_IN_WORD.contains(&c)),
-            ))),
+            satisfy(|c| {
+                !c.is_whitespace()
+                    && !['-', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
+            }),
+            many0(satisfy(|c: char| {
+                !c.is_whitespace() && ![':', '^', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
+            })),
         ))),
         |s| match s {
             "OR" | "AND" | "NOT" | "IN" => Err(Error::new(inp, ErrorKind::Tag)),
-            s if s.contains('\\') => Ok(Cow::Owned(interpret_escape(s))),
-            s => Ok(Cow::Borrowed(s)),
+            _ => Ok(s),
         },
     )(inp)
 }
 
-fn word_infallible(
-    delimiter: &str,
-    emit_error: bool,
-) -> impl Fn(&str) -> JResult<&str, Option<Cow<str>>> + '_ {
-    // emit error is set when receiving an unescaped `:` should emit an error
-    move |inp| {
-        map(
-            opt_i_err(
-                preceded(
-                    multispace0,
-                    recognize(many1(alt((
-                        preceded(char::<&str, _>('\\'), anychar),
-                        satisfy(|c| !c.is_whitespace() && !delimiter.contains(c)),
-                    )))),
-                ),
-                "expected word",
+fn word_infallible(delimiter: &str) -> impl Fn(&str) -> JResult<&str, Option<&str>> + '_ {
+    |inp| {
+        opt_i_err(
+            preceded(
+                multispace0,
+                recognize(many1(satisfy(|c| {
+                    !c.is_whitespace() && !delimiter.contains(c)
+                }))),
            ),
-            |(opt_s, mut errors)| match opt_s {
-                Some(s) => {
-                    if emit_error
-                        && (s
-                            .as_bytes()
-                            .windows(2)
-                            .any(|window| window[0] != b'\\' && window[1] == b':')
-                            || s.starts_with(':'))
-                    {
-                        errors.push(LenientErrorInternal {
-                            pos: inp.len(),
-                            message: "parsed possible invalid field as term".to_string(),
-                        });
-                    }
-                    if s.contains('\\') {
-                        (Some(Cow::Owned(interpret_escape(s))), errors)
-                    } else {
-                        (Some(Cow::Borrowed(s)), errors)
-                    }
-                }
-                None => (None, errors),
-            },
-        )(inp)
+            "expected word",
+        )(inp)
     }
 }
@@ -216,7 +159,7 @@ fn simple_term_infallible(
             (value((), char('\'')), simple_quotes),
         ),
         // numbers are parsed with words in this case, as we allow string starting with a -
-        map(word_infallible(delimiter, true), |(text, errors)| {
+        map(word_infallible(delimiter), |(text, errors)| {
            (text.map(|text| (Delimiter::None, text.to_string())), errors)
        }),
    )(inp)
@@ -379,6 +322,15 @@ fn literal_no_group_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>>
         |((field_name, _, leaf), mut errors)| {
             (
                 leaf.map(|leaf| {
+                    if matches!(&leaf, UserInputLeaf::Literal(literal)
+                        if literal.phrase.contains(':') && literal.delimiter == Delimiter::None)
+                        && field_name.is_none()
+                    {
+                        errors.push(LenientErrorInternal {
+                            pos: inp.len(),
+                            message: "parsed possible invalid field as term".to_string(),
+                        });
+                    }
                     if matches!(&leaf, UserInputLeaf::Literal(literal)
                         if literal.phrase == "NOT" && literal.delimiter == Delimiter::None)
                         && field_name.is_none()
@@ -497,20 +449,20 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
     tuple_infallible((
         opt_i(anychar),
         space0_infallible,
-        word_infallible("]}", false),
+        word_infallible("]}"),
         space1_infallible,
         opt_i_err(
             terminated(tag("TO"), alt((value((), multispace1), value((), eof)))),
             "missing keyword TO",
         ),
-        word_infallible("]}", false),
+        word_infallible("]}"),
         opt_i_err(one_of("]}"), "missing range delimiter"),
     )),
     |(
         (lower_bound_kind, _multispace0, lower, _multispace1, to, upper, upper_bound_kind),
         errs,
     )| {
-        let lower_bound = match (lower_bound_kind, lower.as_deref()) {
+        let lower_bound = match (lower_bound_kind, lower) {
            (_, Some("*")) => UserInputBound::Unbounded,
            (_, None) => UserInputBound::Unbounded,
            // if it is some, TO was actually the bound (i.e. [TO TO something])
@@ -519,7 +471,7 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
             (Some('{'), Some(bound)) => UserInputBound::Exclusive(bound.to_string()),
             _ => unreachable!("precondition failed, range did not start with [ or {{"),
         };
-        let upper_bound = match (upper_bound_kind, upper.as_deref()) {
+        let upper_bound = match (upper_bound_kind, upper) {
             (_, Some("*")) => UserInputBound::Unbounded,
             (_, None) => UserInputBound::Unbounded,
             (Some(']'), Some(bound)) => UserInputBound::Inclusive(bound.to_string()),
@@ -536,7 +488,7 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
     (
         (
             value((), tag(">=")),
-            map(word_infallible("", false), |(bound, err)| {
+            map(word_infallible(""), |(bound, err)| {
                 (
                     (
                         bound
@@ -550,7 +502,7 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
         ),
         (
             value((), tag("<=")),
-            map(word_infallible("", false), |(bound, err)| {
+            map(word_infallible(""), |(bound, err)| {
                 (
                     (
                         UserInputBound::Unbounded,
@@ -564,7 +516,7 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
         ),
         (
             value((), tag(">")),
-            map(word_infallible("", false), |(bound, err)| {
+            map(word_infallible(""), |(bound, err)| {
                 (
                     (
                         bound
@@ -578,7 +530,7 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
         ),
         (
             value((), tag("<")),
-            map(word_infallible("", false), |(bound, err)| {
+            map(word_infallible(""), |(bound, err)| {
                 (
                     (
                         UserInputBound::Unbounded,
@@ -1205,12 +1157,6 @@ mod test {
         test_parse_query_to_ast_helper("weight: <= 70", "\"weight\":{\"*\" TO \"70\"]");
 
         test_parse_query_to_ast_helper("weight: <= 70.5", "\"weight\":{\"*\" TO \"70.5\"]");
-
-        test_parse_query_to_ast_helper(">a", "{\"a\" TO \"*\"}");
-        test_parse_query_to_ast_helper(">=a", "[\"a\" TO \"*\"}");
-        test_parse_query_to_ast_helper("<a", "{\"*\" TO \"a\"}");
-        test_parse_query_to_ast_helper("<=a", "{\"*\" TO \"a\"]");
-        test_parse_query_to_ast_helper("<=bsd", "{\"*\" TO \"bsd\"]");
     }
 
     #[test]
@@ -1644,21 +1590,5 @@ mod test {
             r#"myfield:'hello\"happy\'tax'"#,
             r#""myfield":'hello"happy'tax'"#,
        );
-        // we don't process escape sequence for chars which don't require it
-        test_parse_query_to_ast_helper(r#"abc\*"#, r#"abc\*"#);
-    }
-
-    #[test]
-    fn test_queries_with_colons() {
-        test_parse_query_to_ast_helper(r#""abc:def""#, r#""abc:def""#);
-        test_parse_query_to_ast_helper(r#"'abc:def'"#, r#"'abc:def'"#);
-        test_parse_query_to_ast_helper(r#"abc\:def"#, r#"abc:def"#);
-        test_parse_query_to_ast_helper(r#""abc\:def""#, r#""abc:def""#);
-        test_parse_query_to_ast_helper(r#"'abc\:def'"#, r#"'abc:def'"#);
-    }
-
-    #[test]
-    fn test_invalid_field() {
-        test_is_parse_err(r#"!bc:def"#, "!bc:def");
     }
 }

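Note: the escape-sequence support removed in this file is summarized by the deleted `interpret_escape` helper and its tests: a backslash is consumed before characters that require escaping (whitespace, `-`, and the special set) and preserved before anything else, so `abc\:def` parses as the term `abc:def` while `abc\*` stays `abc\*`. A self-contained restatement of that helper, same logic as the deleted code:

const ESCAPE_IN_WORD: &[char] = &['^', '`', ':', '{', '}', '"', '\'', '[', ']', '(', ')', '\\'];

/// Resolve backslash escapes: drop the backslash before characters that
/// require escaping, keep it verbatim before anything else.
fn interpret_escape(source: &str) -> String {
    let mut res = String::with_capacity(source.len());
    let mut in_escape = false;
    let require_escape = |c: char| c.is_whitespace() || ESCAPE_IN_WORD.contains(&c) || c == '-';
    for c in source.chars() {
        if in_escape {
            if !require_escape(c) {
                res.push('\\'); // re-add the escape sequence
            }
            res.push(c);
            in_escape = false;
        } else if c == '\\' {
            in_escape = true;
        } else {
            res.push(c);
        }
    }
    res
}

fn main() {
    assert_eq!(interpret_escape(r"abc\:def"), "abc:def");
    assert_eq!(interpret_escape(r"abc\*"), r"abc\*"); // '*' needs no escape
}
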
@@ -34,7 +34,7 @@ use super::bucket::{
     DateHistogramAggregationReq, HistogramAggregation, RangeAggregation, TermsAggregation,
 };
 use super::metric::{
-    AverageAggregation, CountAggregation, ExtendedStatsAggregation, MaxAggregation, MinAggregation,
+    AverageAggregation, CountAggregation, MaxAggregation, MinAggregation,
     PercentilesAggregationReq, StatsAggregation, SumAggregation, TopHitsAggregation,
 };
 
@@ -146,11 +146,6 @@ pub enum AggregationVariants {
     /// extracted values.
     #[serde(rename = "stats")]
     Stats(StatsAggregation),
-    /// Computes a collection of estended statistics (`min`, `max`, `sum`, `count`, `avg`,
-    /// `sum_of_squares`, `variance`, `variance_sampling`, `std_deviation`,
-    /// `std_deviation_sampling`) over the extracted values.
-    #[serde(rename = "extended_stats")]
-    ExtendedStats(ExtendedStatsAggregation),
     /// Computes the sum of the extracted values.
     #[serde(rename = "sum")]
     Sum(SumAggregation),
@@ -175,7 +170,6 @@ impl AggregationVariants {
             AggregationVariants::Max(max) => vec![max.field_name()],
             AggregationVariants::Min(min) => vec![min.field_name()],
             AggregationVariants::Stats(stats) => vec![stats.field_name()],
-            AggregationVariants::ExtendedStats(extended_stats) => vec![extended_stats.field_name()],
             AggregationVariants::Sum(sum) => vec![sum.field_name()],
             AggregationVariants::Percentiles(per) => vec![per.field_name()],
             AggregationVariants::TopHits(top_hits) => top_hits.field_names(),
@@ -203,12 +197,6 @@ impl AggregationVariants {
             _ => None,
         }
     }
-    pub(crate) fn as_top_hits(&self) -> Option<&TopHitsAggregation> {
-        match &self {
-            AggregationVariants::TopHits(top_hits) => Some(top_hits),
-            _ => None,
-        }
-    }
 
     pub(crate) fn as_percentile(&self) -> Option<&PercentilesAggregationReq> {
         match &self {

@@ -11,8 +11,8 @@ use super::bucket::{
     DateHistogramAggregationReq, HistogramAggregation, RangeAggregation, TermsAggregation,
 };
 use super::metric::{
-    AverageAggregation, CountAggregation, ExtendedStatsAggregation, MaxAggregation, MinAggregation,
-    StatsAggregation, SumAggregation,
+    AverageAggregation, CountAggregation, MaxAggregation, MinAggregation, StatsAggregation,
+    SumAggregation,
 };
 use super::segment_agg_result::AggregationLimits;
 use super::VecWithNames;
@@ -276,10 +276,6 @@ impl AggregationWithAccessor {
                 field: ref field_name,
                 ..
             })
-            | ExtendedStats(ExtendedStatsAggregation {
-                field: ref field_name,
-                ..
-            })
             | Sum(SumAggregation {
                 field: ref field_name,
                 ..
@@ -339,8 +335,8 @@ fn get_missing_val(
         }
         _ => {
             return Err(crate::TantivyError::InvalidArgument(format!(
-                "Missing value {missing:?} for field {field_name} is not supported for column \
-                 type {column_type:?}"
+                "Missing value {:?} for field {} is not supported for column type {:?}",
+                missing, field_name, column_type
             )));
         }
     };
@@ -407,7 +403,7 @@ fn get_dynamic_columns(
         .iter()
         .map(|h| h.open())
         .collect::<io::Result<_>>()?;
-    assert!(!ff_fields.is_empty(), "field {field_name} not found");
+    assert!(!ff_fields.is_empty(), "field {} not found", field_name);
    Ok(cols)
 }
 

@@ -8,9 +8,7 @@ use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};
 
 use super::bucket::GetDocCount;
-use super::metric::{
-    ExtendedStats, PercentilesMetricResult, SingleMetricResult, Stats, TopHitsMetricResult,
-};
+use super::metric::{PercentilesMetricResult, SingleMetricResult, Stats, TopHitsMetricResult};
 use super::{AggregationError, Key};
 use crate::TantivyError;
 
@@ -90,8 +88,6 @@ pub enum MetricResult {
     Min(SingleMetricResult),
     /// Stats metric result.
     Stats(Stats),
-    /// ExtendedStats metric result.
-    ExtendedStats(Box<ExtendedStats>),
     /// Sum metric result.
     Sum(SingleMetricResult),
     /// Percentiles metric result.
@@ -108,7 +104,6 @@ impl MetricResult {
             MetricResult::Max(max) => Ok(max.value),
             MetricResult::Min(min) => Ok(min.value),
             MetricResult::Stats(stats) => stats.get_value(agg_property),
-            MetricResult::ExtendedStats(extended_stats) => extended_stats.get_value(agg_property),
             MetricResult::Sum(sum) => Ok(sum.value),
             MetricResult::Percentiles(_) => Err(TantivyError::AggregationError(
                 AggregationError::InvalidRequest("percentiles can't be used to order".to_string()),

@@ -357,7 +357,8 @@ impl SegmentTermCollector {
     ) -> crate::Result<Self> {
         if field_type == ColumnType::Bytes {
             return Err(TantivyError::InvalidArgument(format!(
-                "terms aggregation is not supported for column type {field_type:?}"
+                "terms aggregation is not supported for column type {:?}",
+                field_type
             )));
         }
         let term_buckets = TermBuckets::default();

@@ -19,8 +19,8 @@ use super::bucket::{
     GetDocCount, Order, OrderTarget, RangeAggregation, TermsAggregation,
 };
 use super::metric::{
-    IntermediateAverage, IntermediateCount, IntermediateExtendedStats, IntermediateMax,
-    IntermediateMin, IntermediateStats, IntermediateSum, PercentilesCollector, TopHitsTopNComputer,
+    IntermediateAverage, IntermediateCount, IntermediateMax, IntermediateMin, IntermediateStats,
+    IntermediateSum, PercentilesCollector, TopHitsTopNComputer,
 };
 use super::segment_agg_result::AggregationLimits;
 use super::{format_date, AggregationError, Key, SerializedKey};
@@ -215,9 +215,6 @@ pub(crate) fn empty_from_req(req: &Aggregation) -> IntermediateAggregationResult
         Stats(_) => IntermediateAggregationResult::Metric(IntermediateMetricResult::Stats(
             IntermediateStats::default(),
         )),
-        ExtendedStats(_) => IntermediateAggregationResult::Metric(
-            IntermediateMetricResult::ExtendedStats(IntermediateExtendedStats::default()),
-        ),
         Sum(_) => IntermediateAggregationResult::Metric(IntermediateMetricResult::Sum(
             IntermediateSum::default(),
         )),
@@ -225,7 +222,7 @@ pub(crate) fn empty_from_req(req: &Aggregation) -> IntermediateAggregationResult
             IntermediateMetricResult::Percentiles(PercentilesCollector::default()),
         ),
         TopHits(ref req) => IntermediateAggregationResult::Metric(
-            IntermediateMetricResult::TopHits(TopHitsTopNComputer::new(req)),
+            IntermediateMetricResult::TopHits(TopHitsTopNComputer::new(req.clone())),
         ),
     }
 }
@@ -285,8 +282,6 @@ pub enum IntermediateMetricResult {
     Min(IntermediateMin),
     /// Intermediate stats result.
     Stats(IntermediateStats),
-    /// Intermediate stats result.
-    ExtendedStats(IntermediateExtendedStats),
     /// Intermediate sum result.
     Sum(IntermediateSum),
     /// Intermediate top_hits result
@@ -311,9 +306,6 @@ impl IntermediateMetricResult {
             IntermediateMetricResult::Stats(intermediate_stats) => {
                 MetricResult::Stats(intermediate_stats.finalize())
             }
-            IntermediateMetricResult::ExtendedStats(intermediate_stats) => {
-                MetricResult::ExtendedStats(intermediate_stats.finalize())
-            }
             IntermediateMetricResult::Sum(intermediate_sum) => {
                 MetricResult::Sum(intermediate_sum.finalize().into())
             }
@@ -354,12 +346,6 @@ impl IntermediateMetricResult {
             ) => {
                 stats_left.merge_fruits(stats_right);
             }
-            (
-                IntermediateMetricResult::ExtendedStats(extended_stats_left),
-                IntermediateMetricResult::ExtendedStats(extended_stats_right),
-            ) => {
-                extended_stats_left.merge_fruits(extended_stats_right);
-            }
             (IntermediateMetricResult::Sum(sum_left), IntermediateMetricResult::Sum(sum_right)) => {
                 sum_left.merge_fruits(sum_right);
             }

File diff suppressed because it is too large.

@@ -18,7 +18,6 @@
 
 mod average;
 mod count;
-mod extended_stats;
 mod max;
 mod min;
 mod percentiles;
@@ -30,7 +29,6 @@ use std::collections::HashMap;
 
 pub use average::*;
 pub use count::*;
-pub use extended_stats::*;
 pub use max::*;
 pub use min::*;
 pub use percentiles::*;

@@ -1,5 +1,3 @@
-use std::fmt::Debug;
-
 use serde::{Deserialize, Serialize};
 
 use super::*;
@@ -87,15 +85,13 @@ impl Stats {
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct IntermediateStats {
     /// The number of extracted values.
-    pub(crate) count: u64,
+    count: u64,
     /// The sum of the extracted values.
-    pub(crate) sum: f64,
-    /// delta for sum needed for [Kahan algorithm for summation](https://en.wikipedia.org/wiki/Kahan_summation_algorithm)
-    pub(crate) delta: f64,
+    sum: f64,
     /// The min value.
-    pub(crate) min: f64,
+    min: f64,
     /// The max value.
-    pub(crate) max: f64,
+    max: f64,
 }
 
 impl Default for IntermediateStats {
@@ -103,7 +99,6 @@ impl Default for IntermediateStats {
         Self {
             count: 0,
             sum: 0.0,
-            delta: 0.0,
             min: f64::MAX,
             max: f64::MIN,
         }
@@ -114,13 +109,7 @@ impl IntermediateStats {
     /// Merges the other stats intermediate result into self.
     pub fn merge_fruits(&mut self, other: IntermediateStats) {
         self.count += other.count;
-        // kahan algorithm for sum
-        let y = other.sum - (self.delta + other.delta);
-        let t = self.sum + y;
-        self.delta = (t - self.sum) - y;
-        self.sum = t;
-
+        self.sum += other.sum;
         self.min = self.min.min(other.min);
         self.max = self.max.max(other.max);
     }
@@ -152,15 +141,9 @@ impl IntermediateStats {
     }
 
     #[inline]
-    pub(in crate::aggregation::metric) fn collect(&mut self, value: f64) {
+    fn collect(&mut self, value: f64) {
         self.count += 1;
-        // kahan algorithm for sum
-        let y = value - self.delta;
-        let t = self.sum + y;
-        self.delta = (t - self.sum) - y;
-        self.sum = t;
-
+        self.sum += value;
         self.min = self.min.min(value);
         self.max = self.max.max(value);
     }
@@ -305,6 +288,7 @@ impl SegmentAggregationCollector for SegmentStatsCollector {
 
 #[cfg(test)]
 mod tests {
+
     use serde_json::Value;
 
     use crate::aggregation::agg_req::{Aggregation, Aggregations};

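Note: the hunks above replace Kahan (compensated) summation with a plain `+=` on the right-hand side. Kahan summation keeps a `delta` term that captures the rounding error of each addition and feeds it back into the next one, which matters when accumulating many small f64 values into a large running sum. A standalone sketch of the same algorithm the removed lines implement:

/// Kahan (compensated) summation: `delta` accumulates the rounding error
/// of each addition and feeds it back into the next one.
fn kahan_sum(values: &[f64]) -> f64 {
    let mut sum = 0.0;
    let mut delta = 0.0; // running compensation for lost low-order bits
    for &value in values {
        let y = value - delta;
        let t = sum + y;
        delta = (t - sum) - y;
        sum = t;
    }
    sum
}

fn main() {
    // Many tiny increments on top of a large value are lost by naive
    // summation; the compensated sum retains them.
    let values: Vec<f64> = std::iter::once(1e9)
        .chain(std::iter::repeat(1e-9).take(1_000_000))
        .collect();
    let naive: f64 = values.iter().sum();
    let compensated = kahan_sum(&values);
    println!("naive: {naive}, kahan: {compensated}");
}
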
@@ -1,7 +1,7 @@
 use std::collections::HashMap;
 use std::net::Ipv6Addr;
 
-use columnar::{Column, ColumnType, ColumnarReader, DynamicColumn};
+use columnar::{ColumnarReader, DynamicColumn};
 use common::json_path_writer::JSON_PATH_SEGMENT_SEP_STR;
 use common::DateTime;
 use regex::Regex;
@@ -131,8 +131,8 @@ impl<'de> Deserialize<'de> for KeyOrder {
         ))?;
         if key_order.next().is_some() {
             return Err(serde::de::Error::custom(format!(
-                "Expected exactly one key-value pair in sort parameter of top_hits, found \
-                 {key_order:?}"
+                "Expected exactly one key-value pair in sort parameter of top_hits, found {:?}",
+                key_order
             )));
         }
         Ok(Self { field, order })
@@ -144,22 +144,27 @@ fn globbed_string_to_regex(glob: &str) -> Result<Regex, crate::TantivyError> {
     // Replace `*` glob with `.*` regex
     let sanitized = format!("^{}$", regex::escape(glob).replace(r"\*", ".*"));
     Regex::new(&sanitized.replace('*', ".*")).map_err(|e| {
-        crate::TantivyError::SchemaError(format!("Invalid regex '{glob}' in docvalue_fields: {e}"))
+        crate::TantivyError::SchemaError(format!(
+            "Invalid regex '{}' in docvalue_fields: {}",
+            glob, e
+        ))
     })
 }
 
 fn use_doc_value_fields_err(parameter: &str) -> crate::Result<()> {
     Err(crate::TantivyError::AggregationError(
         AggregationError::InvalidRequest(format!(
-            "The `{parameter}` parameter is not supported, only `docvalue_fields` is supported in \
-             `top_hits` aggregation"
+            "The `{}` parameter is not supported, only `docvalue_fields` is supported in \
+             `top_hits` aggregation",
+            parameter
         )),
     ))
 }
 fn unsupported_err(parameter: &str) -> crate::Result<()> {
     Err(crate::TantivyError::AggregationError(
         AggregationError::InvalidRequest(format!(
-            "The `{parameter}` parameter is not supported in the `top_hits` aggregation"
+            "The `{}` parameter is not supported in the `top_hits` aggregation",
+            parameter
        )),
    ))
 }
@@ -212,7 +217,8 @@ impl TopHitsAggregation {
             .collect::<Vec<_>>();
         assert!(
             !fields.is_empty(),
-            "No fields matched the glob '{field}' in docvalue_fields"
+            "No fields matched the glob '{}' in docvalue_fields",
+            field
         );
         Ok(fields)
     })
@@ -248,7 +254,7 @@ impl TopHitsAggregation {
             .map(|field| {
                 let accessors = accessors
                     .get(field)
-                    .unwrap_or_else(|| panic!("field '{field}' not found in accessors"));
+                    .unwrap_or_else(|| panic!("field '{}' not found in accessors", field));
 
                 let values: Vec<FastFieldValue> = accessors
                     .iter()
@@ -443,10 +449,10 @@ impl std::cmp::PartialEq for TopHitsTopNComputer {
 
 impl TopHitsTopNComputer {
     /// Create a new TopHitsCollector
-    pub fn new(req: &TopHitsAggregation) -> Self {
+    pub fn new(req: TopHitsAggregation) -> Self {
         Self {
             top_n: TopNComputer::new(req.size + req.from.unwrap_or(0)),
-            req: req.clone(),
+            req,
         }
     }
 
@@ -491,6 +497,7 @@ impl TopHitsTopNComputer {
 pub(crate) struct TopHitsSegmentCollector {
     segment_ordinal: SegmentOrdinal,
     accessor_idx: usize,
+    req: TopHitsAggregation,
     top_n: TopNComputer<Vec<DocValueAndOrder>, DocAddress, false>,
 }
 
@@ -501,6 +508,7 @@ impl TopHitsSegmentCollector {
         segment_ordinal: SegmentOrdinal,
     ) -> Self {
         Self {
+            req: req.clone(),
             top_n: TopNComputer::new(req.size + req.from.unwrap_or(0)),
             segment_ordinal,
             accessor_idx,
@@ -509,13 +517,14 @@ impl TopHitsSegmentCollector {
     fn into_top_hits_collector(
         self,
         value_accessors: &HashMap<String, Vec<DynamicColumn>>,
-        req: &TopHitsAggregation,
     ) -> TopHitsTopNComputer {
-        let mut top_hits_computer = TopHitsTopNComputer::new(req);
+        let mut top_hits_computer = TopHitsTopNComputer::new(self.req.clone());
        let top_results = self.top_n.into_vec();

        for res in top_results {
-            let doc_value_fields = req.get_document_field_data(value_accessors, res.doc.doc_id);
+            let doc_value_fields = self
+                .req
+                .get_document_field_data(value_accessors, res.doc.doc_id);
            top_hits_computer.collect(
                DocSortValuesAndFields {
                    sorts: res.feature,
@@ -527,15 +536,34 @@ impl TopHitsSegmentCollector {
 
         top_hits_computer
     }
+}
 
-    /// TODO add a specialized variant for a single sort field
-    fn collect_with(
+impl SegmentAggregationCollector for TopHitsSegmentCollector {
+    fn add_intermediate_aggregation_result(
+        self: Box<Self>,
+        agg_with_accessor: &crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
+        results: &mut crate::aggregation::intermediate_agg_result::IntermediateAggregationResults,
+    ) -> crate::Result<()> {
+        let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();
+
+        let value_accessors = &agg_with_accessor.aggs.values[self.accessor_idx].value_accessors;
+
+        let intermediate_result =
+            IntermediateMetricResult::TopHits(self.into_top_hits_collector(value_accessors));
+        results.push(
+            name,
+            IntermediateAggregationResult::Metric(intermediate_result),
+        )
+    }
+
+    fn collect(
         &mut self,
         doc_id: crate::DocId,
-        req: &TopHitsAggregation,
-        accessors: &[(Column<u64>, ColumnType)],
+        agg_with_accessor: &mut crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
     ) -> crate::Result<()> {
-        let sorts: Vec<DocValueAndOrder> = req
+        let accessors = &agg_with_accessor.aggs.values[self.accessor_idx].accessors;
+        let sorts: Vec<DocValueAndOrder> = self
+            .req
             .sort
             .iter()
             .enumerate()
@@ -560,62 +588,15 @@ impl TopHitsSegmentCollector {
         );
         Ok(())
     }
-}
-
-impl SegmentAggregationCollector for TopHitsSegmentCollector {
-    fn add_intermediate_aggregation_result(
-        self: Box<Self>,
-        agg_with_accessor: &crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
-        results: &mut crate::aggregation::intermediate_agg_result::IntermediateAggregationResults,
-    ) -> crate::Result<()> {
-        let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();
-
-        let value_accessors = &agg_with_accessor.aggs.values[self.accessor_idx].value_accessors;
-        let tophits_req = &agg_with_accessor.aggs.values[self.accessor_idx]
-            .agg
-            .agg
-            .as_top_hits()
-            .expect("aggregation request must be of type top hits");
-
-        let intermediate_result = IntermediateMetricResult::TopHits(
-            self.into_top_hits_collector(value_accessors, tophits_req),
-        );
-        results.push(
-            name,
-            IntermediateAggregationResult::Metric(intermediate_result),
-        )
-    }
-
-    /// TODO: Consider a caching layer to reduce the call overhead
-    fn collect(
-        &mut self,
-        doc_id: crate::DocId,
-        agg_with_accessor: &mut crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
-    ) -> crate::Result<()> {
-        let tophits_req = &agg_with_accessor.aggs.values[self.accessor_idx]
-            .agg
-            .agg
-            .as_top_hits()
-            .expect("aggregation request must be of type top hits");
-        let accessors = &agg_with_accessor.aggs.values[self.accessor_idx].accessors;
-        self.collect_with(doc_id, tophits_req, accessors)?;
-        Ok(())
-    }
-
     fn collect_block(
         &mut self,
         docs: &[crate::DocId],
         agg_with_accessor: &mut crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
     ) -> crate::Result<()> {
-        let tophits_req = &agg_with_accessor.aggs.values[self.accessor_idx]
-            .agg
-            .agg
-            .as_top_hits()
-            .expect("aggregation request must be of type top hits");
-        let accessors = &agg_with_accessor.aggs.values[self.accessor_idx].accessors;
         // TODO: Consider getting fields with the column block accessor.
         for doc in docs {
-            self.collect_with(*doc, tophits_req, accessors)?;
+            self.collect(*doc, agg_with_accessor)?;
         }
         Ok(())
     }

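Note on this refactor: on the left, the segment collector carries no request; each call fetches the `TopHitsAggregation` back out of the accessor tree via `as_top_hits()` and threads it into `collect_with`. On the right, the request is cloned into every `TopHitsSegmentCollector` (and again into the `TopHitsTopNComputer`), which simplifies the trait methods at the cost of a clone per segment. A minimal sketch of the two shapes, with simplified types that are not tantivy's real API:

#[derive(Clone)]
struct Req {
    size: usize,
}

// Right-hand shape: the collector owns a clone of the request.
struct OwnedCollector {
    req: Req,
    hits: Vec<u32>,
}

impl OwnedCollector {
    fn new(req: &Req) -> Self {
        Self { req: req.clone(), hits: Vec::new() }
    }
    fn collect(&mut self, doc: u32) {
        if self.hits.len() < self.req.size {
            self.hits.push(doc);
        }
    }
}

// Left-hand shape: the request lives outside and is passed in per call.
struct BorrowingCollector {
    hits: Vec<u32>,
}

impl BorrowingCollector {
    fn collect_with(&mut self, doc: u32, req: &Req) {
        if self.hits.len() < req.size {
            self.hits.push(doc);
        }
    }
}
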
@@ -158,14 +158,15 @@ use serde::de::{self, Visitor};
 use serde::{Deserialize, Deserializer, Serialize};
 
 fn parse_str_into_f64<E: de::Error>(value: &str) -> Result<f64, E> {
-    let parsed = value
-        .parse::<f64>()
-        .map_err(|_err| de::Error::custom(format!("Failed to parse f64 from string: {value:?}")))?;
+    let parsed = value.parse::<f64>().map_err(|_err| {
+        de::Error::custom(format!("Failed to parse f64 from string: {:?}", value))
+    })?;
 
     // Check if the parsed value is NaN or infinity
     if parsed.is_nan() || parsed.is_infinite() {
         Err(de::Error::custom(format!(
-            "Value is not a valid f64 (NaN or Infinity): {value:?}"
+            "Value is not a valid f64 (NaN or Infinity): {:?}",
+            value
         )))
     } else {
         Ok(parsed)

@@ -11,12 +11,12 @@ use super::agg_req_with_accessor::{AggregationWithAccessor, AggregationsWithAcce
 use super::bucket::{SegmentHistogramCollector, SegmentRangeCollector, SegmentTermCollector};
 use super::intermediate_agg_result::IntermediateAggregationResults;
 use super::metric::{
-    AverageAggregation, CountAggregation, ExtendedStatsAggregation, MaxAggregation, MinAggregation,
+    AverageAggregation, CountAggregation, MaxAggregation, MinAggregation,
     SegmentPercentilesCollector, SegmentStatsCollector, SegmentStatsType, StatsAggregation,
     SumAggregation,
 };
 use crate::aggregation::bucket::TermMissingAgg;
-use crate::aggregation::metric::{SegmentExtendedStatsCollector, TopHitsSegmentCollector};
+use crate::aggregation::metric::TopHitsSegmentCollector;
 
 pub(crate) trait SegmentAggregationCollector: CollectorClone + Debug {
     fn add_intermediate_aggregation_result(
@@ -148,9 +148,6 @@ pub(crate) fn build_single_agg_segment_collector(
         accessor_idx,
         *missing,
     ))),
-    ExtendedStats(ExtendedStatsAggregation { missing, sigma, .. }) => Ok(Box::new(
-        SegmentExtendedStatsCollector::from_req(req.field_type, *sigma, accessor_idx, *missing),
-    )),
     Sum(SumAggregation { missing, .. }) => Ok(Box::new(SegmentStatsCollector::from_req(
         req.field_type,
         SegmentStatsType::Sum,

@@ -598,7 +598,7 @@ mod tests {
             let mid = n % 4;
             n /= 4;
             let leaf = n % 5;
-            Facet::from(&format!("/top{top}/mid{mid}/leaf{leaf}"))
+            Facet::from(&format!("/top{}/mid{}/leaf{}", top, mid, leaf))
         })
         .collect();
         for i in 0..num_facets * 10 {
@@ -737,7 +737,7 @@ mod tests {
         vec![("a", 10), ("b", 100), ("c", 7), ("d", 12), ("e", 21)]
             .into_iter()
             .flat_map(|(c, count)| {
-                let facet = Facet::from(&format!("/facet/{c}"));
+                let facet = Facet::from(&format!("/facet/{}", c));
                 let doc = doc!(facet_field => facet);
                 iter::repeat(doc).take(count)
             })
@@ -785,7 +785,7 @@ mod tests {
         let docs: Vec<TantivyDocument> = vec![("b", 2), ("a", 2), ("c", 4)]
             .into_iter()
             .flat_map(|(c, count)| {
-                let facet = Facet::from(&format!("/facet/{c}"));
+                let facet = Facet::from(&format!("/facet/{}", c));
                 let doc = doc!(facet_field => facet);
                 iter::repeat(doc).take(count)
             })
@@ -871,10 +871,7 @@ mod tests {
     use crate::schema::{Field, Schema, FAST, STORED, TEXT};
     use crate::time::format_description::well_known::Rfc3339;
     use crate::time::OffsetDateTime;
-    use crate::{
-        assert_nearly_equals, DateTime, DocAddress, DocId, Index, IndexWriter, Order, Score,
-        SegmentReader,
-    };
+    use crate::{DateTime, DocAddress, DocId, Index, IndexWriter, Order, Score, SegmentReader};
 
     fn make_index() -> crate::Result<Index> {
         let mut schema_builder = Schema::builder();
@@ -184,56 +184,28 @@ mod tests {
     fn test_cancel_cpu_intensive_tasks() {
         use std::sync::atomic::{AtomicU64, Ordering};
         use std::sync::Arc;
+        use std::time::Duration;
 
         let counter: Arc<AtomicU64> = Default::default();
-        let other_counter: Arc<AtomicU64> = Default::default();
 
         let mut futures = Vec::new();
-        let mut other_futures = Vec::new();
-
-        let (tx, rx) = crossbeam_channel::bounded::<()>(0);
-        let rx = Arc::new(rx);
         let executor = Executor::multi_thread(3, "search-test").unwrap();
-        for _ in 0..1000 {
-            let counter_clone: Arc<AtomicU64> = counter.clone();
-            let other_counter_clone: Arc<AtomicU64> = other_counter.clone();
-
-            let rx_clone = rx.clone();
-            let rx_clone2 = rx.clone();
+        for _ in 0..1_000 {
+            let counter_clone = counter.clone();
             let fut = executor.spawn_blocking(move || {
-                counter_clone.fetch_add(1, Ordering::SeqCst);
-                let _ = rx_clone.recv();
+                std::thread::sleep(Duration::from_millis(4));
+                counter_clone.fetch_add(1, Ordering::SeqCst)
             });
             futures.push(fut);
-            let other_fut = executor.spawn_blocking(move || {
-                other_counter_clone.fetch_add(1, Ordering::SeqCst);
-                let _ = rx_clone2.recv();
-            });
-            other_futures.push(other_fut);
         }
-        // We execute 100 futures.
-        for _ in 0..100 {
-            tx.send(()).unwrap();
-        }
-        let counter_val = counter.load(Ordering::SeqCst);
-        let other_counter_val = other_counter.load(Ordering::SeqCst);
-        assert!(counter_val >= 30);
-        assert!(other_counter_val >= 30);
-
-        drop(other_futures);
-
-        // We execute 100 futures.
-        for _ in 0..100 {
-            tx.send(()).unwrap();
-        }
-
-        let counter_val2 = counter.load(Ordering::SeqCst);
-        assert!(counter_val2 >= counter_val + 100 - 6);
-
-        let other_counter_val2 = other_counter.load(Ordering::SeqCst);
-        assert!(other_counter_val2 <= other_counter_val + 6);
+        std::thread::sleep(Duration::from_millis(5));
+        // The first few num_cores tasks should run, but the other should get cancelled.
+        drop(futures);
+        while Arc::strong_count(&counter) > 1 {
+            std::thread::sleep(Duration::from_millis(10));
+        }
+        // with ideal timing, we expect the result to always be 6, but as long as we run some, and
+        // cancelled most, the test is a success
+        assert!(counter.load(Ordering::SeqCst) > 0);
+        assert!(counter.load(Ordering::SeqCst) < 50);
     }
 }
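The rewritten test leans on two ideas: each `spawn_blocking` task holds a clone of the `Arc`, so `Arc::strong_count` dropping back to 1 means every task has either run or been dropped; and dropping the returned futures is what cancels tasks still waiting in the queue. A minimal standalone sketch of the strong-count synchronization (plain `std` threads, not tantivy's `Executor`):

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn main() {
    let counter: Arc<AtomicU64> = Default::default();
    for _ in 0..8 {
        let counter_clone = counter.clone();
        thread::spawn(move || {
            thread::sleep(Duration::from_millis(4));
            counter_clone.fetch_add(1, Ordering::SeqCst);
            // `counter_clone` is dropped here, decrementing the strong count.
        });
    }
    // Once only our local handle remains, every worker has finished.
    while Arc::strong_count(&counter) > 1 {
        thread::sleep(Duration::from_millis(10));
    }
    assert_eq!(counter.load(Ordering::SeqCst), 8);
}
```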
@@ -338,14 +338,14 @@ mod tests {
         let mut term = Term::from_field_json_path(field, "attributes.color", false);
         term.append_type_and_str("red");
         assert_eq!(
-            format!("{term:?}"),
+            format!("{:?}", term),
             "Term(field=1, type=Json, path=attributes.color, type=Str, \"red\")"
         );
 
         let mut term = Term::from_field_json_path(field, "attributes.dimensions.width", false);
         term.append_type_and_fast_value(400i64);
         assert_eq!(
-            format!("{term:?}"),
+            format!("{:?}", term),
             "Term(field=1, type=Json, path=attributes.dimensions.width, type=I64, 400)"
         );
     }
@@ -566,7 +566,7 @@ mod tests {
         let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
         let num_paths = 10;
         let paths: Vec<PathBuf> = (0..num_paths)
-            .map(|i| PathBuf::from(&*format!("file_{i}")))
+            .map(|i| PathBuf::from(&*format!("file_{}", i)))
             .collect();
         {
             for path in &paths {
@@ -62,7 +62,8 @@ impl FacetReader {
 
 #[cfg(test)]
 mod tests {
-    use crate::schema::{Facet, FacetOptions, SchemaBuilder, Value, STORED};
+    use crate::schema::document::Value;
+    use crate::schema::{Facet, FacetOptions, SchemaBuilder, STORED};
     use crate::{DocAddress, Index, IndexWriter, TantivyDocument};
 
     #[test]
@@ -88,9 +89,7 @@ mod tests {
         let doc = searcher
             .doc::<TantivyDocument>(DocAddress::new(0u32, 0u32))
             .unwrap();
-        let value = doc
-            .get_first(facet_field)
-            .and_then(|v| v.as_value().as_facet());
+        let value = doc.get_first(facet_field).and_then(|v| v.as_facet());
         assert_eq!(value, None);
     }
 
@@ -252,8 +252,9 @@ impl IndexBuilder {
             let field_type = entry.field_type().value_type();
             if !supported_field_types.contains(&field_type) {
                 return Err(TantivyError::InvalidArgument(format!(
-                    "Unsupported field type in sort_by_field: {field_type:?}. Supported field \
-                     types: {supported_field_types:?} ",
+                    "Unsupported field type in sort_by_field: {:?}. Supported field types: \
+                     {:?} ",
+                    field_type, supported_field_types,
                 )));
             }
         }
@@ -318,14 +318,14 @@ impl SegmentReader {
             if create_canonical {
                 // Without expand dots enabled dots need to be escaped.
                 let escaped_json_path = json_path.replace('.', "\\.");
-                let full_path = format!("{field_name}.{escaped_json_path}");
+                let full_path = format!("{}.{}", field_name, escaped_json_path);
                 let full_path_unescaped = format!("{}.{}", field_name, &json_path);
                 map_to_canonical.insert(full_path_unescaped, full_path.to_string());
                 full_path
             } else {
                 // With expand dots enabled, we can use '.' instead of '\u{1}'.
                 json_path_sep_to_dot(&mut json_path);
-                format!("{field_name}.{json_path}")
+                format!("{}.{}", field_name, json_path)
             }
         };
         indexed_fields.extend(
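For context on the hunk above: a "canonical" field path is built by escaping literal dots inside the JSON path (so they are not mistaken for path separators), and a map from the unescaped spelling to the canonical one is kept for lookup. A standalone sketch of that mapping logic, using only `std` (the variable names mirror the diff; the concrete values are ours):

```rust
use std::collections::HashMap;

fn main() {
    let field_name = "attributes";
    let json_path = "dimensions.width";
    let mut map_to_canonical: HashMap<String, String> = HashMap::new();

    // Without expand-dots, a literal '.' inside the json path must be
    // escaped so it is not confused with the field/path separator.
    let escaped_json_path = json_path.replace('.', "\\.");
    let full_path = format!("{}.{}", field_name, escaped_json_path);
    let full_path_unescaped = format!("{}.{}", field_name, json_path);
    map_to_canonical.insert(full_path_unescaped, full_path.clone());

    assert_eq!(full_path, "attributes.dimensions\\.width");
    assert_eq!(
        map_to_canonical.get("attributes.dimensions.width"),
        Some(&full_path)
    );
}
```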
@@ -306,10 +306,12 @@ mod tests_indexsorting {
     let my_string_field = index.schema().get_field("string_field").unwrap();
     let searcher = index.reader()?.searcher();
     {
-        assert!(searcher
-            .doc::<TantivyDocument>(DocAddress::new(0, 0))?
-            .get_first(my_string_field)
-            .is_none());
+        assert_eq!(
+            searcher
+                .doc::<TantivyDocument>(DocAddress::new(0, 0))?
+                .get_first(my_string_field),
+            None
+        );
         assert_eq!(
             searcher
                 .doc::<TantivyDocument>(DocAddress::new(0, 3))?
@@ -342,7 +344,7 @@ mod tests_indexsorting {
             Some("blublub")
         );
         let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 4))?;
-        assert!(doc.get_first(my_string_field).is_none());
+        assert_eq!(doc.get_first(my_string_field), None);
     }
     // sort by field desc
     let index = create_test_index(
@@ -814,9 +814,10 @@ mod tests {
     use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
     use crate::indexer::NoMergePolicy;
     use crate::query::{BooleanQuery, Occur, Query, QueryParser, TermQuery};
+    use crate::schema::document::Value;
     use crate::schema::{
         self, Facet, FacetOptions, IndexRecordOption, IpAddrOptions, NumericOptions, Schema,
-        TextFieldIndexing, TextOptions, Value, FAST, INDEXED, STORED, STRING, TEXT,
+        TextFieldIndexing, TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
     };
     use crate::store::DOCSTORE_CACHE_CAPACITY;
     use crate::{
@@ -1979,13 +1980,7 @@ mod tests {
             .unwrap();
         // test store iterator
         for doc in store_reader.iter::<TantivyDocument>(segment_reader.alive_bitset()) {
-            let id = doc
-                .unwrap()
-                .get_first(id_field)
-                .unwrap()
-                .as_value()
-                .as_u64()
-                .unwrap();
+            let id = doc.unwrap().get_first(id_field).unwrap().as_u64().unwrap();
             assert!(expected_ids_and_num_occurrences.contains_key(&id));
         }
         // test store random access
@@ -2018,7 +2013,7 @@ mod tests {
             let mut bool2 = doc.get_all(multi_bools);
             assert_eq!(bool, bool2.next().unwrap().as_bool().unwrap());
             assert_ne!(bool, bool2.next().unwrap().as_bool().unwrap());
-            assert!(bool2.next().is_none())
+            assert_eq!(None, bool2.next())
         }
     }
 }
@@ -787,8 +787,6 @@ impl IndexMerger {
 mod tests {
 
     use columnar::Column;
-    use proptest::prop_oneof;
-    use proptest::strategy::Strategy;
     use schema::FAST;
 
     use crate::collector::tests::{
@@ -796,11 +794,11 @@ mod tests {
     };
     use crate::collector::{Count, FacetCollector};
     use crate::index::{Index, SegmentId};
-    use crate::indexer::NoMergePolicy;
     use crate::query::{AllQuery, BooleanQuery, EnableScoring, Scorer, TermQuery};
+    use crate::schema::document::Value;
     use crate::schema::{
         Facet, FacetOptions, IndexRecordOption, NumericOptions, TantivyDocument, Term,
-        TextFieldIndexing, Value, INDEXED, TEXT,
+        TextFieldIndexing, INDEXED, TEXT,
     };
     use crate::time::OffsetDateTime;
     use crate::{
@@ -912,24 +910,15 @@ mod tests {
     }
     {
         let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 0))?;
-        assert_eq!(
-            doc.get_first(text_field).unwrap().as_value().as_str(),
-            Some("af b")
-        );
+        assert_eq!(doc.get_first(text_field).unwrap().as_str(), Some("af b"));
     }
     {
         let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 1))?;
-        assert_eq!(
-            doc.get_first(text_field).unwrap().as_value().as_str(),
-            Some("a b c")
-        );
+        assert_eq!(doc.get_first(text_field).unwrap().as_str(), Some("a b c"));
    }
    {
        let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 2))?;
-        assert_eq!(
-            doc.get_first(text_field).unwrap().as_value().as_str(),
-            Some("a b c d")
-        );
+        assert_eq!(doc.get_first(text_field).unwrap().as_str(), Some("a b c d"));
    }
    {
        let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 3))?;
@@ -1534,112 +1523,6 @@ mod tests {
         Ok(())
     }
 
-    #[derive(Debug, Clone, Copy, Eq, PartialEq)]
-    enum IndexingOp {
-        ZeroVal,
-        OneVal { val: u64 },
-        TwoVal { val: u64 },
-        Commit,
-    }
-
-    fn balanced_operation_strategy() -> impl Strategy<Value = IndexingOp> {
-        prop_oneof![
-            (0u64..1u64).prop_map(|_| IndexingOp::ZeroVal),
-            (0u64..1u64).prop_map(|val| IndexingOp::OneVal { val }),
-            (0u64..1u64).prop_map(|val| IndexingOp::TwoVal { val }),
-            (0u64..1u64).prop_map(|_| IndexingOp::Commit),
-        ]
-    }
-
-    use proptest::prelude::*;
-    proptest! {
-        #[test]
-        fn test_merge_columnar_int_proptest(ops in proptest::collection::vec(balanced_operation_strategy(), 1..20)) {
-            assert!(test_merge_int_fields(&ops[..]).is_ok());
-        }
-    }
-    fn test_merge_int_fields(ops: &[IndexingOp]) -> crate::Result<()> {
-        if ops.iter().all(|op| *op == IndexingOp::Commit) {
-            return Ok(());
-        }
-        let expected_doc_and_vals: Vec<(u32, Vec<u64>)> = ops
-            .iter()
-            .filter(|op| *op != &IndexingOp::Commit)
-            .map(|op| match op {
-                IndexingOp::ZeroVal => vec![],
-                IndexingOp::OneVal { val } => vec![*val],
-                IndexingOp::TwoVal { val } => vec![*val, *val],
-                IndexingOp::Commit => unreachable!(),
-            })
-            .enumerate()
-            .map(|(id, val)| (id as u32, val))
-            .collect();
-
-        let mut schema_builder = schema::Schema::builder();
-        let int_options = NumericOptions::default().set_fast().set_indexed();
-        let int_field = schema_builder.add_u64_field("intvals", int_options);
-        let index = Index::create_in_ram(schema_builder.build());
-        {
-            let mut index_writer = index.writer_for_tests()?;
-            index_writer.set_merge_policy(Box::new(NoMergePolicy));
-            let index_doc = |index_writer: &mut IndexWriter, int_vals: &[u64]| {
-                let mut doc = TantivyDocument::default();
-                for &val in int_vals {
-                    doc.add_u64(int_field, val);
-                }
-                index_writer.add_document(doc).unwrap();
-            };
-
-            for op in ops {
-                match op {
-                    IndexingOp::ZeroVal => index_doc(&mut index_writer, &[]),
-                    IndexingOp::OneVal { val } => index_doc(&mut index_writer, &[*val]),
-                    IndexingOp::TwoVal { val } => index_doc(&mut index_writer, &[*val, *val]),
-                    IndexingOp::Commit => {
-                        index_writer.commit().expect("commit failed");
-                    }
-                }
-            }
-            index_writer.commit().expect("commit failed");
-        }
-        {
-            let mut segment_ids = index.searchable_segment_ids()?;
-            segment_ids.sort();
-            let mut index_writer: IndexWriter = index.writer_for_tests()?;
-            index_writer.merge(&segment_ids).wait()?;
-            index_writer.wait_merging_threads()?;
-        }
-        let reader = index.reader()?;
-        reader.reload()?;
-
-        let mut vals: Vec<u64> = Vec::new();
-        let mut test_vals = move |col: &Column<u64>, doc: DocId, expected: &[u64]| {
-            vals.clear();
-            vals.extend(col.values_for_doc(doc));
-            assert_eq!(&vals[..], expected);
-        };
-
-        let mut test_col = move |col: &Column<u64>, column_expected: &[(u32, Vec<u64>)]| {
-            for (doc_id, vals) in column_expected.iter() {
-                test_vals(col, *doc_id, vals);
-            }
-        };
-
-        {
-            let searcher = reader.searcher();
-            let segment = searcher.segment_reader(0u32);
-            let col = segment
-                .fast_fields()
-                .column_opt::<u64>("intvals")
-                .unwrap()
-                .unwrap();
-
-            test_col(&col, &expected_doc_and_vals);
-        }
-
-        Ok(())
-    }
 
     #[test]
     fn test_merge_multivalued_int_fields_simple() -> crate::Result<()> {
         let mut schema_builder = schema::Schema::builder();
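The block removed above is a property-based test: a `Strategy` generates random sequences of indexing operations, and the `proptest!` macro shrinks any failing sequence to a minimal reproducer. A minimal sketch of the same pattern with a hypothetical op enum (assuming the `proptest` crate; the invariant checked here is a trivial stand-in for the real merge check):

```rust
use proptest::prelude::*;

#[derive(Debug, Clone, Copy, PartialEq)]
enum Op {
    Add(u64),
    Commit,
}

// Choose randomly between the two operations.
fn op_strategy() -> impl Strategy<Value = Op> {
    prop_oneof![
        (0u64..100u64).prop_map(Op::Add),
        Just(Op::Commit),
    ]
}

proptest! {
    #[test]
    fn sum_is_bounded(ops in proptest::collection::vec(op_strategy(), 1..20)) {
        let total: u64 = ops
            .iter()
            .filter_map(|op| match op {
                Op::Add(v) => Some(*v),
                Op::Commit => None,
            })
            .sum();
        // Each of at most 19 ops adds less than 100.
        prop_assert!(total <= 100 * 20);
    }
}
```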
@@ -5,9 +5,10 @@ mod tests {
     use crate::index::Index;
     use crate::postings::Postings;
     use crate::query::QueryParser;
+    use crate::schema::document::Value;
     use crate::schema::{
         self, BytesOptions, Facet, FacetOptions, IndexRecordOption, NumericOptions,
-        TextFieldIndexing, TextOptions, Value,
+        TextFieldIndexing, TextOptions,
     };
     use crate::{
         DocAddress, DocSet, IndexSettings, IndexSortByField, IndexWriter, Order, TantivyDocument,
@@ -280,16 +281,13 @@ mod tests {
             .doc::<TantivyDocument>(DocAddress::new(0, blubber_pos))
             .unwrap();
         assert_eq!(
-            doc.get_first(my_text_field).unwrap().as_value().as_str(),
+            doc.get_first(my_text_field).unwrap().as_str(),
             Some("blubber")
         );
         let doc = searcher
             .doc::<TantivyDocument>(DocAddress::new(0, 0))
             .unwrap();
-        assert_eq!(
-            doc.get_first(int_field).unwrap().as_value().as_u64(),
-            Some(1000)
-        );
+        assert_eq!(doc.get_first(int_field).unwrap().as_u64(), Some(1000));
     }
 }
 
@@ -216,7 +216,7 @@ mod tests_mmap {
         let test_query = |query_str: &str| {
             let query = parse_query.parse_query(query_str).unwrap();
             let num_docs = searcher.search(&query, &Count).unwrap();
-            assert_eq!(num_docs, 1, "{query_str}");
+            assert_eq!(num_docs, 1, "{}", query_str);
         };
         test_query(format!("json.{field_name_out}:test1").as_str());
         test_query(format!("json.a{field_name_out}:test2").as_str());
@@ -590,10 +590,10 @@ mod tests_mmap {
         let query_parser = QueryParser::for_index(&index, vec![]);
         // Test if field name can be queried
         for (indexed_field, val) in fields_and_vals.iter() {
-            let query_str = &format!("{indexed_field}:{val}");
+            let query_str = &format!("{}:{}", indexed_field, val);
             let query = query_parser.parse_query(query_str).unwrap();
             let count_docs = searcher.search(&*query, &TopDocs::with_limit(2)).unwrap();
-            assert!(!count_docs.is_empty(), "{indexed_field}:{val}");
+            assert!(!count_docs.is_empty(), "{}:{}", indexed_field, val);
         }
         // Test if field name can be used for aggregation
         for (field_name, val) in fields_and_vals.iter() {
@@ -499,9 +499,10 @@ mod tests {
     use crate::fastfield::FastValue;
     use crate::postings::{Postings, TermInfo};
     use crate::query::{PhraseQuery, QueryParser};
+    use crate::schema::document::Value;
     use crate::schema::{
-        Document, IndexRecordOption, OwnedValue, Schema, TextFieldIndexing, TextOptions, Value,
-        STORED, STRING, TEXT,
+        Document, IndexRecordOption, OwnedValue, Schema, TextFieldIndexing, TextOptions, STORED,
+        STRING, TEXT,
     };
     use crate::store::{Compressor, StoreReader, StoreWriter};
     use crate::time::format_description::well_known::Rfc3339;
@@ -554,15 +555,9 @@ mod tests {
         let reader = StoreReader::open(directory.open_read(path).unwrap(), 0).unwrap();
         let doc = reader.get::<TantivyDocument>(0).unwrap();
 
-        assert_eq!(doc.field_values().count(), 2);
-        assert_eq!(
-            doc.get_all(text_field).next().unwrap().as_value().as_str(),
-            Some("A")
-        );
-        assert_eq!(
-            doc.get_all(text_field).nth(1).unwrap().as_value().as_str(),
-            Some("title")
-        );
+        assert_eq!(doc.field_values().len(), 2);
+        assert_eq!(doc.field_values()[0].value().as_str(), Some("A"));
+        assert_eq!(doc.field_values()[1].value().as_str(), Some("title"));
     }
     #[test]
     fn test_simple_json_indexing() {
@@ -646,7 +641,7 @@ mod tests {
         let mut schema_builder = Schema::builder();
         let json_field = schema_builder.add_json_field("json", STORED | TEXT);
         let schema = schema_builder.build();
-        let json_val: serde_json::Value = serde_json::from_str(
+        let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
             r#"{
             "toto": "titi",
             "float": -0.2,
@@ -674,10 +669,14 @@ mod tests {
             doc_id: 0u32,
         })
         .unwrap();
-        let serdeser_json_val = serde_json::from_str::<serde_json::Value>(&doc.to_json(&schema))
+        let serdeser_json_val = serde_json::from_str::<serde_json::Map<String, serde_json::Value>>(
+            &doc.to_json(&schema),
+        )
+        .unwrap()
+        .get("json")
+        .unwrap()[0]
+            .as_object()
             .unwrap()
-            .get("json")
-            .unwrap()[0]
             .clone();
         assert_eq!(json_val, serdeser_json_val);
         let segment_reader = searcher.segment_reader(0u32);
@@ -841,7 +840,7 @@ mod tests {
         let mut schema_builder = Schema::builder();
         let json_field = schema_builder.add_json_field("json", STRING);
         let schema = schema_builder.build();
-        let json_val: serde_json::Value =
+        let json_val: serde_json::Map<String, serde_json::Value> =
             serde_json::from_str(r#"{"mykey": "two tokens"}"#).unwrap();
         let doc = doc!(json_field=>json_val);
         let index = Index::create_in_ram(schema);
@@ -881,7 +880,7 @@ mod tests {
         let mut schema_builder = Schema::builder();
         let json_field = schema_builder.add_json_field("json", TEXT);
         let schema = schema_builder.build();
-        let json_val: serde_json::Value = serde_json::from_str(
+        let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
             r#"{"mykey": [{"field": "hello happy tax payer"}, {"field": "nothello"}]}"#,
         )
         .unwrap();
src/lib.rs (35 changes)
@@ -397,20 +397,16 @@ pub mod tests {
 #[macro_export]
 macro_rules! assert_nearly_equals {
     ($left:expr, $right:expr) => {{
-        assert_nearly_equals!($left, $right, 0.0005);
-    }};
-    ($left:expr, $right:expr, $epsilon:expr) => {{
-        match (&$left, &$right, &$epsilon) {
-            (left_val, right_val, epsilon_val) => {
+        match (&$left, &$right) {
+            (left_val, right_val) => {
                 let diff = (left_val - right_val).abs();
-                if diff > *epsilon_val {
+                let add = left_val.abs() + right_val.abs();
+                if diff > 0.0005 * add {
                     panic!(
-                        r#"assertion failed: `abs(left-right)>epsilon`
+                        r#"assertion failed: `(left ~= right)`
   left: `{:?}`,
-  right: `{:?}`,
-  epsilon: `{:?}`"#,
-                        &*left_val, &*right_val, &*epsilon_val
+  right: `{:?}`"#,
+                        &*left_val, &*right_val
                     )
                 }
             }
@@ -939,7 +935,7 @@ pub mod tests {
         let mut schema_builder = Schema::builder();
         let json_field = schema_builder.add_json_field("json", STORED | TEXT);
         let schema = schema_builder.build();
-        let json_val: serde_json::Value = serde_json::from_str(
+        let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
             r#"{
             "signed": 2,
             "float": 2.0,
@@ -1029,16 +1025,13 @@ pub mod tests {
             text_field => "some other value",
             other_text_field => "short");
         assert_eq!(document.len(), 3);
-        let values: Vec<OwnedValue> = document.get_all(text_field).map(OwnedValue::from).collect();
+        let values: Vec<&OwnedValue> = document.get_all(text_field).collect();
         assert_eq!(values.len(), 2);
-        assert_eq!(values[0].as_ref().as_str(), Some("tantivy"));
-        assert_eq!(values[1].as_ref().as_str(), Some("some other value"));
-        let values: Vec<OwnedValue> = document
-            .get_all(other_text_field)
-            .map(OwnedValue::from)
-            .collect();
+        assert_eq!(values[0].as_str(), Some("tantivy"));
+        assert_eq!(values[1].as_str(), Some("some other value"));
+        let values: Vec<&OwnedValue> = document.get_all(other_text_field).collect();
         assert_eq!(values.len(), 1);
-        assert_eq!(values[0].as_ref().as_str(), Some("short"));
+        assert_eq!(values[0].as_str(), Some("short"));
     }
 
     #[test]
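The macro change above is a semantic one: the removed version took an absolute epsilon (with a 0.0005 default via a second arm), while the new version uses a relative tolerance of 0.0005 times the combined magnitude of both operands. A self-contained sketch of the relative check, written as a plain function:

```rust
/// Relative near-equality as in the reworked macro:
/// |left - right| must not exceed 0.0005 * (|left| + |right|).
fn nearly_equal(left: f64, right: f64) -> bool {
    let diff = (left - right).abs();
    let add = left.abs() + right.abs();
    diff <= 0.0005 * add
}

fn main() {
    // 0.4 apart is fine at magnitude ~2000 (bound is ~1.0)...
    assert!(nearly_equal(1000.0, 1000.4));
    // ...but 0.1 apart is far too much at magnitude ~2 (bound is ~0.001).
    assert!(!nearly_equal(1.0, 1.1));
}
```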
@@ -41,7 +41,6 @@
 /// );
 /// # }
 /// ```
-
 #[macro_export]
 macro_rules! doc(
     () => {
@@ -53,7 +52,7 @@ macro_rules! doc(
     {
         let mut document = $crate::TantivyDocument::default();
         $(
-            document.add_field_value($field, &$value);
+            document.add_field_value($field, $value);
         )*
         document
     }
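The macro body above expands each `field => value` pair into a call to `add_field_value`; on the new side the value is passed by value, matching the `T: Into<OwnedValue>` bound shown later in this diff. A hedged usage sketch against that API (the schema and field names are illustrative):

```rust
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::doc;

fn main() {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let _schema = schema_builder.build();

    // Each pair becomes `document.add_field_value(title, ...)`; anything
    // convertible into an OwnedValue works as the right-hand side.
    let document = doc!(title => "The Old Man and the Sea");
    assert_eq!(document.len(), 1);
}
```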
@@ -138,7 +138,8 @@ impl FuzzyTermQuery {
         if json_path_type != Type::Str {
             return Err(InvalidArgument(format!(
                 "The fuzzy term query requires a string path type for a json term. Found \
-                 {json_path_type:?}"
+                 {:?}",
+                json_path_type
             )));
         }
     }
@@ -2,7 +2,7 @@ use crate::docset::{DocSet, TERMINATED};
 use crate::fieldnorm::FieldNormReader;
 use crate::postings::Postings;
 use crate::query::bm25::Bm25Weight;
-use crate::query::phrase_query::{intersection_count, intersection_exists, PhraseScorer};
+use crate::query::phrase_query::{intersection_count, PhraseScorer};
 use crate::query::Scorer;
 use crate::{DocId, Score};
 
@@ -92,17 +92,14 @@ impl<TPostings: Postings> Scorer for PhraseKind<TPostings> {
     }
 }
 
-pub struct PhrasePrefixScorer<TPostings: Postings, const SCORING_ENABLED: bool> {
+pub struct PhrasePrefixScorer<TPostings: Postings> {
     phrase_scorer: PhraseKind<TPostings>,
     suffixes: Vec<TPostings>,
     suffix_offset: u32,
     phrase_count: u32,
-    suffix_position_buffer: Vec<u32>,
 }
 
-impl<TPostings: Postings, const SCORING_ENABLED: bool>
-    PhrasePrefixScorer<TPostings, SCORING_ENABLED>
-{
+impl<TPostings: Postings> PhrasePrefixScorer<TPostings> {
     // If similarity_weight is None, then scoring is disabled.
     pub fn new(
         mut term_postings: Vec<(usize, TPostings)>,
@@ -110,7 +107,7 @@ impl<TPostings: Postings, const SCORING_ENABLED: bool>
         fieldnorm_reader: FieldNormReader,
         suffixes: Vec<TPostings>,
         suffix_pos: usize,
-    ) -> PhrasePrefixScorer<TPostings, SCORING_ENABLED> {
+    ) -> PhrasePrefixScorer<TPostings> {
         // correct indices so we can merge with our suffix term the PhraseScorer doesn't know about
         let max_offset = term_postings
             .iter()
@@ -143,7 +140,6 @@ impl<TPostings: Postings, const SCORING_ENABLED: bool>
             suffixes,
             suffix_offset: (max_offset - suffix_pos) as u32,
             phrase_count: 0,
-            suffix_position_buffer: Vec::with_capacity(100),
         };
         if phrase_prefix_scorer.doc() != TERMINATED && !phrase_prefix_scorer.matches_prefix() {
             phrase_prefix_scorer.advance();
@@ -157,6 +153,7 @@ impl<TPostings: Postings, const SCORING_ENABLED: bool>
 
     fn matches_prefix(&mut self) -> bool {
         let mut count = 0;
+        let mut positions = Vec::new();
         let current_doc = self.doc();
         let pos_matching = self.phrase_scorer.get_intersection();
         for suffix in &mut self.suffixes {
@@ -165,27 +162,16 @@ impl<TPostings: Postings, const SCORING_ENABLED: bool>
             }
             let doc = suffix.seek(current_doc);
             if doc == current_doc {
-                suffix.positions_with_offset(self.suffix_offset, &mut self.suffix_position_buffer);
-                if SCORING_ENABLED {
-                    count += intersection_count(pos_matching, &self.suffix_position_buffer);
-                } else {
-                    if intersection_exists(pos_matching, &self.suffix_position_buffer) {
-                        return true;
-                    }
-                }
+                suffix.positions_with_offset(self.suffix_offset, &mut positions);
+                count += intersection_count(pos_matching, &positions);
             }
         }
-        if !SCORING_ENABLED {
-            return false;
-        }
         self.phrase_count = count as u32;
         count != 0
     }
 }
 
-impl<TPostings: Postings, const SCORING_ENABLED: bool> DocSet
-    for PhrasePrefixScorer<TPostings, SCORING_ENABLED>
-{
+impl<TPostings: Postings> DocSet for PhrasePrefixScorer<TPostings> {
     fn advance(&mut self) -> DocId {
         loop {
             let doc = self.phrase_scorer.advance();
@@ -212,15 +198,9 @@ impl<TPostings: Postings, const SCORING_ENABLED: bool> DocSet
     }
 }
 
-impl<TPostings: Postings, const SCORING_ENABLED: bool> Scorer
-    for PhrasePrefixScorer<TPostings, SCORING_ENABLED>
-{
+impl<TPostings: Postings> Scorer for PhrasePrefixScorer<TPostings> {
     fn score(&mut self) -> Score {
-        if SCORING_ENABLED {
-            self.phrase_scorer.score()
-        } else {
-            1.0f32
-        }
         // TODO modify score??
+        self.phrase_scorer.score()
     }
 }
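The `-` side of these hunks specializes `PhrasePrefixScorer` with a `const SCORING_ENABLED: bool` parameter so the no-scoring path can return at the first suffix match instead of counting every occurrence; the `+` side drops that and always counts. A minimal sketch of the const-generic specialization pattern itself (the names here are illustrative, not tantivy's; the bool is resolved at compile time, so each instantiation compiles to a branch-free body):

```rust
struct Matcher<const SCORING_ENABLED: bool> {
    count: u32,
}

impl<const SCORING_ENABLED: bool> Matcher<SCORING_ENABLED> {
    /// Returns true if `left` and `right` share an element.
    /// When scoring is enabled, it also tallies how many they share.
    fn matches(&mut self, left: &[u32], right: &[u32]) -> bool {
        if SCORING_ENABLED {
            let count = left.iter().filter(|x| right.contains(x)).count();
            self.count = count as u32;
            count != 0
        } else {
            // Early exit: existence check only, no counting work.
            left.iter().any(|x| right.contains(x))
        }
    }
}

fn main() {
    let mut scoring = Matcher::<true> { count: 0 };
    assert!(scoring.matches(&[1, 3, 5], &[2, 3, 5]));
    assert_eq!(scoring.count, 2);

    let mut fast = Matcher::<false> { count: 0 };
    assert!(fast.matches(&[1, 3, 5], &[2, 3, 5]));
}
```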
@@ -42,11 +42,11 @@ impl PhrasePrefixWeight {
         Ok(FieldNormReader::constant(reader.max_doc(), 1))
     }
 
-    pub(crate) fn phrase_prefix_scorer<const SCORING_ENABLED: bool>(
+    pub(crate) fn phrase_scorer(
         &self,
         reader: &SegmentReader,
         boost: Score,
-    ) -> crate::Result<Option<PhrasePrefixScorer<SegmentPostings, SCORING_ENABLED>>> {
+    ) -> crate::Result<Option<PhrasePrefixScorer<SegmentPostings>>> {
         let similarity_weight_opt = self
             .similarity_weight_opt
             .as_ref()
@@ -128,20 +128,15 @@ impl PhrasePrefixWeight {
 
 impl Weight for PhrasePrefixWeight {
     fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
-        if self.similarity_weight_opt.is_some() {
-            if let Some(scorer) = self.phrase_prefix_scorer::<true>(reader, boost)? {
-                return Ok(Box::new(scorer));
-            }
+        if let Some(scorer) = self.phrase_scorer(reader, boost)? {
+            Ok(Box::new(scorer))
         } else {
-            if let Some(scorer) = self.phrase_prefix_scorer::<false>(reader, boost)? {
-                return Ok(Box::new(scorer));
-            }
+            Ok(Box::new(EmptyScorer))
         }
-        Ok(Box::new(EmptyScorer))
     }
 
     fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
-        let scorer_opt = self.phrase_prefix_scorer::<true>(reader, 1.0)?;
+        let scorer_opt = self.phrase_scorer(reader, 1.0)?;
         if scorer_opt.is_none() {
             return Err(does_not_match(doc));
         }
@@ -205,7 +200,7 @@ mod tests {
             .unwrap()
             .unwrap();
         let mut phrase_scorer = phrase_weight
-            .phrase_prefix_scorer::<true>(searcher.segment_reader(0u32), 1.0)?
+            .phrase_scorer(searcher.segment_reader(0u32), 1.0)?
             .unwrap();
         assert_eq!(phrase_scorer.doc(), 1);
         assert_eq!(phrase_scorer.phrase_count(), 2);
@@ -216,38 +211,6 @@ mod tests {
         Ok(())
     }
 
-    #[test]
-    pub fn test_phrase_no_count() -> crate::Result<()> {
-        let index = create_index(&[
-            "aa bb dd cc",
-            "aa aa bb c dd aa bb cc aa bb dc",
-            " aa bb cd",
-        ])?;
-        let schema = index.schema();
-        let text_field = schema.get_field("text").unwrap();
-        let searcher = index.reader()?.searcher();
-        let phrase_query = PhrasePrefixQuery::new(vec![
-            Term::from_field_text(text_field, "aa"),
-            Term::from_field_text(text_field, "bb"),
-            Term::from_field_text(text_field, "c"),
-        ]);
-        let enable_scoring = EnableScoring::enabled_from_searcher(&searcher);
-        let phrase_weight = phrase_query
-            .phrase_prefix_query_weight(enable_scoring)
-            .unwrap()
-            .unwrap();
-        let mut phrase_scorer = phrase_weight
-            .phrase_prefix_scorer::<false>(searcher.segment_reader(0u32), 1.0)?
-            .unwrap();
-        assert_eq!(phrase_scorer.doc(), 1);
-        assert_eq!(phrase_scorer.phrase_count(), 0);
-        assert_eq!(phrase_scorer.advance(), 2);
-        assert_eq!(phrase_scorer.doc(), 2);
-        assert_eq!(phrase_scorer.phrase_count(), 0);
-        assert_eq!(phrase_scorer.advance(), TERMINATED);
-        Ok(())
-    }
-
     #[test]
     pub fn test_phrase_count_mid() -> crate::Result<()> {
         let index = create_index(&["aa dd cc", "aa aa bb c dd aa bb cc aa dc", " aa bb cd"])?;
@@ -264,7 +227,7 @@ mod tests {
             .unwrap()
             .unwrap();
         let mut phrase_scorer = phrase_weight
-            .phrase_prefix_scorer::<true>(searcher.segment_reader(0u32), 1.0)?
+            .phrase_scorer(searcher.segment_reader(0u32), 1.0)?
             .unwrap();
         assert_eq!(phrase_scorer.doc(), 1);
         assert_eq!(phrase_scorer.phrase_count(), 2);
@@ -3,8 +3,8 @@ mod phrase_scorer;
 mod phrase_weight;
 
 pub use self::phrase_query::PhraseQuery;
+pub(crate) use self::phrase_scorer::intersection_count;
 pub use self::phrase_scorer::PhraseScorer;
-pub(crate) use self::phrase_scorer::{intersection_count, intersection_exists};
 pub use self::phrase_weight::PhraseWeight;
 
 #[cfg(test)]
@@ -58,7 +58,7 @@ pub struct PhraseScorer<TPostings: Postings> {
 }
 
 /// Returns true if and only if the two sorted arrays contain a common element
-pub(crate) fn intersection_exists(left: &[u32], right: &[u32]) -> bool {
+fn intersection_exists(left: &[u32], right: &[u32]) -> bool {
     let mut left_index = 0;
     let mut right_index = 0;
     while left_index < left.len() && right_index < right.len() {
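The hunk above only changes this function's visibility; the interesting part is its two-pointer walk over sorted position lists. A self-contained sketch of how such a function typically completes (the loop body past the `while` header is our reconstruction, not quoted from the source):

```rust
/// Returns true if and only if the two sorted arrays contain a common element.
fn intersection_exists(left: &[u32], right: &[u32]) -> bool {
    let mut left_index = 0;
    let mut right_index = 0;
    while left_index < left.len() && right_index < right.len() {
        // Advance whichever side is behind; equal values mean a hit.
        match left[left_index].cmp(&right[right_index]) {
            std::cmp::Ordering::Less => left_index += 1,
            std::cmp::Ordering::Greater => right_index += 1,
            std::cmp::Ordering::Equal => return true,
        }
    }
    false
}

fn main() {
    assert!(intersection_exists(&[1, 4, 7], &[2, 4, 6]));
    assert!(!intersection_exists(&[1, 3, 5], &[2, 4, 6]));
}
```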
@@ -185,7 +185,7 @@ mod test {
             Err(crate::TantivyError::InvalidArgument(msg)) => {
                 assert!(msg.contains("error: unclosed group"))
             }
-            res => panic!("unexpected result: {res:?}"),
+            res => panic!("unexpected result: {:?}", res),
         }
     }
 }
@@ -1,64 +1,93 @@
 use std::collections::{BTreeMap, HashMap, HashSet};
-use std::io::{self, Read, Write};
 use std::net::Ipv6Addr;
 
-use columnar::MonotonicallyMappableToU128;
-use common::{read_u32_vint_no_advance, serialize_vint_u32, BinarySerializable, DateTime, VInt};
+use common::DateTime;
 use serde_json::Map;
-pub use CompactDoc as TantivyDocument;
 
-use super::{ReferenceValue, ReferenceValueLeaf, Value};
 use crate::schema::document::{
     DeserializeError, Document, DocumentDeserialize, DocumentDeserializer,
 };
 use crate::schema::field_type::ValueParsingError;
-use crate::schema::{Facet, Field, NamedFieldDocument, OwnedValue, Schema};
+use crate::schema::field_value::FieldValueIter;
+use crate::schema::{Facet, Field, FieldValue, NamedFieldDocument, OwnedValue, Schema};
 use crate::tokenizer::PreTokenizedString;
 
-#[repr(packed)]
-#[derive(Debug, Clone)]
-/// A field value pair in the compact tantivy document
-struct FieldValueAddr {
-    pub field: u16,
-    pub value_addr: ValueAddr,
+/// TantivyDocument provides a default implementation of the `Document` trait.
+/// It is the object that can be indexed and then searched for.
+///
+/// Documents are fundamentally a collection of unordered couples `(field, value)`.
+/// In this list, one field may appear more than once.
+#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, Default)]
+pub struct TantivyDocument {
+    field_values: Vec<FieldValue>,
 }
 
-#[derive(Debug, Clone)]
-/// The default document in tantivy. It encodes data in a compact form.
-pub struct CompactDoc {
-    /// `node_data` is a vec of bytes, where each value is serialized into bytes and stored. It
-    /// includes all the data of the document and also metadata like where the nodes are located
-    /// in an object or array.
-    pub node_data: Vec<u8>,
-    /// The root (Field, Value) pairs
-    field_values: Vec<FieldValueAddr>,
-}
+impl Document for TantivyDocument {
+    type Value<'a> = &'a OwnedValue;
+    type FieldsValuesIter<'a> = FieldValueIter<'a>;
 
-impl Default for CompactDoc {
-    fn default() -> Self {
-        Self::new()
+    fn iter_fields_and_values(&self) -> Self::FieldsValuesIter<'_> {
+        FieldValueIter(self.field_values.iter())
     }
 }
 
-impl CompactDoc {
-    /// Creates a new, empty document object
-    /// The reserved capacity is for the total serialized data
-    pub fn with_capacity(bytes: usize) -> CompactDoc {
-        CompactDoc {
-            node_data: Vec::with_capacity(bytes),
-            field_values: Vec::with_capacity(4),
+impl DocumentDeserialize for TantivyDocument {
+    fn deserialize<'de, D>(mut deserializer: D) -> Result<Self, DeserializeError>
+    where D: DocumentDeserializer<'de> {
+        let mut field_values = Vec::with_capacity(deserializer.size_hint());
+        while let Some((field, value)) = deserializer.next_field()? {
+            field_values.push(FieldValue::new(field, value));
         }
-    }
 
-    /// Creates a new, empty document object
-    pub fn new() -> CompactDoc {
-        CompactDoc::with_capacity(1024)
+        Ok(Self { field_values })
+    }
+}
+
+impl From<Vec<FieldValue>> for TantivyDocument {
+    fn from(field_values: Vec<FieldValue>) -> Self {
+        Self { field_values }
+    }
+}
+
+impl PartialEq for TantivyDocument {
+    fn eq(&self, other: &Self) -> bool {
+        // super slow, but only here for tests
+        let convert_to_comparable_map = |field_values: &[FieldValue]| {
+            let mut field_value_set: HashMap<Field, HashSet<String>> = Default::default();
+            for field_value in field_values.iter() {
+                let value = serde_json::to_string(field_value.value()).unwrap();
+                field_value_set
+                    .entry(field_value.field())
+                    .or_default()
+                    .insert(value);
+            }
+            field_value_set
+        };
+        let self_field_values: HashMap<Field, HashSet<String>> =
+            convert_to_comparable_map(&self.field_values);
+        let other_field_values: HashMap<Field, HashSet<String>> =
+            convert_to_comparable_map(&other.field_values);
+        self_field_values.eq(&other_field_values)
+    }
+}
+
+impl Eq for TantivyDocument {}
+
+impl IntoIterator for TantivyDocument {
+    type Item = FieldValue;
+
+    type IntoIter = std::vec::IntoIter<FieldValue>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.field_values.into_iter()
+    }
+}
+
+impl TantivyDocument {
+    /// Creates a new, empty document object
+    pub fn new() -> TantivyDocument {
+        TantivyDocument::default()
     }
 
-    /// Skrinks the capacity of the document to fit the data
-    pub fn shrink_to_fit(&mut self) {
-        self.node_data.shrink_to_fit();
-        self.field_values.shrink_to_fit();
-    }
-
     /// Returns the length of the document.
@@ -70,111 +99,83 @@ impl CompactDoc {
|
|||||||
pub fn add_facet<F>(&mut self, field: Field, path: F)
|
pub fn add_facet<F>(&mut self, field: Field, path: F)
|
||||||
where Facet: From<F> {
|
where Facet: From<F> {
|
||||||
let facet = Facet::from(path);
|
let facet = Facet::from(path);
|
||||||
self.add_leaf_field_value(field, ReferenceValueLeaf::Facet(facet.encoded_str()));
|
let value = OwnedValue::Facet(facet);
|
||||||
|
self.add_field_value(field, value);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Add a text field.
|
/// Add a text field.
|
-    pub fn add_text<S: AsRef<str>>(&mut self, field: Field, text: S) {
-        self.add_leaf_field_value(field, ReferenceValueLeaf::Str(text.as_ref()));
+    pub fn add_text<S: ToString>(&mut self, field: Field, text: S) {
+        let value = OwnedValue::Str(text.to_string());
+        self.add_field_value(field, value);
     }

     /// Add a pre-tokenized text field.
     pub fn add_pre_tokenized_text(&mut self, field: Field, pre_tokenized_text: PreTokenizedString) {
-        self.add_leaf_field_value(field, pre_tokenized_text);
+        self.add_field_value(field, pre_tokenized_text);
     }

     /// Add a u64 field
     pub fn add_u64(&mut self, field: Field, value: u64) {
-        self.add_leaf_field_value(field, value);
+        self.add_field_value(field, value);
     }

     /// Add a IP address field. Internally only Ipv6Addr is used.
     pub fn add_ip_addr(&mut self, field: Field, value: Ipv6Addr) {
-        self.add_leaf_field_value(field, value);
+        self.add_field_value(field, value);
     }

     /// Add a i64 field
     pub fn add_i64(&mut self, field: Field, value: i64) {
-        self.add_leaf_field_value(field, value);
+        self.add_field_value(field, value);
     }

     /// Add a f64 field
     pub fn add_f64(&mut self, field: Field, value: f64) {
-        self.add_leaf_field_value(field, value);
+        self.add_field_value(field, value);
     }

     /// Add a bool field
     pub fn add_bool(&mut self, field: Field, value: bool) {
-        self.add_leaf_field_value(field, value);
+        self.add_field_value(field, value);
     }

     /// Add a date field with unspecified time zone offset
     pub fn add_date(&mut self, field: Field, value: DateTime) {
-        self.add_leaf_field_value(field, value);
+        self.add_field_value(field, value);
     }

     /// Add a bytes field
-    pub fn add_bytes(&mut self, field: Field, value: &[u8]) {
-        self.add_leaf_field_value(field, value);
+    pub fn add_bytes<T: Into<Vec<u8>>>(&mut self, field: Field, value: T) {
+        self.add_field_value(field, value.into());
     }

     /// Add a dynamic object field
     pub fn add_object(&mut self, field: Field, object: BTreeMap<String, OwnedValue>) {
-        self.add_field_value(field, &OwnedValue::from(object));
+        self.add_field_value(field, object);
     }

     /// Add a (field, value) to the document.
-    ///
-    /// `OwnedValue` implements Value, which should be easiest to use, but is not the most
-    /// performant.
-    pub fn add_field_value<'a, V: Value<'a>>(&mut self, field: Field, value: V) {
-        let field_value = FieldValueAddr {
-            field: field
-                .field_id()
-                .try_into()
-                .expect("support only up to u16::MAX field ids"),
-            value_addr: self.add_value(value),
-        };
-        self.field_values.push(field_value);
-    }
-
-    /// Add a (field, leaf value) to the document.
-    /// Leaf values don't have nested values.
-    pub fn add_leaf_field_value<'a, T: Into<ReferenceValueLeaf<'a>>>(
-        &mut self,
-        field: Field,
-        typed_val: T,
-    ) {
+    pub fn add_field_value<T: Into<OwnedValue>>(&mut self, field: Field, typed_val: T) {
         let value = typed_val.into();
-        let field_value = FieldValueAddr {
-            field: field
-                .field_id()
-                .try_into()
-                .expect("support only up to u16::MAX field ids"),
-            value_addr: self.add_value_leaf(value),
-        };
+        let field_value = FieldValue { field, value };
         self.field_values.push(field_value);
     }

     /// field_values accessor
-    pub fn field_values(&self) -> impl Iterator<Item = (Field, CompactDocValue<'_>)> {
-        self.field_values.iter().map(|field_val| {
-            let field = Field::from_field_id(field_val.field as u32);
-            let val = self.get_compact_doc_value(field_val.value_addr);
-            (field, val)
-        })
+    pub fn field_values(&self) -> &[FieldValue] {
+        &self.field_values
     }

-    /// Returns all of the `ReferenceValue`s associated the given field
-    pub fn get_all(&self, field: Field) -> impl Iterator<Item = CompactDocValue<'_>> + '_ {
+    /// Returns all of the `FieldValue`s associated the given field
+    pub fn get_all(&self, field: Field) -> impl Iterator<Item = &OwnedValue> {
         self.field_values
             .iter()
-            .filter(move |field_value| Field::from_field_id(field_value.field as u32) == field)
-            .map(|val| self.get_compact_doc_value(val.value_addr))
+            .filter(move |field_value| field_value.field() == field)
+            .map(FieldValue::value)
     }

-    /// Returns the first `ReferenceValue` associated the given field
-    pub fn get_first(&self, field: Field) -> Option<CompactDocValue<'_>> {
+    /// Returns the first `FieldValue` associated the given field
+    pub fn get_first(&self, field: Field) -> Option<&OwnedValue> {
         self.get_all(field).next()
     }
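For orientation, a minimal sketch of the write/read surface these hunks restore; the import paths and schema setup are assumptions, not part of the diff. The `add_*` helpers all funnel into `add_field_value` with any `Into<OwnedValue>`, and `get_first`/`get_all` hand back `&OwnedValue` references:

    use tantivy::schema::{OwnedValue, Schema, TEXT};
    use tantivy::TantivyDocument;

    fn main() {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field("title", TEXT);
        let _schema = schema_builder.build();

        let mut doc = TantivyDocument::default();
        doc.add_text(title, "My title"); // any S: ToString
        doc.add_field_value(title, "second".to_string()); // any T: Into<OwnedValue>

        // get_first/get_all now yield &OwnedValue references.
        assert_eq!(
            doc.get_first(title),
            Some(&OwnedValue::Str("My title".to_string()))
        );
        assert_eq!(doc.get_all(title).count(), 2);
    }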
@@ -182,12 +183,12 @@ impl CompactDoc {
     pub fn convert_named_doc(
         schema: &Schema,
         named_doc: NamedFieldDocument,
-    ) -> Result<Self, DocParsingError> {
-        let mut document = Self::new();
+    ) -> Result<TantivyDocument, DocParsingError> {
+        let mut document = TantivyDocument::new();
         for (field_name, values) in named_doc.0 {
             if let Ok(field) = schema.get_field(&field_name) {
                 for value in values {
-                    document.add_field_value(field, &value);
+                    document.add_field_value(field, value);
                 }
             }
         }
@@ -195,7 +196,7 @@ impl CompactDoc {
     }

     /// Build a document object from a json-object.
-    pub fn parse_json(schema: &Schema, doc_json: &str) -> Result<Self, DocParsingError> {
+    pub fn parse_json(schema: &Schema, doc_json: &str) -> Result<TantivyDocument, DocParsingError> {
         let json_obj: Map<String, serde_json::Value> =
             serde_json::from_str(doc_json).map_err(|_| DocParsingError::invalid_json(doc_json))?;
         Self::from_json_object(schema, json_obj)
@@ -205,8 +206,8 @@ impl CompactDoc {
     pub fn from_json_object(
         schema: &Schema,
         json_obj: Map<String, serde_json::Value>,
-    ) -> Result<Self, DocParsingError> {
-        let mut doc = Self::default();
+    ) -> Result<TantivyDocument, DocParsingError> {
+        let mut doc = TantivyDocument::default();
         for (field_name, json_value) in json_obj {
             if let Ok(field) = schema.get_field(&field_name) {
                 let field_entry = schema.get_field_entry(field);
@@ -217,482 +218,20 @@ impl CompactDoc {
                         let value = field_type
                             .value_from_json(json_item)
                             .map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
-                        doc.add_field_value(field, &value);
+                        doc.add_field_value(field, value);
                     }
                 }
                 _ => {
                     let value = field_type
                         .value_from_json(json_value)
                         .map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
-                    doc.add_field_value(field, &value);
+                    doc.add_field_value(field, value);
                 }
             }
         }
         Ok(doc)
     }

-    fn add_value_leaf(&mut self, leaf: ReferenceValueLeaf) -> ValueAddr {
-        let type_id = ValueType::from(&leaf);
-        // Write into `node_data` and return u32 position as its address
-        // Null and bool are inlined into the address
-        let val_addr = match leaf {
-            ReferenceValueLeaf::Null => 0,
-            ReferenceValueLeaf::Str(bytes) => {
-                write_bytes_into(&mut self.node_data, bytes.as_bytes())
-            }
-            ReferenceValueLeaf::Facet(bytes) => {
-                write_bytes_into(&mut self.node_data, bytes.as_bytes())
-            }
-            ReferenceValueLeaf::Bytes(bytes) => write_bytes_into(&mut self.node_data, bytes),
-            ReferenceValueLeaf::U64(num) => write_into(&mut self.node_data, num),
-            ReferenceValueLeaf::I64(num) => write_into(&mut self.node_data, num),
-            ReferenceValueLeaf::F64(num) => write_into(&mut self.node_data, num),
-            ReferenceValueLeaf::Bool(b) => b as u32,
-            ReferenceValueLeaf::Date(date) => {
-                write_into(&mut self.node_data, date.into_timestamp_nanos())
-            }
-            ReferenceValueLeaf::IpAddr(num) => write_into(&mut self.node_data, num.to_u128()),
-            ReferenceValueLeaf::PreTokStr(pre_tok) => write_into(&mut self.node_data, *pre_tok),
-        };
-        ValueAddr { type_id, val_addr }
-    }
-    /// Adds a value and returns in address into the
-    fn add_value<'a, V: Value<'a>>(&mut self, value: V) -> ValueAddr {
-        let value = value.as_value();
-        let type_id = ValueType::from(&value);
-        match value {
-            ReferenceValue::Leaf(leaf) => self.add_value_leaf(leaf),
-            ReferenceValue::Array(elements) => {
-                // addresses of the elements in node_data
-                // Reusing a vec would be nicer, but it's not easy because of the recursion
-                // A global vec would work if every writer get it's discriminator
-                let mut addresses = Vec::new();
-                for elem in elements {
-                    let value_addr = self.add_value(elem);
-                    write_into(&mut addresses, value_addr);
-                }
-                ValueAddr {
-                    type_id,
-                    val_addr: write_bytes_into(&mut self.node_data, &addresses),
-                }
-            }
-            ReferenceValue::Object(entries) => {
-                // addresses of the elements in node_data
-                let mut addresses = Vec::new();
-                for (key, value) in entries {
-                    let key_addr = self.add_value_leaf(ReferenceValueLeaf::Str(key));
-                    let value_addr = self.add_value(value);
-                    write_into(&mut addresses, key_addr);
-                    write_into(&mut addresses, value_addr);
-                }
-                ValueAddr {
-                    type_id,
-                    val_addr: write_bytes_into(&mut self.node_data, &addresses),
-                }
-            }
-        }
-    }
-
-    /// Get CompactDocValue for address
-    fn get_compact_doc_value(&self, value_addr: ValueAddr) -> CompactDocValue<'_> {
-        CompactDocValue {
-            container: self,
-            value_addr,
-        }
-    }
-
-    /// get &[u8] reference from node_data
-    fn extract_bytes(&self, addr: Addr) -> &[u8] {
-        binary_deserialize_bytes(self.get_slice(addr))
-    }
-
-    /// get &str reference from node_data
-    fn extract_str(&self, addr: Addr) -> &str {
-        let data = self.extract_bytes(addr);
-        // Utf-8 checks would have a noticeable performance overhead here
-        unsafe { std::str::from_utf8_unchecked(data) }
-    }
-
-    /// deserialized owned value from node_data
-    fn read_from<T: BinarySerializable>(&self, addr: Addr) -> io::Result<T> {
-        let data_slice = &self.node_data[addr as usize..];
-        let mut cursor = std::io::Cursor::new(data_slice);
-        T::deserialize(&mut cursor)
-    }
-
-    /// get slice from address. The returned slice is open ended
-    fn get_slice(&self, addr: Addr) -> &[u8] {
-        &self.node_data[addr as usize..]
-    }
-}
-
-/// BinarySerializable alternative to read references
-fn binary_deserialize_bytes(data: &[u8]) -> &[u8] {
-    let (len, bytes_read) = read_u32_vint_no_advance(data);
-    &data[bytes_read..bytes_read + len as usize]
-}
-
-/// Write bytes and return the position of the written data.
-///
-/// BinarySerializable alternative to write references
-fn write_bytes_into(vec: &mut Vec<u8>, data: &[u8]) -> u32 {
-    let pos = vec.len() as u32;
-    let mut buf = [0u8; 8];
-    let len_vint_bytes = serialize_vint_u32(data.len() as u32, &mut buf);
-    vec.extend_from_slice(len_vint_bytes);
-    vec.extend_from_slice(data);
-    pos
-}
-
-/// Serialize and return the position
-fn write_into<T: BinarySerializable>(vec: &mut Vec<u8>, value: T) -> u32 {
-    let pos = vec.len() as u32;
-    value.serialize(vec).unwrap();
-    pos
-}
-
-impl PartialEq for CompactDoc {
-    fn eq(&self, other: &Self) -> bool {
-        // super slow, but only here for tests
-        let convert_to_comparable_map = |doc: &CompactDoc| {
-            let mut field_value_set: HashMap<Field, HashSet<String>> = Default::default();
-            for field_value in doc.field_values.iter() {
-                let value: OwnedValue = doc.get_compact_doc_value(field_value.value_addr).into();
-                let value = serde_json::to_string(&value).unwrap();
-                field_value_set
-                    .entry(Field::from_field_id(field_value.field as u32))
-                    .or_default()
-                    .insert(value);
-            }
-            field_value_set
-        };
-        let self_field_values: HashMap<Field, HashSet<String>> = convert_to_comparable_map(self);
-        let other_field_values: HashMap<Field, HashSet<String>> = convert_to_comparable_map(other);
-        self_field_values.eq(&other_field_values)
-    }
-}
-
-impl Eq for CompactDoc {}
-
-impl DocumentDeserialize for CompactDoc {
-    fn deserialize<'de, D>(mut deserializer: D) -> Result<Self, DeserializeError>
-    where D: DocumentDeserializer<'de> {
-        let mut doc = CompactDoc::default();
-        // TODO: Deserializing into OwnedValue is wasteful. The deserializer should be able to work
-        // on slices and referenced data.
-        while let Some((field, value)) = deserializer.next_field::<OwnedValue>()? {
-            doc.add_field_value(field, &value);
-        }
-        Ok(doc)
-    }
-}
-
-/// A value of Compact Doc needs a reference to the container to extract its payload
-#[derive(Debug, Clone, Copy)]
-pub struct CompactDocValue<'a> {
-    container: &'a CompactDoc,
-    value_addr: ValueAddr,
-}
-impl PartialEq for CompactDocValue<'_> {
-    fn eq(&self, other: &Self) -> bool {
-        let value1: OwnedValue = (*self).into();
-        let value2: OwnedValue = (*other).into();
-        value1 == value2
-    }
-}
-impl<'a> From<CompactDocValue<'a>> for OwnedValue {
-    fn from(value: CompactDocValue) -> Self {
-        value.as_value().into()
-    }
-}
-impl<'a> Value<'a> for CompactDocValue<'a> {
-    type ArrayIter = CompactDocArrayIter<'a>;
-
-    type ObjectIter = CompactDocObjectIter<'a>;
-
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        self.get_ref_value().unwrap()
-    }
-}
-impl<'a> CompactDocValue<'a> {
-    fn get_ref_value(&self) -> io::Result<ReferenceValue<'a, CompactDocValue<'a>>> {
-        let addr = self.value_addr.val_addr;
-        match self.value_addr.type_id {
-            ValueType::Null => Ok(ReferenceValueLeaf::Null.into()),
-            ValueType::Str => {
-                let str_ref = self.container.extract_str(addr);
-                Ok(ReferenceValueLeaf::Str(str_ref).into())
-            }
-            ValueType::Facet => {
-                let str_ref = self.container.extract_str(addr);
-                Ok(ReferenceValueLeaf::Facet(str_ref).into())
-            }
-            ValueType::Bytes => {
-                let data = self.container.extract_bytes(addr);
-                Ok(ReferenceValueLeaf::Bytes(data).into())
-            }
-            ValueType::U64 => self
-                .container
-                .read_from::<u64>(addr)
-                .map(ReferenceValueLeaf::U64)
-                .map(Into::into),
-            ValueType::I64 => self
-                .container
-                .read_from::<i64>(addr)
-                .map(ReferenceValueLeaf::I64)
-                .map(Into::into),
-            ValueType::F64 => self
-                .container
-                .read_from::<f64>(addr)
-                .map(ReferenceValueLeaf::F64)
-                .map(Into::into),
-            ValueType::Bool => Ok(ReferenceValueLeaf::Bool(addr != 0).into()),
-            ValueType::Date => self
-                .container
-                .read_from::<i64>(addr)
-                .map(|ts| ReferenceValueLeaf::Date(DateTime::from_timestamp_nanos(ts)))
-                .map(Into::into),
-            ValueType::IpAddr => self
-                .container
-                .read_from::<u128>(addr)
-                .map(|num| ReferenceValueLeaf::IpAddr(Ipv6Addr::from_u128(num)))
-                .map(Into::into),
-            ValueType::PreTokStr => self
-                .container
-                .read_from::<PreTokenizedString>(addr)
-                .map(Into::into)
-                .map(ReferenceValueLeaf::PreTokStr)
-                .map(Into::into),
-            ValueType::Object => Ok(ReferenceValue::Object(CompactDocObjectIter::new(
-                self.container,
-                addr,
-            )?)),
-            ValueType::Array => Ok(ReferenceValue::Array(CompactDocArrayIter::new(
-                self.container,
-                addr,
-            )?)),
-        }
-    }
-}
-
-/// The address in the vec
-type Addr = u32;
-
-#[derive(Clone, Copy, Default)]
-#[repr(packed)]
-/// The value type and the address to its payload in the container.
-struct ValueAddr {
-    type_id: ValueType,
-    /// This is the address to the value in the vec, except for bool and null, which are inlined
-    val_addr: Addr,
-}
-impl BinarySerializable for ValueAddr {
-    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
-        self.type_id.serialize(writer)?;
-        VInt(self.val_addr as u64).serialize(writer)
-    }
-
-    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
-        let type_id = ValueType::deserialize(reader)?;
-        let val_addr = VInt::deserialize(reader)?.0 as u32;
-        Ok(ValueAddr { type_id, val_addr })
-    }
-}
-impl std::fmt::Debug for ValueAddr {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let val_addr = self.val_addr;
-        f.write_fmt(format_args!("{:?} at {:?}", self.type_id, val_addr))
-    }
-}
-
-/// A enum representing a value for tantivy to index.
-///
-/// Any changes need to be reflected in `BinarySerializable` for `ValueType`
-///
-/// We can't use [schema::Type] or [columnar::ColumnType] here, because they are missing
-/// some items like Array and PreTokStr.
-#[derive(Default, Clone, Copy, Debug, PartialEq)]
-#[repr(u8)]
-pub enum ValueType {
-    /// A null value.
-    #[default]
-    Null = 0,
-    /// The str type is used for any text information.
-    Str = 1,
-    /// Unsigned 64-bits Integer `u64`
-    U64 = 2,
-    /// Signed 64-bits Integer `i64`
-    I64 = 3,
-    /// 64-bits Float `f64`
-    F64 = 4,
-    /// Date/time with nanoseconds precision
-    Date = 5,
-    /// Facet
-    Facet = 6,
-    /// Arbitrarily sized byte array
-    Bytes = 7,
-    /// IpV6 Address. Internally there is no IpV4, it needs to be converted to `Ipv6Addr`.
-    IpAddr = 8,
-    /// Bool value
-    Bool = 9,
-    /// Pre-tokenized str type,
-    PreTokStr = 10,
-    /// Object
-    Object = 11,
-    /// Pre-tokenized str type,
-    Array = 12,
-}
-
-impl BinarySerializable for ValueType {
-    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
-        (*self as u8).serialize(writer)?;
-        Ok(())
-    }
-
-    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
-        let num = u8::deserialize(reader)?;
-        let type_id = if (0..=12).contains(&num) {
-            unsafe { std::mem::transmute(num) }
-        } else {
-            return Err(io::Error::new(
-                io::ErrorKind::InvalidData,
-                format!("Invalid value type id: {num}"),
-            ));
-        };
-        Ok(type_id)
-    }
-}
-
-impl<'a, V: Value<'a>> From<&ReferenceValue<'a, V>> for ValueType {
-    fn from(value: &ReferenceValue<'a, V>) -> Self {
-        match value {
-            ReferenceValue::Leaf(leaf) => leaf.into(),
-            ReferenceValue::Array(_) => ValueType::Array,
-            ReferenceValue::Object(_) => ValueType::Object,
-        }
-    }
-}
-impl<'a> From<&ReferenceValueLeaf<'a>> for ValueType {
-    fn from(value: &ReferenceValueLeaf<'a>) -> Self {
-        match value {
-            ReferenceValueLeaf::Null => ValueType::Null,
-            ReferenceValueLeaf::Str(_) => ValueType::Str,
-            ReferenceValueLeaf::U64(_) => ValueType::U64,
-            ReferenceValueLeaf::I64(_) => ValueType::I64,
-            ReferenceValueLeaf::F64(_) => ValueType::F64,
-            ReferenceValueLeaf::Bool(_) => ValueType::Bool,
-            ReferenceValueLeaf::Date(_) => ValueType::Date,
-            ReferenceValueLeaf::IpAddr(_) => ValueType::IpAddr,
-            ReferenceValueLeaf::PreTokStr(_) => ValueType::PreTokStr,
-            ReferenceValueLeaf::Facet(_) => ValueType::Facet,
-            ReferenceValueLeaf::Bytes(_) => ValueType::Bytes,
-        }
-    }
-}
-
-#[derive(Debug, Clone)]
-/// The Iterator for the object values in the compact document
-pub struct CompactDocObjectIter<'a> {
-    container: &'a CompactDoc,
-    node_addresses_slice: &'a [u8],
-}
-
-impl<'a> CompactDocObjectIter<'a> {
-    fn new(container: &'a CompactDoc, addr: Addr) -> io::Result<Self> {
-        // Objects are `&[ValueAddr]` serialized into bytes
-        let node_addresses_slice = container.extract_bytes(addr);
-        Ok(Self {
-            container,
-            node_addresses_slice,
-        })
-    }
-}
-
-impl<'a> Iterator for CompactDocObjectIter<'a> {
-    type Item = (&'a str, CompactDocValue<'a>);
-
-    fn next(&mut self) -> Option<Self::Item> {
-        if self.node_addresses_slice.is_empty() {
-            return None;
-        }
-        let key_addr = ValueAddr::deserialize(&mut self.node_addresses_slice).ok()?;
-        let key = self.container.extract_str(key_addr.val_addr);
-        let value = ValueAddr::deserialize(&mut self.node_addresses_slice).ok()?;
-        let value = CompactDocValue {
-            container: self.container,
-            value_addr: value,
-        };
-        Some((key, value))
-    }
-}
-
-#[derive(Debug, Clone)]
-/// The Iterator for the array values in the compact document
-pub struct CompactDocArrayIter<'a> {
-    container: &'a CompactDoc,
-    node_addresses_slice: &'a [u8],
-}
-
-impl<'a> CompactDocArrayIter<'a> {
-    fn new(container: &'a CompactDoc, addr: Addr) -> io::Result<Self> {
-        // Arrays are &[ValueAddr] serialized into bytes
-        let node_addresses_slice = container.extract_bytes(addr);
-        Ok(Self {
-            container,
-            node_addresses_slice,
-        })
-    }
-}
-
-impl<'a> Iterator for CompactDocArrayIter<'a> {
-    type Item = CompactDocValue<'a>;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        if self.node_addresses_slice.is_empty() {
-            return None;
-        }
-        let value = ValueAddr::deserialize(&mut self.node_addresses_slice).ok()?;
-        let value = CompactDocValue {
-            container: self.container,
-            value_addr: value,
-        };
-        Some(value)
-    }
-}
-
-impl Document for CompactDoc {
-    type Value<'a> = CompactDocValue<'a>;
-    type FieldsValuesIter<'a> = FieldValueIterRef<'a>;
-
-    fn iter_fields_and_values(&self) -> Self::FieldsValuesIter<'_> {
-        FieldValueIterRef {
-            slice: self.field_values.iter(),
-            container: self,
-        }
-    }
-}
-
-/// A helper wrapper for creating an iterator over the field values
-pub struct FieldValueIterRef<'a> {
-    slice: std::slice::Iter<'a, FieldValueAddr>,
-    container: &'a CompactDoc,
-}
-
-impl<'a> Iterator for FieldValueIterRef<'a> {
-    type Item = (Field, CompactDocValue<'a>);
-
-    fn next(&mut self) -> Option<Self::Item> {
-        self.slice.next().map(|field_value| {
-            (
-                Field::from_field_id(field_value.field as u32),
-                CompactDocValue::<'a> {
-                    container: self.container,
-                    value_addr: field_value.value_addr,
-                },
-            )
-        })
-    }
-}
 }

 /// Error that may happen when deserializing
@@ -725,40 +264,7 @@ mod tests {
         let text_field = schema_builder.add_text_field("title", TEXT);
         let mut doc = TantivyDocument::default();
         doc.add_text(text_field, "My title");
-        assert_eq!(doc.field_values().count(), 1);
-
-        let schema = schema_builder.build();
-        let _val = doc.get_first(text_field).unwrap();
-        let _json = doc.to_named_doc(&schema);
-    }
-
-    #[test]
-    fn test_json_value() {
-        let json_str = r#"{
-            "toto": "titi",
-            "float": -0.2,
-            "bool": true,
-            "unsigned": 1,
-            "signed": -2,
-            "complexobject": {
-                "field.with.dot": 1
-            },
-            "date": "1985-04-12T23:20:50.52Z",
-            "my_arr": [2, 3, {"my_key": "two tokens"}, 4, {"nested_array": [2, 5, 6, [7, 8, {"a": [{"d": {"e":[99]}}, 9000]}, 9, 10], [5, 5]]}]
-        }"#;
-        let json_val: std::collections::BTreeMap<String, OwnedValue> =
-            serde_json::from_str(json_str).unwrap();
-
-        let mut schema_builder = Schema::builder();
-        let json_field = schema_builder.add_json_field("json", TEXT);
-        let mut doc = TantivyDocument::default();
-        doc.add_object(json_field, json_val);
-
-        let schema = schema_builder.build();
-        let json = doc.to_json(&schema);
-        let actual_json: serde_json::Value = serde_json::from_str(&json).unwrap();
-        let expected_json: serde_json::Value = serde_json::from_str(json_str).unwrap();
-        assert_eq!(actual_json["json"][0], expected_json);
+        assert_eq!(doc.field_values().len(), 1);
     }

     // TODO: Should this be re-added with the serialize method
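A small usage sketch of the `parse_json` path reverted above (import paths assumed, not part of the diff): the JSON object is validated against the schema field by field and lands in a plain `TantivyDocument`:

    use tantivy::schema::{OwnedValue, Schema, TEXT};
    use tantivy::TantivyDocument;

    fn main() {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field("title", TEXT);
        let schema = schema_builder.build();

        // parse_json -> from_json_object builds the document directly.
        let doc = TantivyDocument::parse_json(&schema, r#"{"title": "My title"}"#).unwrap();
        assert_eq!(
            doc.get_first(title),
            Some(&OwnedValue::Str("My title".to_string()))
        );
    }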
@@ -5,39 +5,21 @@
 //! and don't care about some of the more specialised types or only want to customise
 //! part of the document structure.
 use std::collections::{btree_map, hash_map, BTreeMap, HashMap};
-use std::iter::Empty;
-use std::net::Ipv6Addr;
-
-use common::DateTime;
 use serde_json::Number;
-use time::format_description::well_known::Rfc3339;
-use time::OffsetDateTime;
-
-use super::facet::Facet;
 use super::ReferenceValueLeaf;
 use crate::schema::document::{
     ArrayAccess, DeserializeError, Document, DocumentDeserialize, DocumentDeserializer,
     ObjectAccess, ReferenceValue, Value, ValueDeserialize, ValueDeserializer, ValueVisitor,
 };
 use crate::schema::Field;
-use crate::tokenizer::PreTokenizedString;

 // Serde compatibility support.
-pub fn can_be_rfc3339_date_time(text: &str) -> bool {
-    if let Some(&first_byte) = text.as_bytes().first() {
-        if first_byte.is_ascii_digit() {
-            return true;
-        }
-    }
-
-    false
-}
-
 impl<'a> Value<'a> for &'a serde_json::Value {
     type ArrayIter = std::slice::Iter<'a, serde_json::Value>;
     type ObjectIter = JsonObjectIter<'a>;

-    #[inline]
     fn as_value(&self) -> ReferenceValue<'a, Self> {
         match self {
             serde_json::Value::Null => ReferenceValueLeaf::Null.into(),
@@ -53,19 +35,7 @@ impl<'a> Value<'a> for &'a serde_json::Value {
                     panic!("Unsupported serde_json number {number}");
                 }
             }
-            serde_json::Value::String(text) => {
-                if can_be_rfc3339_date_time(text) {
-                    match OffsetDateTime::parse(text, &Rfc3339) {
-                        Ok(dt) => {
-                            let dt_utc = dt.to_offset(time::UtcOffset::UTC);
-                            ReferenceValueLeaf::Date(DateTime::from_utc(dt_utc)).into()
-                        }
-                        Err(_) => ReferenceValueLeaf::Str(text).into(),
-                    }
-                } else {
-                    ReferenceValueLeaf::Str(text).into()
-                }
-            }
+            serde_json::Value::String(val) => ReferenceValueLeaf::Str(val).into(),
             serde_json::Value::Array(elements) => ReferenceValue::Array(elements.iter()),
             serde_json::Value::Object(object) => {
                 ReferenceValue::Object(JsonObjectIter(object.iter()))
@@ -74,126 +44,6 @@ impl<'a> Value<'a> for &'a serde_json::Value {
     }
 }

-impl<'a> Value<'a> for &'a String {
-    type ArrayIter = Empty<&'a String>;
-    type ObjectIter = Empty<(&'a str, &'a String)>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::Str(self))
-    }
-}
-
-impl<'a> Value<'a> for &'a Facet {
-    type ArrayIter = Empty<&'a Facet>;
-    type ObjectIter = Empty<(&'a str, &'a Facet)>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::Facet(self.encoded_str()))
-    }
-}
-
-impl<'a> Value<'a> for &'a u64 {
-    type ArrayIter = Empty<&'a u64>;
-    type ObjectIter = Empty<(&'a str, &'a u64)>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::U64(**self))
-    }
-}
-
-impl<'a> Value<'a> for &'a i64 {
-    type ArrayIter = Empty<&'a i64>;
-    type ObjectIter = Empty<(&'a str, &'a i64)>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::I64(**self))
-    }
-}
-impl<'a> Value<'a> for &'a f64 {
-    type ArrayIter = Empty<&'a f64>;
-    type ObjectIter = Empty<(&'a str, &'a f64)>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::F64(**self))
-    }
-}
-impl<'a> Value<'a> for &'a bool {
-    type ArrayIter = Empty<&'a bool>;
-    type ObjectIter = Empty<(&'a str, &'a bool)>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::Bool(**self))
-    }
-}
-impl<'a> Value<'a> for &'a str {
-    type ArrayIter = Empty<&'a str>;
-    type ObjectIter = Empty<(&'a str, &'a str)>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::Str(self))
-    }
-}
-impl<'a> Value<'a> for &'a &'a str {
-    type ArrayIter = Empty<&'a &'a str>;
-    type ObjectIter = Empty<(&'a str, &'a &'a str)>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::Str(self))
-    }
-}
-
-impl<'a> Value<'a> for &'a [u8] {
-    type ArrayIter = Empty<&'a [u8]>;
-    type ObjectIter = Empty<(&'a str, &'a [u8])>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::Bytes(self))
-    }
-}
-
-impl<'a> Value<'a> for &'a &'a [u8] {
-    type ArrayIter = Empty<&'a &'a [u8]>;
-    type ObjectIter = Empty<(&'a str, &'a &'a [u8])>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::Bytes(self))
-    }
-}
-
-impl<'a> Value<'a> for &'a Vec<u8> {
-    type ArrayIter = Empty<&'a Vec<u8>>;
-    type ObjectIter = Empty<(&'a str, &'a Vec<u8>)>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::Bytes(self))
-    }
-}
-
-impl<'a> Value<'a> for &'a DateTime {
-    type ArrayIter = Empty<&'a DateTime>;
-    type ObjectIter = Empty<(&'a str, &'a DateTime)>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::Date(**self))
-    }
-}
-impl<'a> Value<'a> for &'a Ipv6Addr {
-    type ArrayIter = Empty<&'a Ipv6Addr>;
-    type ObjectIter = Empty<(&'a str, &'a Ipv6Addr)>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::IpAddr(**self))
-    }
-}
-impl<'a> Value<'a> for &'a PreTokenizedString {
-    type ArrayIter = Empty<&'a PreTokenizedString>;
-    type ObjectIter = Empty<(&'a str, &'a PreTokenizedString)>;
-    #[inline]
-    fn as_value(&self) -> ReferenceValue<'a, Self> {
-        ReferenceValue::Leaf(ReferenceValueLeaf::PreTokStr(Box::new((*self).clone())))
-    }
-}

 impl ValueDeserialize for serde_json::Value {
     fn deserialize<'de, D>(deserializer: D) -> Result<Self, DeserializeError>
     where D: ValueDeserializer<'de> {
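One consequence of the hunk above, sketched under assumed re-export paths: a JSON string always maps to a `Str` leaf in this `Value` impl now, and the RFC 3339 date sniffing moves later in the pipeline, into `OwnedValue`'s serde support:

    use serde_json::json;
    use tantivy::schema::document::{ReferenceValue, ReferenceValueLeaf, Value};

    fn main() {
        let v = json!("2019-10-12T07:20:50.52Z");
        // Even a date-shaped string stays a Str leaf at this layer.
        match (&v).as_value() {
            ReferenceValue::Leaf(ReferenceValueLeaf::Str(s)) => assert!(s.starts_with("2019")),
            _ => panic!("expected a string leaf"),
        }
    }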
@@ -172,9 +172,7 @@ pub use self::de::{
     ArrayAccess, DeserializeError, DocumentDeserialize, DocumentDeserializer, ObjectAccess,
     ValueDeserialize, ValueDeserializer, ValueType, ValueVisitor,
 };
-pub use self::default_document::{
-    CompactDocArrayIter, CompactDocObjectIter, CompactDocValue, DocParsingError, TantivyDocument,
-};
+pub use self::default_document::{DocParsingError, TantivyDocument};
 pub use self::owned_value::OwnedValue;
 pub(crate) use self::se::BinaryDocumentSerializer;
 pub use self::value::{ReferenceValue, ReferenceValueLeaf, Value};
@@ -235,7 +233,7 @@ pub trait Document: Send + Sync + 'static {
         let field_name = schema.get_field_name(field);
         let values: Vec<OwnedValue> = field_values
             .into_iter()
-            .map(|val| OwnedValue::from(val.as_value()))
+            .map(|val| val.as_value().into())
             .collect();
         field_map.insert(field_name.to_string(), values);
     }
@@ -8,7 +8,6 @@ use serde::de::{MapAccess, SeqAccess};
 use time::format_description::well_known::Rfc3339;
 use time::OffsetDateTime;

-use super::existing_type_impls::can_be_rfc3339_date_time;
 use super::ReferenceValueLeaf;
 use crate::schema::document::{
     ArrayAccess, DeserializeError, ObjectAccess, ReferenceValue, Value, ValueDeserialize,
@@ -376,6 +375,16 @@ impl From<BTreeMap<String, OwnedValue>> for OwnedValue {
     }
 }

+fn can_be_rfc3339_date_time(text: &str) -> bool {
+    if let Some(&first_byte) = text.as_bytes().first() {
+        if first_byte.is_ascii_digit() {
+            return true;
+        }
+    }
+
+    false
+}
+
 impl From<serde_json::Value> for OwnedValue {
     fn from(value: serde_json::Value) -> Self {
         match value {
@@ -463,7 +472,6 @@ mod tests {
         let mut doc = TantivyDocument::default();
         doc.add_bytes(bytes_field, "".as_bytes());
         let json_string = doc.to_json(&schema);
-
         assert_eq!(json_string, r#"{"my_bytes":[""]}"#);
     }
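The `can_be_rfc3339_date_time` gate added above is only a cheap first-byte filter before an actual RFC 3339 parse is attempted; a self-contained check of its behavior, with the function body copied from the hunk so the example stands alone:

    // Copied from the hunk above so the example is self-contained.
    fn can_be_rfc3339_date_time(text: &str) -> bool {
        if let Some(&first_byte) = text.as_bytes().first() {
            if first_byte.is_ascii_digit() {
                return true;
            }
        }
        false
    }

    fn main() {
        // Strings starting with an ASCII digit are candidates for RFC 3339 parsing...
        assert!(can_be_rfc3339_date_time("1985-04-12T23:20:50.52Z"));
        // ...and anything else skips the date-parsing attempt entirely.
        assert!(!can_be_rfc3339_date_time("hello world"));
    }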
@@ -25,7 +25,6 @@ where W: Write

     /// Attempts to serialize a given document and write the output
     /// to the writer.
-    #[inline]
     pub(crate) fn serialize_doc<D>(&mut self, doc: &D) -> io::Result<()>
     where D: Document {
         let stored_field_values = || {
@@ -58,8 +57,9 @@ where W: Write
             return Err(io::Error::new(
                 io::ErrorKind::Other,
                 format!(
-                    "Unexpected number of entries written to serializer, expected \
-                     {num_field_values} entries, got {actual_length} entries",
+                    "Unexpected number of entries written to serializer, expected {} entries, got \
+                     {} entries",
+                    num_field_values, actual_length,
                 ),
             ));
         }
@@ -679,7 +679,6 @@ mod tests {
         );
     }

-    #[inline]
     fn serialize_doc<D: Document>(doc: &D, schema: &Schema) -> Vec<u8> {
         let mut writer = Vec::new();

@@ -159,69 +159,6 @@ pub enum ReferenceValueLeaf<'a> {
     PreTokStr(Box<PreTokenizedString>),
 }

-impl From<u64> for ReferenceValueLeaf<'_> {
-    #[inline]
-    fn from(value: u64) -> Self {
-        ReferenceValueLeaf::U64(value)
-    }
-}
-
-impl From<i64> for ReferenceValueLeaf<'_> {
-    #[inline]
-    fn from(value: i64) -> Self {
-        ReferenceValueLeaf::I64(value)
-    }
-}
-
-impl From<f64> for ReferenceValueLeaf<'_> {
-    #[inline]
-    fn from(value: f64) -> Self {
-        ReferenceValueLeaf::F64(value)
-    }
-}
-
-impl From<bool> for ReferenceValueLeaf<'_> {
-    #[inline]
-    fn from(value: bool) -> Self {
-        ReferenceValueLeaf::Bool(value)
-    }
-}
-
-impl<'a> From<&'a str> for ReferenceValueLeaf<'a> {
-    #[inline]
-    fn from(value: &'a str) -> Self {
-        ReferenceValueLeaf::Str(value)
-    }
-}
-
-impl<'a> From<&'a [u8]> for ReferenceValueLeaf<'a> {
-    #[inline]
-    fn from(value: &'a [u8]) -> Self {
-        ReferenceValueLeaf::Bytes(value)
-    }
-}
-
-impl From<DateTime> for ReferenceValueLeaf<'_> {
-    #[inline]
-    fn from(value: DateTime) -> Self {
-        ReferenceValueLeaf::Date(value)
-    }
-}
-
-impl From<Ipv6Addr> for ReferenceValueLeaf<'_> {
-    #[inline]
-    fn from(value: Ipv6Addr) -> Self {
-        ReferenceValueLeaf::IpAddr(value)
-    }
-}
-
-impl From<PreTokenizedString> for ReferenceValueLeaf<'_> {
-    #[inline]
-    fn from(val: PreTokenizedString) -> Self {
-        ReferenceValueLeaf::PreTokStr(Box::new(val))
-    }
-}
-
 impl<'a, T: Value<'a> + ?Sized> From<ReferenceValueLeaf<'a>> for ReferenceValue<'a, T> {
     #[inline]
     fn from(value: ReferenceValueLeaf<'a>) -> Self {
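With these leaf `From` impls removed, literal values reach a document through `Into<OwnedValue>` instead; a tiny check of that route, with the re-export path assumed and the conversions taken from the tests later in this diff:

    use tantivy::schema::OwnedValue;

    fn main() {
        // Primitive conversions now route through OwnedValue rather than ReferenceValueLeaf.
        assert_eq!(OwnedValue::from(14u64), OwnedValue::U64(14));
        assert_eq!(OwnedValue::from(-1i64), OwnedValue::I64(-1));
    }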
@@ -568,21 +568,21 @@ mod tests {
         let schema = schema_builder.build();
         let doc = TantivyDocument::parse_json(&schema, r#"{"id": 100}"#).unwrap();
         assert_eq!(
-            OwnedValue::Str("100".to_string()),
-            doc.get_first(text_field).unwrap().into()
+            &OwnedValue::Str("100".to_string()),
+            doc.get_first(text_field).unwrap()
         );

         let doc = TantivyDocument::parse_json(&schema, r#"{"id": true}"#).unwrap();
         assert_eq!(
-            OwnedValue::Str("true".to_string()),
-            doc.get_first(text_field).unwrap().into()
+            &OwnedValue::Str("true".to_string()),
+            doc.get_first(text_field).unwrap()
         );

         // Not sure if this null coercion is the best approach
         let doc = TantivyDocument::parse_json(&schema, r#"{"id": null}"#).unwrap();
         assert_eq!(
-            OwnedValue::Str("null".to_string()),
-            doc.get_first(text_field).unwrap().into()
+            &OwnedValue::Str("null".to_string()),
+            doc.get_first(text_field).unwrap()
         );
     }

@@ -595,18 +595,9 @@ mod tests {
         let schema = schema_builder.build();
         let doc_json = r#"{"i64": "100", "u64": "100", "f64": "100"}"#;
         let doc = TantivyDocument::parse_json(&schema, doc_json).unwrap();
-        assert_eq!(
-            OwnedValue::I64(100),
-            doc.get_first(i64_field).unwrap().into()
-        );
-        assert_eq!(
-            OwnedValue::U64(100),
-            doc.get_first(u64_field).unwrap().into()
-        );
-        assert_eq!(
-            OwnedValue::F64(100.0),
-            doc.get_first(f64_field).unwrap().into()
-        );
+        assert_eq!(&OwnedValue::I64(100), doc.get_first(i64_field).unwrap());
+        assert_eq!(&OwnedValue::U64(100), doc.get_first(u64_field).unwrap());
+        assert_eq!(&OwnedValue::F64(100.0), doc.get_first(f64_field).unwrap());
     }

     #[test]
@@ -616,17 +607,11 @@ mod tests {
         let schema = schema_builder.build();
         let doc_json = r#"{"bool": "true"}"#;
         let doc = TantivyDocument::parse_json(&schema, doc_json).unwrap();
-        assert_eq!(
-            OwnedValue::Bool(true),
-            doc.get_first(bool_field).unwrap().into()
-        );
+        assert_eq!(&OwnedValue::Bool(true), doc.get_first(bool_field).unwrap());

         let doc_json = r#"{"bool": "false"}"#;
         let doc = TantivyDocument::parse_json(&schema, doc_json).unwrap();
-        assert_eq!(
-            OwnedValue::Bool(false),
-            doc.get_first(bool_field).unwrap().into()
-        );
+        assert_eq!(&OwnedValue::Bool(false), doc.get_first(bool_field).unwrap());
     }

     #[test]
@@ -659,7 +644,7 @@ mod tests {
         let schema = schema_builder.build();
         let doc_json = r#"{"date": "2019-10-12T07:20:50.52+02:00"}"#;
         let doc = TantivyDocument::parse_json(&schema, doc_json).unwrap();
-        let date = OwnedValue::from(doc.get_first(date_field).unwrap());
+        let date = doc.get_first(date_field).unwrap();
         // Time zone is converted to UTC
         assert_eq!("Date(2019-10-12T05:20:50.52Z)", format!("{date:?}"));
     }
src/schema/field_value.rs (new file, 46 lines)
@@ -0,0 +1,46 @@
+use crate::schema::{Field, OwnedValue};
+
+/// `FieldValue` holds together a `Field` and its `Value`.
+#[allow(missing_docs)]
+#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
+pub struct FieldValue {
+    pub field: Field,
+    pub value: OwnedValue,
+}
+
+impl FieldValue {
+    /// Constructor
+    pub fn new(field: Field, value: OwnedValue) -> FieldValue {
+        FieldValue { field, value }
+    }
+
+    /// Field accessor
+    pub fn field(&self) -> Field {
+        self.field
+    }
+
+    /// Value accessor
+    pub fn value(&self) -> &OwnedValue {
+        &self.value
+    }
+}
+
+impl From<FieldValue> for OwnedValue {
+    fn from(field_value: FieldValue) -> Self {
+        field_value.value
+    }
+}
+
+/// A helper wrapper for creating standard iterators
+/// out of the fields iterator trait.
+pub struct FieldValueIter<'a>(pub(crate) std::slice::Iter<'a, FieldValue>);
+
+impl<'a> Iterator for FieldValueIter<'a> {
+    type Item = (Field, &'a OwnedValue);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.0
+            .next()
+            .map(|field_value| (field_value.field, &field_value.value))
+    }
+}
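A short usage sketch of the restored `FieldValue` type, with the import path assumed from the re-export added in the next hunks:

    use tantivy::schema::{Field, FieldValue, OwnedValue};

    fn main() {
        let field = Field::from_field_id(0);
        let fv = FieldValue::new(field, OwnedValue::Str("hello".to_string()));
        assert_eq!(fv.field(), field);
        assert_eq!(fv.value(), &OwnedValue::Str("hello".to_string()));

        // FieldValue converts into its value when only the payload matters.
        let owned: OwnedValue = fv.into();
        assert_eq!(owned, OwnedValue::Str("hello".to_string()));
    }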
@@ -114,6 +114,7 @@ pub(crate) mod term;

 mod field_entry;
 mod field_type;
+mod field_value;

 mod bytes_options;
 mod date_time_options;
@@ -137,6 +138,7 @@ pub use self::facet_options::FacetOptions;
 pub use self::field::Field;
 pub use self::field_entry::FieldEntry;
 pub use self::field_type::{FieldType, Type};
+pub use self::field_value::FieldValue;
 pub use self::flags::{COERCE, FAST, INDEXED, STORED};
 pub use self::index_record_option::IndexRecordOption;
 pub use self::ip_options::{IntoIpv6Addr, IpAddrOptions};
@@ -645,15 +645,15 @@ mod tests {
         let doc =
             TantivyDocument::convert_named_doc(&schema, NamedFieldDocument(named_doc_map)).unwrap();
         assert_eq!(
-            doc.get_all(title).map(OwnedValue::from).collect::<Vec<_>>(),
+            doc.get_all(title).collect::<Vec<_>>(),
             vec![
-                OwnedValue::from("title1".to_string()),
-                OwnedValue::from("title2".to_string())
+                &OwnedValue::from("title1".to_string()),
+                &OwnedValue::from("title2".to_string())
             ]
         );
         assert_eq!(
-            doc.get_all(val).map(OwnedValue::from).collect::<Vec<_>>(),
-            vec![OwnedValue::from(14u64), OwnedValue::from(-1i64)]
+            doc.get_all(val).collect::<Vec<_>>(),
+            vec![&OwnedValue::from(14u64), &OwnedValue::from(-1i64)]
         );
     }

@@ -682,7 +682,7 @@ mod tests {
         let schema = schema_builder.build();
         {
             let doc = TantivyDocument::parse_json(&schema, "{}").unwrap();
-            assert!(doc.field_values().next().is_none());
+            assert!(doc.field_values().is_empty());
         }
         {
             let doc = TantivyDocument::parse_json(
@@ -59,8 +59,9 @@ pub mod tests {
     use super::*;
     use crate::directory::{Directory, RamDirectory, WritePtr};
     use crate::fastfield::AliveBitSet;
+    use crate::schema::document::Value;
     use crate::schema::{
-        self, Schema, TantivyDocument, TextFieldIndexing, TextOptions, Value, STORED, TEXT,
+        self, Schema, TantivyDocument, TextFieldIndexing, TextOptions, STORED, TEXT,
     };
     use crate::{Index, IndexWriter, Term};

@@ -91,8 +92,8 @@ pub mod tests {
             StoreWriter::new(writer, compressor, blocksize, separate_thread).unwrap();
         for i in 0..num_docs {
             let mut doc = TantivyDocument::default();
-            doc.add_text(field_body, LOREM);
-            doc.add_text(field_title, format!("Doc {i}"));
+            doc.add_field_value(field_body, LOREM.to_string());
+            doc.add_field_value(field_title, format!("Doc {i}"));
             store_writer.store(&doc, &schema).unwrap();
         }
         store_writer.close().unwrap();
@@ -118,11 +119,10 @@ pub mod tests {
         let store = StoreReader::open(store_file, 10)?;
         for i in 0..NUM_DOCS as u32 {
             assert_eq!(
-                store
+                *store
                     .get::<TantivyDocument>(i)?
                     .get_first(field_title)
                     .unwrap()
-                    .as_value()
                     .as_str()
                     .unwrap(),
                 format!("Doc {i}")
@@ -131,13 +131,7 @@ pub mod tests {

         for doc in store.iter::<TantivyDocument>(Some(&alive_bitset)) {
             let doc = doc?;
-            let title_content = doc
-                .get_first(field_title)
-                .unwrap()
-                .as_value()
-                .as_str()
-                .unwrap()
-                .to_string();
+            let title_content = doc.get_first(field_title).unwrap().as_str().unwrap();
             if !title_content.starts_with("Doc ") {
                 panic!("unexpected title_content {title_content}");
             }
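The store tests above now read values back through `&OwnedValue` and the `Value` trait; a minimal sketch of that read-back pattern outside the store, with import paths assumed:

    use tantivy::schema::document::Value;
    use tantivy::schema::{Schema, STORED, TEXT};
    use tantivy::TantivyDocument;

    fn main() {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field("title", TEXT | STORED);
        let _schema = schema_builder.build();

        let mut doc = TantivyDocument::default();
        doc.add_text(title, "Doc 0");

        // get_first returns Option<&OwnedValue>; as_str() comes from the Value trait.
        let text = doc.get_first(title).and_then(|v| v.as_str());
        assert_eq!(text, Some("Doc 0"));
    }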
@@ -403,7 +403,8 @@ mod tests {

     use super::*;
     use crate::directory::RamDirectory;
-    use crate::schema::{Field, TantivyDocument, Value};
+    use crate::schema::document::Value;
+    use crate::schema::{Field, TantivyDocument};
     use crate::store::tests::write_lorem_ipsum_store;
     use crate::store::Compressor;
     use crate::Directory;
@@ -411,7 +412,7 @@ mod tests {
     const BLOCK_SIZE: usize = 16_384;

     fn get_text_field<'a>(doc: &'a TantivyDocument, field: &'a Field) -> Option<&'a str> {
-        doc.get_first(*field).and_then(|f| f.as_value().as_str())
+        doc.get_first(*field).and_then(|f| f.as_str())
     }

     #[test]
@@ -93,7 +93,7 @@ fn open_fst_index(fst_file: FileSlice) -> io::Result<tantivy_fst::Map<OwnedBytes
     let fst = Fst::new(bytes).map_err(|err| {
         io::Error::new(
             io::ErrorKind::InvalidData,
-            format!("Fst data is corrupted: {err:?}"),
+            format!("Fst data is corrupted: {:?}", err),
         )
     })?;
     Ok(tantivy_fst::Map::from(fst))
@@ -95,7 +95,7 @@ fn test_term_dictionary_simple() -> crate::Result<()> {
 #[test]
 fn test_term_dictionary_stream() -> crate::Result<()> {
     let ids: Vec<_> = (0u32..10_000u32)
-        .map(|i| (format!("doc{i:0>6}"), i))
+        .map(|i| (format!("doc{:0>6}", i), i))
         .collect();
     let buffer: Vec<u8> = {
         let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
@@ -156,7 +156,7 @@ fn test_stream_high_range_prefix_suffix() -> crate::Result<()> {
 #[test]
 fn test_stream_range() -> crate::Result<()> {
     let ids: Vec<_> = (0u32..10_000u32)
-        .map(|i| (format!("doc{i:0>6}"), i))
+        .map(|i| (format!("doc{:0>6}", i), i))
         .collect();
     let buffer: Vec<u8> = {
         let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
@@ -96,7 +96,7 @@ mod tests {
         {
             let mut add_token = |token: &Token| {
                 let facet = Facet::from_encoded(token.text.as_bytes().to_owned()).unwrap();
-                tokens.push(format!("{facet}"));
+                tokens.push(format!("{}", facet));
             };
             FacetTokenizer::default()
                 .token_stream(facet.encoded_str())
@@ -116,7 +116,7 @@ mod tests {
         {
             let mut add_token = |token: &Token| {
                 let facet = Facet::from_encoded(token.text.as_bytes().to_owned()).unwrap(); // ok test
-                tokens.push(format!("{facet}"));
+                tokens.push(format!("{}", facet));
             };
             FacetTokenizer::default()
                 .token_stream(facet.encoded_str()) // ok test