mirror of
https://github.com/quickwit-oss/tantivy.git
synced 2025-12-30 14:02:55 +00:00
Compare commits
3 Commits
raphael_op
...
0.21.1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
722b6c5205 | ||
|
|
0f2211ca44 | ||
|
|
21aabf961c |
8
.github/workflows/coverage.yml
vendored
8
.github/workflows/coverage.yml
vendored
@@ -3,6 +3,8 @@ name: Coverage
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
# Ensures that we cancel running jobs for the same PR / same workflow.
|
||||
concurrency:
|
||||
@@ -13,13 +15,13 @@ jobs:
|
||||
coverage:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
- name: Install Rust
|
||||
run: rustup toolchain install nightly-2023-09-10 --profile minimal --component llvm-tools-preview
|
||||
run: rustup toolchain install nightly --profile minimal --component llvm-tools-preview
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- uses: taiki-e/install-action@cargo-llvm-cov
|
||||
- name: Generate code coverage
|
||||
run: cargo +nightly-2023-09-10 llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
|
||||
run: cargo +nightly llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v3
|
||||
continue-on-error: true
|
||||
|
||||
2
.github/workflows/long_running.yml
vendored
2
.github/workflows/long_running.yml
vendored
@@ -19,7 +19,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
- name: Install stable
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
|
||||
11
.github/workflows/test.yml
vendored
11
.github/workflows/test.yml
vendored
@@ -20,7 +20,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Install nightly
|
||||
uses: actions-rs/toolchain@v1
|
||||
@@ -39,13 +39,6 @@ jobs:
|
||||
|
||||
- name: Check Formatting
|
||||
run: cargo +nightly fmt --all -- --check
|
||||
|
||||
- name: Check Stable Compilation
|
||||
run: cargo build --all-features
|
||||
|
||||
|
||||
- name: Check Bench Compilation
|
||||
run: cargo +nightly bench --no-run --profile=dev --all-features
|
||||
|
||||
- uses: actions-rs/clippy-check@v1
|
||||
with:
|
||||
@@ -67,7 +60,7 @@ jobs:
|
||||
name: test-${{ matrix.features.label}}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Install stable
|
||||
uses: actions-rs/toolchain@v1
|
||||
|
||||
@@ -1,9 +1,3 @@
|
||||
Tantivy 0.21.1
|
||||
================================
|
||||
#### Bugfixes
|
||||
- Range queries on fast fields with less values on that field than documents had an invalid end condition, leading to missing results. [#2226](https://github.com/quickwit-oss/tantivy/issues/2226)(@appaquet @PSeitz)
|
||||
- Increase the minimum memory budget from 3MB to 15MB to avoid single doc segments (API fix). [#2176](https://github.com/quickwit-oss/tantivy/issues/2176)(@PSeitz)
|
||||
|
||||
Tantivy 0.21
|
||||
================================
|
||||
#### Bugfixes
|
||||
|
||||
31
Cargo.toml
31
Cargo.toml
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy"
|
||||
version = "0.22.0-dev"
|
||||
version = "0.21.1"
|
||||
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
|
||||
license = "MIT"
|
||||
categories = ["database-implementations", "data-structures"]
|
||||
@@ -22,34 +22,36 @@ crc32fast = "1.3.2"
|
||||
once_cell = "1.10.0"
|
||||
regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
|
||||
aho-corasick = "1.0"
|
||||
tantivy-fst = "0.5"
|
||||
memmap2 = { version = "0.9.0", optional = true }
|
||||
tantivy-fst = "0.4.0"
|
||||
memmap2 = { version = "0.7.1", optional = true }
|
||||
lz4_flex = { version = "0.11", default-features = false, optional = true }
|
||||
zstd = { version = "0.13", optional = true, default-features = false }
|
||||
zstd = { version = "0.12", optional = true, default-features = false }
|
||||
tempfile = { version = "3.3.0", optional = true }
|
||||
log = "0.4.16"
|
||||
serde = { version = "1.0.136", features = ["derive"] }
|
||||
serde_json = "1.0.79"
|
||||
num_cpus = "1.13.1"
|
||||
fs4 = { version = "0.8.0", optional = true }
|
||||
fs4 = { version = "0.6.3", optional = true }
|
||||
levenshtein_automata = "0.2.1"
|
||||
uuid = { version = "1.0.0", features = ["v4", "serde"] }
|
||||
crossbeam-channel = "0.5.4"
|
||||
rust-stemmers = "1.2.0"
|
||||
downcast-rs = "1.2.0"
|
||||
bitpacking = { version = "0.9.2", default-features = false, features = ["bitpacker4x"] }
|
||||
census = "0.4.2"
|
||||
bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
|
||||
census = "0.4.0"
|
||||
rustc-hash = "1.1.0"
|
||||
thiserror = "1.0.30"
|
||||
htmlescape = "0.3.1"
|
||||
fail = { version = "0.5.0", optional = true }
|
||||
murmurhash32 = "0.3.0"
|
||||
time = { version = "0.3.10", features = ["serde-well-known"] }
|
||||
smallvec = "1.8.0"
|
||||
rayon = "1.5.2"
|
||||
lru = "0.12.0"
|
||||
lru = "0.11.0"
|
||||
fastdivide = "0.4.0"
|
||||
itertools = "0.12.0"
|
||||
itertools = "0.11.0"
|
||||
measure_time = "0.8.2"
|
||||
async-trait = "0.1.53"
|
||||
arc-swap = "1.5.0"
|
||||
|
||||
columnar = { version= "0.2", path="./columnar", package ="tantivy-columnar" }
|
||||
@@ -61,7 +63,6 @@ common = { version= "0.6", path = "./common/", package = "tantivy-common" }
|
||||
tokenizer-api = { version= "0.2", path="./tokenizer-api", package="tantivy-tokenizer-api" }
|
||||
sketches-ddsketch = { version = "0.2.1", features = ["use_serde"] }
|
||||
futures-util = { version = "0.3.28", optional = true }
|
||||
fnv = "1.0.7"
|
||||
|
||||
[target.'cfg(windows)'.dependencies]
|
||||
winapi = "0.3.9"
|
||||
@@ -73,14 +74,15 @@ matches = "0.1.9"
|
||||
pretty_assertions = "1.2.1"
|
||||
proptest = "1.0.0"
|
||||
test-log = "0.2.10"
|
||||
env_logger = "0.10.0"
|
||||
futures = "0.3.21"
|
||||
paste = "1.0.11"
|
||||
more-asserts = "0.3.1"
|
||||
rand_distr = "0.4.3"
|
||||
time = { version = "0.3.10", features = ["serde-well-known", "macros"] }
|
||||
|
||||
[target.'cfg(not(windows))'.dev-dependencies]
|
||||
criterion = { version = "0.5", default-features = false }
|
||||
criterion = "0.5"
|
||||
pprof = { git = "https://github.com/PSeitz/pprof-rs/", rev = "53af24b", features = ["flamegraph", "criterion"] } # temp fork that works with criterion 0.5
|
||||
|
||||
[dev-dependencies.fail]
|
||||
version = "0.5.0"
|
||||
@@ -113,11 +115,6 @@ unstable = [] # useful for benches.
|
||||
|
||||
quickwit = ["sstable", "futures-util"]
|
||||
|
||||
# Compares only the hash of a string when indexing data.
|
||||
# Increases indexing speed, but may lead to extremely rare missing terms, when there's a hash collision.
|
||||
# Uses 64bit ahash.
|
||||
compare_hash_only = ["stacker/compare_hash_only"]
|
||||
|
||||
[workspace]
|
||||
members = ["query-grammar", "bitpacker", "common", "ownedbytes", "stacker", "sstable", "tokenizer-api", "columnar"]
|
||||
|
||||
|
||||
33
README.md
33
README.md
@@ -5,18 +5,19 @@
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://crates.io/crates/tantivy)
|
||||
|
||||
<img src="https://tantivy-search.github.io/logo/tantivy-logo.png" alt="Tantivy, the fastest full-text search engine library written in Rust" height="250">
|
||||

|
||||
|
||||
## Fast full-text search engine library written in Rust
|
||||
**Tantivy** is a **full-text search engine library** written in Rust.
|
||||
|
||||
**If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our distributed search engine built on top of Tantivy.**
|
||||
|
||||
Tantivy is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
|
||||
an off-the-shelf search engine server, but rather a crate that can be used to build such a search engine.
|
||||
It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
|
||||
an off-the-shelf search engine server, but rather a crate that can be used
|
||||
to build such a search engine.
|
||||
|
||||
Tantivy is, in fact, strongly inspired by Lucene's design.
|
||||
|
||||
## Benchmark
|
||||
If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our search engine built on top of Tantivy.
|
||||
|
||||
# Benchmark
|
||||
|
||||
The following [benchmark](https://tantivy-search.github.io/bench/) breakdowns
|
||||
performance for different types of queries/collections.
|
||||
@@ -27,7 +28,7 @@ Your mileage WILL vary depending on the nature of queries and their load.
|
||||
|
||||
Details about the benchmark can be found at this [repository](https://github.com/quickwit-oss/search-benchmark-game).
|
||||
|
||||
## Features
|
||||
# Features
|
||||
|
||||
- Full-text search
|
||||
- Configurable tokenizer (stemming available for 17 Latin languages) with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy), [Vaporetto](https://crates.io/crates/vaporetto_tantivy), and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
|
||||
@@ -53,11 +54,11 @@ Details about the benchmark can be found at this [repository](https://github.com
|
||||
- Searcher Warmer API
|
||||
- Cheesy logo with a horse
|
||||
|
||||
### Non-features
|
||||
## Non-features
|
||||
|
||||
Distributed search is out of the scope of Tantivy, but if you are looking for this feature, check out [Quickwit](https://github.com/quickwit-oss/quickwit/).
|
||||
|
||||
## Getting started
|
||||
# Getting started
|
||||
|
||||
Tantivy works on stable Rust and supports Linux, macOS, and Windows.
|
||||
|
||||
@@ -67,7 +68,7 @@ index documents, and search via the CLI or a small server with a REST API.
|
||||
It walks you through getting a Wikipedia search engine up and running in a few minutes.
|
||||
- [Reference doc for the last released version](https://docs.rs/tantivy/)
|
||||
|
||||
## How can I support this project?
|
||||
# How can I support this project?
|
||||
|
||||
There are many ways to support this project.
|
||||
|
||||
@@ -78,16 +79,16 @@ There are many ways to support this project.
|
||||
- Contribute code (you can join [our Discord server](https://discord.gg/MT27AG5EVE))
|
||||
- Talk about Tantivy around you
|
||||
|
||||
## Contributing code
|
||||
# Contributing code
|
||||
|
||||
We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
|
||||
Feel free to update CHANGELOG.md with your contribution.
|
||||
|
||||
### Tokenizer
|
||||
## Tokenizer
|
||||
|
||||
When implementing a tokenizer for tantivy depend on the `tantivy-tokenizer-api` crate.
|
||||
|
||||
### Clone and build locally
|
||||
## Clone and build locally
|
||||
|
||||
Tantivy compiles on stable Rust.
|
||||
To check out and run tests, you can simply run:
|
||||
@@ -98,7 +99,7 @@ cd tantivy
|
||||
cargo test
|
||||
```
|
||||
|
||||
## Companies Using Tantivy
|
||||
# Companies Using Tantivy
|
||||
|
||||
<p align="left">
|
||||
<img align="center" src="doc/assets/images/etsy.png" alt="Etsy" height="25" width="auto" />
|
||||
@@ -110,7 +111,7 @@ cargo test
|
||||
<img align="center" src="doc/assets/images/element-dark-theme.png#gh-dark-mode-only" alt="Element.io" height="25" width="auto" />
|
||||
</p>
|
||||
|
||||
## FAQ
|
||||
# FAQ
|
||||
|
||||
### Can I use Tantivy in other languages?
|
||||
|
||||
|
||||
@@ -1,99 +1,14 @@
|
||||
use criterion::{criterion_group, criterion_main, BatchSize, Bencher, Criterion, Throughput};
|
||||
use tantivy::schema::{TantivyDocument, FAST, INDEXED, STORED, STRING, TEXT};
|
||||
use tantivy::{tokenizer, Index, IndexWriter};
|
||||
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
|
||||
use pprof::criterion::{Output, PProfProfiler};
|
||||
use tantivy::schema::{FAST, INDEXED, STORED, STRING, TEXT};
|
||||
use tantivy::Index;
|
||||
|
||||
const HDFS_LOGS: &str = include_str!("hdfs.json");
|
||||
const GH_LOGS: &str = include_str!("gh.json");
|
||||
const WIKI: &str = include_str!("wiki.json");
|
||||
|
||||
fn benchmark(
|
||||
b: &mut Bencher,
|
||||
input: &str,
|
||||
schema: tantivy::schema::Schema,
|
||||
commit: bool,
|
||||
parse_json: bool,
|
||||
is_dynamic: bool,
|
||||
) {
|
||||
if is_dynamic {
|
||||
benchmark_dynamic_json(b, input, schema, commit, parse_json)
|
||||
} else {
|
||||
_benchmark(b, input, schema, commit, parse_json, |schema, doc_json| {
|
||||
TantivyDocument::parse_json(&schema, doc_json).unwrap()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn get_index(schema: tantivy::schema::Schema) -> Index {
|
||||
let mut index = Index::create_in_ram(schema.clone());
|
||||
let ff_tokenizer_manager = tokenizer::TokenizerManager::default();
|
||||
ff_tokenizer_manager.register(
|
||||
"raw",
|
||||
tokenizer::TextAnalyzer::builder(tokenizer::RawTokenizer::default())
|
||||
.filter(tokenizer::RemoveLongFilter::limit(255))
|
||||
.build(),
|
||||
);
|
||||
index.set_fast_field_tokenizers(ff_tokenizer_manager.clone());
|
||||
index
|
||||
}
|
||||
|
||||
fn _benchmark(
|
||||
b: &mut Bencher,
|
||||
input: &str,
|
||||
schema: tantivy::schema::Schema,
|
||||
commit: bool,
|
||||
include_json_parsing: bool,
|
||||
create_doc: impl Fn(&tantivy::schema::Schema, &str) -> TantivyDocument,
|
||||
) {
|
||||
if include_json_parsing {
|
||||
let lines: Vec<&str> = input.trim().split('\n').collect();
|
||||
b.iter(|| {
|
||||
let index = get_index(schema.clone());
|
||||
let mut index_writer: IndexWriter =
|
||||
index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let doc = create_doc(&schema, doc_json);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
if commit {
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
})
|
||||
} else {
|
||||
let docs: Vec<_> = input
|
||||
.trim()
|
||||
.split('\n')
|
||||
.map(|doc_json| create_doc(&schema, doc_json))
|
||||
.collect();
|
||||
b.iter_batched(
|
||||
|| docs.clone(),
|
||||
|docs| {
|
||||
let index = get_index(schema.clone());
|
||||
let mut index_writer: IndexWriter =
|
||||
index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc in docs {
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
if commit {
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
},
|
||||
BatchSize::SmallInput,
|
||||
)
|
||||
}
|
||||
}
|
||||
fn benchmark_dynamic_json(
|
||||
b: &mut Bencher,
|
||||
input: &str,
|
||||
schema: tantivy::schema::Schema,
|
||||
commit: bool,
|
||||
parse_json: bool,
|
||||
) {
|
||||
let json_field = schema.get_field("json").unwrap();
|
||||
_benchmark(b, input, schema, commit, parse_json, |_schema, doc_json| {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
tantivy::doc!(json_field=>json_val)
|
||||
})
|
||||
fn get_lines(input: &str) -> Vec<&str> {
|
||||
input.trim().split('\n').collect()
|
||||
}
|
||||
|
||||
pub fn hdfs_index_benchmark(c: &mut Criterion) {
|
||||
@@ -104,14 +19,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
|
||||
schema_builder.add_text_field("severity", STRING);
|
||||
schema_builder.build()
|
||||
};
|
||||
let schema_only_fast = {
|
||||
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
|
||||
schema_builder.add_u64_field("timestamp", FAST);
|
||||
schema_builder.add_text_field("body", FAST);
|
||||
schema_builder.add_text_field("severity", FAST);
|
||||
schema_builder.build()
|
||||
};
|
||||
let _schema_with_store = {
|
||||
let schema_with_store = {
|
||||
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
|
||||
schema_builder.add_u64_field("timestamp", INDEXED | STORED);
|
||||
schema_builder.add_text_field("body", TEXT | STORED);
|
||||
@@ -120,39 +28,74 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
|
||||
};
|
||||
let dynamic_schema = {
|
||||
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
|
||||
schema_builder.add_json_field("json", TEXT | FAST);
|
||||
schema_builder.add_json_field("json", TEXT);
|
||||
schema_builder.build()
|
||||
};
|
||||
|
||||
let mut group = c.benchmark_group("index-hdfs");
|
||||
group.throughput(Throughput::Bytes(HDFS_LOGS.len() as u64));
|
||||
group.sample_size(20);
|
||||
|
||||
let benches = [
|
||||
("only-indexed-".to_string(), schema, false),
|
||||
//("stored-".to_string(), _schema_with_store, false),
|
||||
("only-fast-".to_string(), schema_only_fast, false),
|
||||
("dynamic-".to_string(), dynamic_schema, true),
|
||||
];
|
||||
|
||||
for (prefix, schema, is_dynamic) in benches {
|
||||
for commit in [false, true] {
|
||||
let suffix = if commit { "with-commit" } else { "no-commit" };
|
||||
for parse_json in [false] {
|
||||
// for parse_json in [false, true] {
|
||||
let suffix = if parse_json {
|
||||
format!("{}-with-json-parsing", suffix)
|
||||
} else {
|
||||
format!("{}", suffix)
|
||||
};
|
||||
|
||||
let bench_name = format!("{}{}", prefix, suffix);
|
||||
group.bench_function(bench_name, |b| {
|
||||
benchmark(b, HDFS_LOGS, schema.clone(), commit, parse_json, is_dynamic)
|
||||
});
|
||||
group.bench_function("index-hdfs-no-commit", |b| {
|
||||
let lines = get_lines(HDFS_LOGS);
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-with-commit", |b| {
|
||||
let lines = get_lines(HDFS_LOGS);
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-no-commit-with-docstore", |b| {
|
||||
let lines = get_lines(HDFS_LOGS);
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema_with_store.clone());
|
||||
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-with-commit-with-docstore", |b| {
|
||||
let lines = get_lines(HDFS_LOGS);
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema_with_store.clone());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-no-commit-json-without-docstore", |b| {
|
||||
let lines = get_lines(HDFS_LOGS);
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
pub fn gh_index_benchmark(c: &mut Criterion) {
|
||||
@@ -161,24 +104,38 @@ pub fn gh_index_benchmark(c: &mut Criterion) {
|
||||
schema_builder.add_json_field("json", TEXT | FAST);
|
||||
schema_builder.build()
|
||||
};
|
||||
let dynamic_schema_fast = {
|
||||
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
|
||||
schema_builder.add_json_field("json", FAST);
|
||||
schema_builder.build()
|
||||
};
|
||||
|
||||
let mut group = c.benchmark_group("index-gh");
|
||||
group.throughput(Throughput::Bytes(GH_LOGS.len() as u64));
|
||||
|
||||
group.bench_function("index-gh-no-commit", |b| {
|
||||
benchmark_dynamic_json(b, GH_LOGS, dynamic_schema.clone(), false, false)
|
||||
let lines = get_lines(GH_LOGS);
|
||||
b.iter(|| {
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
})
|
||||
});
|
||||
group.bench_function("index-gh-fast", |b| {
|
||||
benchmark_dynamic_json(b, GH_LOGS, dynamic_schema_fast.clone(), false, false)
|
||||
});
|
||||
|
||||
group.bench_function("index-gh-fast-with-commit", |b| {
|
||||
benchmark_dynamic_json(b, GH_LOGS, dynamic_schema_fast.clone(), true, false)
|
||||
group.bench_function("index-gh-with-commit", |b| {
|
||||
let lines = get_lines(GH_LOGS);
|
||||
b.iter(|| {
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
@@ -193,10 +150,33 @@ pub fn wiki_index_benchmark(c: &mut Criterion) {
|
||||
group.throughput(Throughput::Bytes(WIKI.len() as u64));
|
||||
|
||||
group.bench_function("index-wiki-no-commit", |b| {
|
||||
benchmark_dynamic_json(b, WIKI, dynamic_schema.clone(), false, false)
|
||||
let lines = get_lines(WIKI);
|
||||
b.iter(|| {
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
})
|
||||
});
|
||||
group.bench_function("index-wiki-with-commit", |b| {
|
||||
benchmark_dynamic_json(b, WIKI, dynamic_schema.clone(), true, false)
|
||||
let lines = get_lines(WIKI);
|
||||
b.iter(|| {
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in &lines {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
@@ -207,12 +187,12 @@ criterion_group! {
|
||||
}
|
||||
criterion_group! {
|
||||
name = gh_benches;
|
||||
config = Criterion::default();
|
||||
config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
|
||||
targets = gh_index_benchmark
|
||||
}
|
||||
criterion_group! {
|
||||
name = wiki_benches;
|
||||
config = Criterion::default();
|
||||
config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
|
||||
targets = wiki_index_benchmark
|
||||
}
|
||||
criterion_main!(benches, gh_benches, wiki_benches);
|
||||
|
||||
@@ -15,7 +15,7 @@ homepage = "https://github.com/quickwit-oss/tantivy"
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
bitpacking = { version = "0.9.2", default-features = false, features = ["bitpacker1x"] }
|
||||
bitpacking = {version="0.8", default-features=false, features = ["bitpacker1x"]}
|
||||
|
||||
[dev-dependencies]
|
||||
rand = "0.8"
|
||||
|
||||
@@ -125,8 +125,6 @@ impl BitUnpacker {
|
||||
|
||||
// Decodes the range of bitpacked `u32` values with idx
|
||||
// in [start_idx, start_idx + output.len()).
|
||||
// It is guaranteed to completely fill `output` and not read from it, so passing a vector with
|
||||
// un-initialized values is safe.
|
||||
//
|
||||
// #Panics
|
||||
//
|
||||
@@ -239,19 +237,7 @@ impl BitUnpacker {
|
||||
data: &[u8],
|
||||
positions: &mut Vec<u32>,
|
||||
) {
|
||||
// We use the code below instead of positions.resize(id_range.len(), 0u32) for performance
|
||||
// reasons: on some queries, the CPU cost of memsetting the array and of using a bigger
|
||||
// vector than necessary is noticeable (~5%).
|
||||
// In particular, searches are a few percent faster when using reserve_exact() as below
|
||||
// instead of reserve().
|
||||
// The un-initialized values are safe as get_batch_u32s() completely fills `positions`
|
||||
// and does not read from it.
|
||||
positions.clear();
|
||||
positions.reserve_exact(id_range.len());
|
||||
#[allow(clippy::uninit_vec)]
|
||||
unsafe {
|
||||
positions.set_len(id_range.len());
|
||||
}
|
||||
positions.resize(id_range.len(), 0u32);
|
||||
self.get_batch_u32s(id_range.start, data, positions);
|
||||
crate::filter_vec::filter_vec_in_place(value_range, id_range.start, positions)
|
||||
}
|
||||
@@ -381,7 +367,7 @@ mod test {
|
||||
let mut output: Vec<u32> = Vec::new();
|
||||
for len in [0, 1, 2, 32, 33, 34, 64] {
|
||||
for start_idx in 0u32..32u32 {
|
||||
output.resize(len, 0);
|
||||
output.resize(len as usize, 0);
|
||||
bitunpacker.get_batch_u32s(start_idx, &buffer, &mut output);
|
||||
for i in 0..len {
|
||||
let expected = (start_idx + i as u32) & mask;
|
||||
|
||||
@@ -9,7 +9,8 @@ description = "column oriented storage for tantivy"
|
||||
categories = ["database-implementations", "data-structures", "compression"]
|
||||
|
||||
[dependencies]
|
||||
itertools = "0.12.0"
|
||||
itertools = "0.11.0"
|
||||
fnv = "1.0.7"
|
||||
fastdivide = "0.4.0"
|
||||
|
||||
stacker = { version= "0.2", path = "../stacker", package="tantivy-stacker"}
|
||||
|
||||
@@ -8,6 +8,7 @@ license = "MIT"
|
||||
columnar = {path="../", package="tantivy-columnar"}
|
||||
serde_json = "1"
|
||||
serde_json_borrow = {git="https://github.com/PSeitz/serde_json_borrow/"}
|
||||
serde = "1"
|
||||
|
||||
[workspace]
|
||||
members = []
|
||||
|
||||
@@ -111,7 +111,10 @@ fn stack_multivalued_indexes<'a>(
|
||||
let mut last_row_id = 0;
|
||||
let mut current_it = multivalued_indexes.next();
|
||||
Box::new(std::iter::from_fn(move || loop {
|
||||
if let Some(row_id) = current_it.as_mut()?.next() {
|
||||
let Some(multivalued_index) = current_it.as_mut() else {
|
||||
return None;
|
||||
};
|
||||
if let Some(row_id) = multivalued_index.next() {
|
||||
last_row_id = offset + row_id;
|
||||
return Some(last_row_id);
|
||||
}
|
||||
|
||||
@@ -1,8 +1,3 @@
|
||||
//! # `column_index`
|
||||
//!
|
||||
//! `column_index` provides rank and select operations to associate positions when not all
|
||||
//! documents have exactly one element.
|
||||
|
||||
mod merge;
|
||||
mod multivalued_index;
|
||||
mod optional_index;
|
||||
@@ -46,10 +41,10 @@ impl ColumnIndex {
|
||||
pub fn is_multivalue(&self) -> bool {
|
||||
matches!(self, ColumnIndex::Multivalued(_))
|
||||
}
|
||||
/// Returns the cardinality of the column index.
|
||||
///
|
||||
/// By convention, if the column contains no docs, we consider that it is
|
||||
/// full.
|
||||
// Returns the cardinality of the column index.
|
||||
//
|
||||
// By convention, if the column contains no docs, we consider that it is
|
||||
// full.
|
||||
#[inline]
|
||||
pub fn get_cardinality(&self) -> Cardinality {
|
||||
match self {
|
||||
@@ -126,18 +121,18 @@ impl ColumnIndex {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn docid_range_to_rowids(&self, doc_id_range: Range<DocId>) -> Range<RowId> {
|
||||
pub fn docid_range_to_rowids(&self, doc_id: Range<DocId>) -> Range<RowId> {
|
||||
match self {
|
||||
ColumnIndex::Empty { .. } => 0..0,
|
||||
ColumnIndex::Full => doc_id_range,
|
||||
ColumnIndex::Full => doc_id,
|
||||
ColumnIndex::Optional(optional_index) => {
|
||||
let row_start = optional_index.rank(doc_id_range.start);
|
||||
let row_end = optional_index.rank(doc_id_range.end);
|
||||
let row_start = optional_index.rank(doc_id.start);
|
||||
let row_end = optional_index.rank(doc_id.end);
|
||||
row_start..row_end
|
||||
}
|
||||
ColumnIndex::Multivalued(multivalued_index) => {
|
||||
let end_docid = doc_id_range.end.min(multivalued_index.num_docs() - 1) + 1;
|
||||
let start_docid = doc_id_range.start.min(end_docid);
|
||||
let end_docid = doc_id.end.min(multivalued_index.num_docs() - 1) + 1;
|
||||
let start_docid = doc_id.start.min(end_docid);
|
||||
|
||||
let row_start = multivalued_index.start_index_column.get_val(start_docid);
|
||||
let row_end = multivalued_index.start_index_column.get_val(end_docid);
|
||||
|
||||
@@ -21,6 +21,8 @@ const DENSE_BLOCK_THRESHOLD: u32 =
|
||||
|
||||
const ELEMENTS_PER_BLOCK: u32 = u16::MAX as u32 + 1;
|
||||
|
||||
const BLOCK_SIZE: RowId = 1 << 16;
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
struct BlockMeta {
|
||||
non_null_rows_before_block: u32,
|
||||
@@ -107,8 +109,8 @@ struct RowAddr {
|
||||
#[inline(always)]
|
||||
fn row_addr_from_row_id(row_id: RowId) -> RowAddr {
|
||||
RowAddr {
|
||||
block_id: (row_id / ELEMENTS_PER_BLOCK) as u16,
|
||||
in_block_row_id: (row_id % ELEMENTS_PER_BLOCK) as u16,
|
||||
block_id: (row_id / BLOCK_SIZE) as u16,
|
||||
in_block_row_id: (row_id % BLOCK_SIZE) as u16,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -183,13 +185,8 @@ impl Set<RowId> for OptionalIndex {
|
||||
}
|
||||
}
|
||||
|
||||
/// Any value doc_id is allowed.
|
||||
/// In particular, doc_id = num_rows.
|
||||
#[inline]
|
||||
fn rank(&self, doc_id: DocId) -> RowId {
|
||||
if doc_id >= self.num_docs() {
|
||||
return self.num_non_nulls();
|
||||
}
|
||||
let RowAddr {
|
||||
block_id,
|
||||
in_block_row_id,
|
||||
@@ -203,15 +200,13 @@ impl Set<RowId> for OptionalIndex {
|
||||
block_meta.non_null_rows_before_block + block_offset_row_id
|
||||
}
|
||||
|
||||
/// Any value doc_id is allowed.
|
||||
/// In particular, doc_id = num_rows.
|
||||
#[inline]
|
||||
fn rank_if_exists(&self, doc_id: DocId) -> Option<RowId> {
|
||||
let RowAddr {
|
||||
block_id,
|
||||
in_block_row_id,
|
||||
} = row_addr_from_row_id(doc_id);
|
||||
let block_meta = *self.block_metas.get(block_id as usize)?;
|
||||
let block_meta = self.block_metas[block_id as usize];
|
||||
let block = self.block(block_meta);
|
||||
let block_offset_row_id = match block {
|
||||
Block::Dense(dense_block) => dense_block.rank_if_exists(in_block_row_id),
|
||||
@@ -496,7 +491,7 @@ fn deserialize_optional_index_block_metadatas(
|
||||
non_null_rows_before_block += num_non_null_rows;
|
||||
}
|
||||
block_metas.resize(
|
||||
((num_rows + ELEMENTS_PER_BLOCK - 1) / ELEMENTS_PER_BLOCK) as usize,
|
||||
((num_rows + BLOCK_SIZE - 1) / BLOCK_SIZE) as usize,
|
||||
BlockMeta {
|
||||
non_null_rows_before_block,
|
||||
start_byte_offset,
|
||||
|
||||
@@ -39,8 +39,7 @@ pub trait Set<T> {
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if rank is greater or equal to the number of
|
||||
/// elements in the Set.
|
||||
/// May panic if rank is greater than the number of elements in the Set.
|
||||
fn select(&self, rank: T) -> T;
|
||||
|
||||
/// Creates a brand new select cursor.
|
||||
|
||||
@@ -3,30 +3,6 @@ use proptest::strategy::Strategy;
|
||||
use proptest::{prop_oneof, proptest};
|
||||
|
||||
use super::*;
|
||||
use crate::{ColumnarReader, ColumnarWriter, DynamicColumnHandle};
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_bug_2293() {
|
||||
// tests for panic in docid_range_to_rowids for docid == num_docs
|
||||
test_optional_index_with_num_docs(ELEMENTS_PER_BLOCK - 1);
|
||||
test_optional_index_with_num_docs(ELEMENTS_PER_BLOCK);
|
||||
test_optional_index_with_num_docs(ELEMENTS_PER_BLOCK + 1);
|
||||
}
|
||||
fn test_optional_index_with_num_docs(num_docs: u32) {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_numerical(100, "score", 80i64);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer
|
||||
.serialize(num_docs, None, &mut buffer)
|
||||
.unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("score").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
|
||||
let col = cols[0].open().unwrap();
|
||||
col.column_index().docid_range_to_rowids(0..num_docs);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dense_block_threshold() {
|
||||
@@ -59,7 +35,7 @@ proptest! {
|
||||
|
||||
#[test]
|
||||
fn test_with_random_sets_simple() {
|
||||
let vals = 10..ELEMENTS_PER_BLOCK * 2;
|
||||
let vals = 10..BLOCK_SIZE * 2;
|
||||
let mut out: Vec<u8> = Vec::new();
|
||||
serialize_optional_index(&vals, 100, &mut out).unwrap();
|
||||
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
@@ -195,7 +171,7 @@ fn test_optional_index_rank() {
|
||||
test_optional_index_rank_aux(&[0u32, 1u32]);
|
||||
let mut block = Vec::new();
|
||||
block.push(3u32);
|
||||
block.extend((0..ELEMENTS_PER_BLOCK).map(|i| i + ELEMENTS_PER_BLOCK + 1));
|
||||
block.extend((0..BLOCK_SIZE).map(|i| i + BLOCK_SIZE + 1));
|
||||
test_optional_index_rank_aux(&block);
|
||||
}
|
||||
|
||||
@@ -209,8 +185,8 @@ fn test_optional_index_iter_empty_one() {
|
||||
fn test_optional_index_iter_dense_block() {
|
||||
let mut block = Vec::new();
|
||||
block.push(3u32);
|
||||
block.extend((0..ELEMENTS_PER_BLOCK).map(|i| i + ELEMENTS_PER_BLOCK + 1));
|
||||
test_optional_index_iter_aux(&block, 3 * ELEMENTS_PER_BLOCK);
|
||||
block.extend((0..BLOCK_SIZE).map(|i| i + BLOCK_SIZE + 1));
|
||||
test_optional_index_iter_aux(&block, 3 * BLOCK_SIZE);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -239,12 +215,12 @@ mod bench {
|
||||
let vals: Vec<RowId> = (0..TOTAL_NUM_VALUES)
|
||||
.map(|_| rng.gen_bool(fill_ratio))
|
||||
.enumerate()
|
||||
.filter(|(_pos, val)| *val)
|
||||
.filter(|(pos, val)| *val)
|
||||
.map(|(pos, _)| pos as RowId)
|
||||
.collect();
|
||||
serialize_optional_index(&&vals[..], TOTAL_NUM_VALUES, &mut out).unwrap();
|
||||
|
||||
open_optional_index(OwnedBytes::new(out)).unwrap()
|
||||
let codec = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
codec
|
||||
}
|
||||
|
||||
fn random_range_iterator(
|
||||
@@ -266,7 +242,7 @@ mod bench {
|
||||
}
|
||||
|
||||
fn n_percent_step_iterator(percent: f32, num_values: u32) -> impl Iterator<Item = u32> {
|
||||
let ratio = percent / 100.0;
|
||||
let ratio = percent as f32 / 100.0;
|
||||
let step_size = (1f32 / ratio) as u32;
|
||||
let deviation = step_size - 1;
|
||||
random_range_iterator(0, num_values, step_size, deviation)
|
||||
|
||||
@@ -30,7 +30,6 @@ impl<'a> SerializableColumnIndex<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Serialize a column index.
|
||||
pub fn serialize_column_index(
|
||||
column_index: SerializableColumnIndex,
|
||||
output: &mut impl Write,
|
||||
@@ -52,7 +51,6 @@ pub fn serialize_column_index(
|
||||
Ok(column_index_num_bytes)
|
||||
}
|
||||
|
||||
/// Open a serialized column index.
|
||||
pub fn open_column_index(mut bytes: OwnedBytes) -> io::Result<ColumnIndex> {
|
||||
if bytes.is_empty() {
|
||||
return Err(io::Error::new(
|
||||
|
||||
@@ -101,7 +101,7 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
|
||||
row_id_hits: &mut Vec<RowId>,
|
||||
) {
|
||||
let row_id_range = row_id_range.start..row_id_range.end.min(self.num_vals());
|
||||
for idx in row_id_range {
|
||||
for idx in row_id_range.start..row_id_range.end {
|
||||
let val = self.get_val(idx);
|
||||
if value_range.contains(&val) {
|
||||
row_id_hits.push(idx);
|
||||
|
||||
@@ -269,8 +269,7 @@ impl StrOrBytesColumnWriter {
|
||||
dictionaries: &mut [DictionaryBuilder],
|
||||
arena: &mut MemoryArena,
|
||||
) {
|
||||
let unordered_id =
|
||||
dictionaries[self.dictionary_id as usize].get_or_allocate_id(bytes, arena);
|
||||
let unordered_id = dictionaries[self.dictionary_id as usize].get_or_allocate_id(bytes);
|
||||
self.column_writer.record(doc, unordered_id, arena);
|
||||
}
|
||||
|
||||
|
||||
@@ -338,7 +338,7 @@ impl ColumnarWriter {
|
||||
let mut columns: Vec<(&[u8], ColumnType, Addr)> = self
|
||||
.numerical_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr)| {
|
||||
.map(|(column_name, addr, _)| {
|
||||
let numerical_column_writer: NumericalColumnWriter =
|
||||
self.numerical_field_hash_map.read(addr);
|
||||
let column_type = numerical_column_writer.numerical_type().into();
|
||||
@@ -348,27 +348,27 @@ impl ColumnarWriter {
|
||||
columns.extend(
|
||||
self.bytes_field_hash_map
|
||||
.iter()
|
||||
.map(|(term, addr)| (term, ColumnType::Bytes, addr)),
|
||||
.map(|(term, addr, _)| (term, ColumnType::Bytes, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.str_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::Str, addr)),
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::Str, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.bool_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::Bool, addr)),
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::Bool, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.ip_addr_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::IpAddr, addr)),
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::IpAddr, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.datetime_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::DateTime, addr)),
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::DateTime, addr)),
|
||||
);
|
||||
columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
|
||||
|
||||
@@ -437,7 +437,6 @@ impl ColumnarWriter {
|
||||
&mut symbol_byte_buffer,
|
||||
),
|
||||
buffers,
|
||||
&self.arena,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
column_serializer.finalize()?;
|
||||
@@ -491,7 +490,6 @@ impl ColumnarWriter {
|
||||
|
||||
// Serialize [Dictionary, Column, dictionary num bytes U32::LE]
|
||||
// Column: [Column Index, Column Values, column index num bytes U32::LE]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn serialize_bytes_or_str_column(
|
||||
cardinality: Cardinality,
|
||||
num_docs: RowId,
|
||||
@@ -499,7 +497,6 @@ fn serialize_bytes_or_str_column(
|
||||
dictionary_builder: &DictionaryBuilder,
|
||||
operation_it: impl Iterator<Item = ColumnOperation<UnorderedId>>,
|
||||
buffers: &mut SpareBuffers,
|
||||
arena: &MemoryArena,
|
||||
wrt: impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
let SpareBuffers {
|
||||
@@ -508,8 +505,7 @@ fn serialize_bytes_or_str_column(
|
||||
..
|
||||
} = buffers;
|
||||
let mut counting_writer = CountingWriter::wrap(wrt);
|
||||
let term_id_mapping: TermIdMapping =
|
||||
dictionary_builder.serialize(arena, &mut counting_writer)?;
|
||||
let term_id_mapping: TermIdMapping = dictionary_builder.serialize(&mut counting_writer)?;
|
||||
let dictionary_num_bytes: u32 = counting_writer.written_bytes() as u32;
|
||||
let mut wrt = counting_writer.finish();
|
||||
let operation_iterator = operation_it.map(|symbol: ColumnOperation<UnorderedId>| {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::io;
|
||||
|
||||
use fnv::FnvHashMap;
|
||||
use sstable::SSTable;
|
||||
use stacker::{MemoryArena, SharedArenaHashMap};
|
||||
|
||||
pub(crate) struct TermIdMapping {
|
||||
unordered_to_ord: Vec<OrderedId>,
|
||||
@@ -31,38 +31,29 @@ pub struct OrderedId(pub u32);
|
||||
/// mapping.
|
||||
#[derive(Default)]
|
||||
pub(crate) struct DictionaryBuilder {
|
||||
dict: SharedArenaHashMap,
|
||||
dict: FnvHashMap<Vec<u8>, UnorderedId>,
|
||||
memory_consumption: usize,
|
||||
}
|
||||
|
||||
impl DictionaryBuilder {
|
||||
/// Get or allocate an unordered id.
|
||||
/// (This ID is simply an auto-incremented id.)
|
||||
pub fn get_or_allocate_id(&mut self, term: &[u8], arena: &mut MemoryArena) -> UnorderedId {
|
||||
let next_id = self.dict.len() as u32;
|
||||
let unordered_id = self
|
||||
.dict
|
||||
.mutate_or_create(term, arena, |unordered_id: Option<u32>| {
|
||||
if let Some(unordered_id) = unordered_id {
|
||||
unordered_id
|
||||
} else {
|
||||
next_id
|
||||
}
|
||||
});
|
||||
UnorderedId(unordered_id)
|
||||
pub fn get_or_allocate_id(&mut self, term: &[u8]) -> UnorderedId {
|
||||
if let Some(term_id) = self.dict.get(term) {
|
||||
return *term_id;
|
||||
}
|
||||
let new_id = UnorderedId(self.dict.len() as u32);
|
||||
self.dict.insert(term.to_vec(), new_id);
|
||||
self.memory_consumption += term.len();
|
||||
self.memory_consumption += 40; // Term Metadata + HashMap overhead
|
||||
new_id
|
||||
}
|
||||
|
||||
/// Serialize the dictionary into an fst, and returns the
|
||||
/// `UnorderedId -> TermOrdinal` map.
|
||||
pub fn serialize<'a, W: io::Write + 'a>(
|
||||
&self,
|
||||
arena: &MemoryArena,
|
||||
wrt: &mut W,
|
||||
) -> io::Result<TermIdMapping> {
|
||||
let mut terms: Vec<(&[u8], UnorderedId)> = self
|
||||
.dict
|
||||
.iter(arena)
|
||||
.map(|(k, v)| (k, arena.read(v)))
|
||||
.collect();
|
||||
pub fn serialize<'a, W: io::Write + 'a>(&self, wrt: &mut W) -> io::Result<TermIdMapping> {
|
||||
let mut terms: Vec<(&[u8], UnorderedId)> =
|
||||
self.dict.iter().map(|(k, v)| (k.as_slice(), *v)).collect();
|
||||
terms.sort_unstable_by_key(|(key, _)| *key);
|
||||
// TODO Remove the allocation.
|
||||
let mut unordered_to_ord: Vec<OrderedId> = vec![OrderedId(0u32); terms.len()];
|
||||
@@ -77,7 +68,7 @@ impl DictionaryBuilder {
|
||||
}
|
||||
|
||||
pub(crate) fn mem_usage(&self) -> usize {
|
||||
self.dict.mem_usage()
|
||||
self.memory_consumption
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,13 +78,12 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_dictionary_builder() {
|
||||
let mut arena = MemoryArena::default();
|
||||
let mut dictionary_builder = DictionaryBuilder::default();
|
||||
let hello_uid = dictionary_builder.get_or_allocate_id(b"hello", &mut arena);
|
||||
let happy_uid = dictionary_builder.get_or_allocate_id(b"happy", &mut arena);
|
||||
let tax_uid = dictionary_builder.get_or_allocate_id(b"tax", &mut arena);
|
||||
let hello_uid = dictionary_builder.get_or_allocate_id(b"hello");
|
||||
let happy_uid = dictionary_builder.get_or_allocate_id(b"happy");
|
||||
let tax_uid = dictionary_builder.get_or_allocate_id(b"tax");
|
||||
let mut buffer = Vec::new();
|
||||
let id_mapping = dictionary_builder.serialize(&arena, &mut buffer).unwrap();
|
||||
let id_mapping = dictionary_builder.serialize(&mut buffer).unwrap();
|
||||
assert_eq!(id_mapping.to_ord(hello_uid), OrderedId(1));
|
||||
assert_eq!(id_mapping.to_ord(happy_uid), OrderedId(0));
|
||||
assert_eq!(id_mapping.to_ord(tax_uid), OrderedId(2));
|
||||
|
||||
@@ -1,22 +1,3 @@
|
||||
//! # Tantivy-Columnar
|
||||
//!
|
||||
//! `tantivy-columnar`provides a columnar storage for tantivy.
|
||||
//! The crate allows for efficient read operations on specific columns rather than entire records.
|
||||
//!
|
||||
//! ## Overview
|
||||
//!
|
||||
//! - **columnar**: Reading, writing, and merging multiple columns:
|
||||
//! - **[ColumnarWriter]**: Makes it possible to create a new columnar.
|
||||
//! - **[ColumnarReader]**: The ColumnarReader makes it possible to access a set of columns
|
||||
//! associated to field names.
|
||||
//! - **[merge_columnar]**: Contains the functionalities to merge multiple ColumnarReader or
|
||||
//! segments into a single one.
|
||||
//!
|
||||
//! - **column**: A single column, which contains
|
||||
//! - [column_index]: Resolves the rows for a document id. Manages the cardinality of the
|
||||
//! column.
|
||||
//! - [column_values]: Stores the values of a column in a dense format.
|
||||
|
||||
#![cfg_attr(all(feature = "unstable", test), feature(test))]
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -31,7 +12,7 @@ use std::io;
|
||||
|
||||
mod block_accessor;
|
||||
mod column;
|
||||
pub mod column_index;
|
||||
mod column_index;
|
||||
pub mod column_values;
|
||||
mod columnar;
|
||||
mod dictionary;
|
||||
|
||||
@@ -26,7 +26,7 @@ fn test_dataframe_writer_str() {
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 73);
|
||||
assert_eq!(cols[0].num_bytes(), 87);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -40,7 +40,7 @@ fn test_dataframe_writer_bytes() {
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 73);
|
||||
assert_eq!(cols[0].num_bytes(), 87);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -330,9 +330,9 @@ fn bytes_strategy() -> impl Strategy<Value = &'static [u8]> {
|
||||
// A random column value
|
||||
fn column_value_strategy() -> impl Strategy<Value = ColumnValue> {
|
||||
prop_oneof![
|
||||
10 => string_strategy().prop_map(ColumnValue::Str),
|
||||
1 => bytes_strategy().prop_map(ColumnValue::Bytes),
|
||||
40 => num_strategy().prop_map(ColumnValue::Numerical),
|
||||
10 => string_strategy().prop_map(|s| ColumnValue::Str(s)),
|
||||
1 => bytes_strategy().prop_map(|b| ColumnValue::Bytes(b)),
|
||||
40 => num_strategy().prop_map(|n| ColumnValue::Numerical(n)),
|
||||
1 => (1u16..3u16).prop_map(|ip_addr_byte| ColumnValue::IpAddr(Ipv6Addr::new(
|
||||
127,
|
||||
0,
|
||||
@@ -343,7 +343,7 @@ fn column_value_strategy() -> impl Strategy<Value = ColumnValue> {
|
||||
0,
|
||||
ip_addr_byte
|
||||
))),
|
||||
1 => any::<bool>().prop_map(ColumnValue::Bool),
|
||||
1 => any::<bool>().prop_map(|b| ColumnValue::Bool(b)),
|
||||
1 => (0_679_723_993i64..1_679_723_995i64)
|
||||
.prop_map(|val| { ColumnValue::DateTime(DateTime::from_timestamp_secs(val)) })
|
||||
]
|
||||
@@ -419,8 +419,8 @@ fn build_columnar_with_mapping(
|
||||
columnar_writer
|
||||
.serialize(num_docs, old_to_new_row_ids_opt, &mut buffer)
|
||||
.unwrap();
|
||||
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
columnar_reader
|
||||
}
|
||||
|
||||
fn build_columnar(docs: &[Vec<(&'static str, ColumnValue)>]) -> ColumnarReader {
|
||||
@@ -746,7 +746,7 @@ proptest! {
|
||||
let stack_merge_order = StackMergeOrder::stack(&columnar_readers_arr[..]).into();
|
||||
crate::merge_columnar(&columnar_readers_arr[..], &[], stack_merge_order, &mut output).unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> = columnar_docs.iter().flatten().cloned().collect();
|
||||
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> = columnar_docs.iter().cloned().flatten().collect();
|
||||
let expected_merged_columnar = build_columnar(&concat_rows[..]);
|
||||
assert_columnar_eq_strict(&merged_columnar, &expected_merged_columnar);
|
||||
}
|
||||
@@ -772,7 +772,7 @@ fn test_columnar_merging_empty_columnar() {
|
||||
.unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> =
|
||||
columnar_docs.iter().flatten().cloned().collect();
|
||||
columnar_docs.iter().cloned().flatten().collect();
|
||||
let expected_merged_columnar = build_columnar(&concat_rows[..]);
|
||||
assert_columnar_eq_strict(&merged_columnar, &expected_merged_columnar);
|
||||
}
|
||||
@@ -809,7 +809,7 @@ fn test_columnar_merging_number_columns() {
|
||||
.unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> =
|
||||
columnar_docs.iter().flatten().cloned().collect();
|
||||
columnar_docs.iter().cloned().flatten().collect();
|
||||
let expected_merged_columnar = build_columnar(&concat_rows[..]);
|
||||
assert_columnar_eq_strict(&merged_columnar, &expected_merged_columnar);
|
||||
}
|
||||
|
||||
@@ -1,14 +1,11 @@
|
||||
#![allow(deprecated)]
|
||||
|
||||
use std::fmt;
|
||||
use std::io::{Read, Write};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use time::format_description::well_known::Rfc3339;
|
||||
use time::{OffsetDateTime, PrimitiveDateTime, UtcOffset};
|
||||
|
||||
use crate::BinarySerializable;
|
||||
|
||||
/// Precision with which datetimes are truncated when stored in fast fields. This setting is only
|
||||
/// relevant for fast fields. In the docstore, datetimes are always saved with nanosecond precision.
|
||||
#[derive(
|
||||
@@ -167,15 +164,3 @@ impl fmt::Debug for DateTime {
|
||||
f.write_str(&utc_rfc3339)
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for DateTime {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
let timestamp_micros = self.into_timestamp_micros();
|
||||
<i64 as BinarySerializable>::serialize(×tamp_micros, writer)
|
||||
}
|
||||
|
||||
fn deserialize<R: Read>(reader: &mut R) -> std::io::Result<Self> {
|
||||
let timestamp_micros = <i64 as BinarySerializable>::deserialize(reader)?;
|
||||
Ok(Self::from_timestamp_micros(timestamp_micros))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,112 +0,0 @@
|
||||
use crate::replace_in_place;
|
||||
|
||||
/// Separates the different segments of a json path.
|
||||
pub const JSON_PATH_SEGMENT_SEP: u8 = 1u8;
|
||||
pub const JSON_PATH_SEGMENT_SEP_STR: &str =
|
||||
unsafe { std::str::from_utf8_unchecked(&[JSON_PATH_SEGMENT_SEP]) };
|
||||
|
||||
/// Create a new JsonPathWriter, that creates flattened json paths for tantivy.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct JsonPathWriter {
|
||||
path: String,
|
||||
indices: Vec<usize>,
|
||||
expand_dots: bool,
|
||||
}
|
||||
|
||||
impl JsonPathWriter {
|
||||
pub fn new() -> Self {
|
||||
JsonPathWriter {
|
||||
path: String::new(),
|
||||
indices: Vec::new(),
|
||||
expand_dots: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// When expand_dots is enabled, json object like
|
||||
/// `{"k8s.node.id": 5}` is processed as if it was
|
||||
/// `{"k8s": {"node": {"id": 5}}}`.
|
||||
/// This option has the merit of allowing users to
|
||||
/// write queries like `k8s.node.id:5`.
|
||||
/// On the other, enabling that feature can lead to
|
||||
/// ambiguity.
|
||||
#[inline]
|
||||
pub fn set_expand_dots(&mut self, expand_dots: bool) {
|
||||
self.expand_dots = expand_dots;
|
||||
}
|
||||
|
||||
/// Push a new segment to the path.
|
||||
#[inline]
|
||||
pub fn push(&mut self, segment: &str) {
|
||||
let len_path = self.path.len();
|
||||
self.indices.push(len_path);
|
||||
if !self.path.is_empty() {
|
||||
self.path.push_str(JSON_PATH_SEGMENT_SEP_STR);
|
||||
}
|
||||
self.path.push_str(segment);
|
||||
if self.expand_dots {
|
||||
// This might include the separation byte, which is ok because it is not a dot.
|
||||
let appended_segment = &mut self.path[len_path..];
|
||||
// The unsafe below is safe as long as b'.' and JSON_PATH_SEGMENT_SEP are
|
||||
// valid single byte ut8 strings.
|
||||
// By utf-8 design, they cannot be part of another codepoint.
|
||||
unsafe {
|
||||
replace_in_place(b'.', JSON_PATH_SEGMENT_SEP, appended_segment.as_bytes_mut())
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove the last segment. Does nothing if the path is empty.
|
||||
#[inline]
|
||||
pub fn pop(&mut self) {
|
||||
if let Some(last_idx) = self.indices.pop() {
|
||||
self.path.truncate(last_idx);
|
||||
}
|
||||
}
|
||||
|
||||
/// Clear the path.
|
||||
#[inline]
|
||||
pub fn clear(&mut self) {
|
||||
self.path.clear();
|
||||
self.indices.clear();
|
||||
}
|
||||
|
||||
/// Get the current path.
|
||||
#[inline]
|
||||
pub fn as_str(&self) -> &str {
|
||||
&self.path
|
||||
}
|
||||
}
|
||||
|
||||
impl From<JsonPathWriter> for String {
|
||||
#[inline]
|
||||
fn from(value: JsonPathWriter) -> Self {
|
||||
value.path
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn json_path_writer_test() {
|
||||
let mut writer = JsonPathWriter::new();
|
||||
|
||||
writer.push("root");
|
||||
assert_eq!(writer.as_str(), "root");
|
||||
|
||||
writer.push("child");
|
||||
assert_eq!(writer.as_str(), "root\u{1}child");
|
||||
|
||||
writer.pop();
|
||||
assert_eq!(writer.as_str(), "root");
|
||||
|
||||
writer.push("k8s.node.id");
|
||||
assert_eq!(writer.as_str(), "root\u{1}k8s.node.id");
|
||||
|
||||
writer.set_expand_dots(true);
|
||||
writer.pop();
|
||||
writer.push("k8s.node.id");
|
||||
assert_eq!(writer.as_str(), "root\u{1}k8s\u{1}node\u{1}id");
|
||||
}
|
||||
}
|
||||
@@ -9,7 +9,6 @@ mod byte_count;
|
||||
mod datetime;
|
||||
pub mod file_slice;
|
||||
mod group_by;
|
||||
mod json_path_writer;
|
||||
mod serialize;
|
||||
mod vint;
|
||||
mod writer;
|
||||
@@ -19,7 +18,6 @@ pub use byte_count::ByteCount;
|
||||
pub use datetime::DatePrecision;
|
||||
pub use datetime::{DateTime, DateTimePrecision};
|
||||
pub use group_by::GroupByIteratorExtended;
|
||||
pub use json_path_writer::JsonPathWriter;
|
||||
pub use ownedbytes::{OwnedBytes, StableDeref};
|
||||
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
|
||||
pub use vint::{
|
||||
@@ -118,7 +116,6 @@ pub fn u64_to_f64(val: u64) -> f64 {
|
||||
///
|
||||
/// This function assumes that the needle is rarely contained in the bytes string
|
||||
/// and offers a fast path if the needle is not present.
|
||||
#[inline]
|
||||
pub fn replace_in_place(needle: u8, replacement: u8, bytes: &mut [u8]) {
|
||||
if !bytes.contains(&needle) {
|
||||
return;
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use std::borrow::Cow;
|
||||
use std::io::{Read, Write};
|
||||
use std::{fmt, io};
|
||||
|
||||
@@ -250,43 +249,6 @@ impl BinarySerializable for String {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> BinarySerializable for Cow<'a, str> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
let data: &[u8] = self.as_bytes();
|
||||
VInt(data.len() as u64).serialize(writer)?;
|
||||
writer.write_all(data)
|
||||
}
|
||||
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Cow<'a, str>> {
|
||||
let string_length = VInt::deserialize(reader)?.val() as usize;
|
||||
let mut result = String::with_capacity(string_length);
|
||||
reader
|
||||
.take(string_length as u64)
|
||||
.read_to_string(&mut result)?;
|
||||
Ok(Cow::Owned(result))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> BinarySerializable for Cow<'a, [u8]> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.len() as u64).serialize(writer)?;
|
||||
for it in self.iter() {
|
||||
it.serialize(writer)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Cow<'a, [u8]>> {
|
||||
let num_items = VInt::deserialize(reader)?.val();
|
||||
let mut items: Vec<u8> = Vec::with_capacity(num_items as usize);
|
||||
for _ in 0..num_items {
|
||||
let item = u8::deserialize(reader)?;
|
||||
items.push(item);
|
||||
}
|
||||
Ok(Cow::Owned(items))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod test {
|
||||
|
||||
|
||||

@@ -12,7 +12,7 @@ use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;
use tantivy::schema::{self, IndexRecordOption, Schema, TextFieldIndexing, FAST};
use tantivy::{Index, IndexWriter, TantivyDocument};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
// # Create Schema
@@ -132,10 +132,10 @@ fn main() -> tantivy::Result<()> {

let stream = Deserializer::from_str(data).into_iter::<Value>();

let mut index_writer: IndexWriter = index.writer(50_000_000)?;
let mut index_writer = index.writer(50_000_000)?;
let mut num_indexed = 0;
for value in stream {
let doc = TantivyDocument::parse_json(&schema, &serde_json::to_string(&value.unwrap())?)?;
let doc = schema.parse_document(&serde_json::to_string(&value.unwrap())?)?;
index_writer.add_document(doc)?;
num_indexed += 1;
if num_indexed > 4 {

@@ -15,7 +15,7 @@
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::{doc, Index, IndexWriter, ReloadPolicy};
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;

fn main() -> tantivy::Result<()> {
@@ -75,7 +75,7 @@ fn main() -> tantivy::Result<()> {
// Here we give tantivy a budget of `50MB`.
// Using a bigger memory_arena for the indexer may increase
// throughput, but 50 MB is already plenty.
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
let mut index_writer = index.writer(50_000_000)?;

// Let's index our documents!
// We first need a handle on the title and the body field.
@@ -87,7 +87,7 @@ fn main() -> tantivy::Result<()> {
let title = schema.get_field("title").unwrap();
let body = schema.get_field("body").unwrap();

let mut old_man_doc = TantivyDocument::default();
let mut old_man_doc = Document::default();
old_man_doc.add_text(title, "The Old Man and the Sea");
old_man_doc.add_text(
body,
@@ -164,7 +164,7 @@ fn main() -> tantivy::Result<()> {
// will reload the index automatically after each commit.
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommitWithDelay)
.reload_policy(ReloadPolicy::OnCommit)
.try_into()?;

// We now need to acquire a searcher.
@@ -217,8 +217,8 @@ fn main() -> tantivy::Result<()> {
// the document returned will only contain
// a title.
for (_score, doc_address) in top_docs {
let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
println!("{}", retrieved_doc.to_json(&schema));
let retrieved_doc = searcher.doc(doc_address)?;
println!("{}", schema.to_json(&retrieved_doc));
}

// We can also get an explanation to understand

@@ -13,7 +13,7 @@ use columnar::Column;
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, IndexWriter, Score, SegmentReader};
use tantivy::{doc, Index, Score, SegmentReader};

#[derive(Default)]
struct Stats {
@@ -142,7 +142,7 @@ fn main() -> tantivy::Result<()> {
// this example.
let index = Index::create_in_ram(schema);

let mut index_writer: IndexWriter = index.writer(50_000_000)?;
let mut index_writer = index.writer(50_000_000)?;
index_writer.add_document(doc!(
product_name => "Super Broom 2000",
product_description => "While it is ok for short distance travel, this broom \

@@ -6,7 +6,7 @@ use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::NgramTokenizer;
use tantivy::{doc, Index, IndexWriter};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
// # Defining the schema
@@ -62,7 +62,7 @@ fn main() -> tantivy::Result<()> {
//
// Here we use a buffer of 50MB per thread. Using a bigger
// memory arena for the indexer can increase its throughput.
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
let mut index_writer = index.writer(50_000_000)?;
index_writer.add_document(doc!(
title => "The Old Man and the Sea",
body => "He was an old man who fished alone in a skiff in the Gulf Stream and \
@@ -103,8 +103,8 @@ fn main() -> tantivy::Result<()> {
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

for (_, doc_address) in top_docs {
let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
println!("{}", retrieved_doc.to_json(&schema));
let retrieved_doc = searcher.doc(doc_address)?;
println!("{}", schema.to_json(&retrieved_doc));
}

Ok(())

@@ -4,8 +4,8 @@

use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{DateOptions, Document, OwnedValue, Schema, INDEXED, STORED, STRING};
use tantivy::{Index, IndexWriter, TantivyDocument};
use tantivy::schema::{DateOptions, Schema, Value, INDEXED, STORED, STRING};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
// # Defining the schema
@@ -22,18 +22,16 @@ fn main() -> tantivy::Result<()> {
// # Indexing documents
let index = Index::create_in_ram(schema.clone());

let mut index_writer: IndexWriter = index.writer(50_000_000)?;
let mut index_writer = index.writer(50_000_000)?;
// The dates are passed as string in the RFC3339 format
let doc = TantivyDocument::parse_json(
&schema,
let doc = schema.parse_document(
r#"{
"occurred_at": "2022-06-22T12:53:50.53Z",
"event": "pull-request"
}"#,
)?;
index_writer.add_document(doc)?;
let doc = TantivyDocument::parse_json(
&schema,
let doc = schema.parse_document(
r#"{
"occurred_at": "2022-06-22T13:00:00.22Z",
"event": "comment"
@@ -60,13 +58,13 @@ fn main() -> tantivy::Result<()> {
let count_docs = searcher.search(&*query, &TopDocs::with_limit(4))?;
assert_eq!(count_docs.len(), 1);
for (_score, doc_address) in count_docs {
let retrieved_doc = searcher.doc::<TantivyDocument>(doc_address)?;
let retrieved_doc = searcher.doc(doc_address)?;
assert!(matches!(
retrieved_doc.get_first(occurred_at),
Some(OwnedValue::Date(_))
Some(Value::Date(_))
));
assert_eq!(
retrieved_doc.to_json(&schema),
schema.to_json(&retrieved_doc),
r#"{"event":["comment"],"occurred_at":["2022-06-22T13:00:00.22Z"]}"#
);
}

@@ -11,7 +11,7 @@
use tantivy::collector::TopDocs;
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::{doc, Index, IndexReader, IndexWriter};
use tantivy::{doc, Index, IndexReader};

// A simple helper function to fetch a single document
// given its id from our index.
@@ -19,7 +19,7 @@ use tantivy::{doc, Index, IndexReader, IndexWriter};
fn extract_doc_given_isbn(
reader: &IndexReader,
isbn_term: &Term,
) -> tantivy::Result<Option<TantivyDocument>> {
) -> tantivy::Result<Option<Document>> {
let searcher = reader.searcher();

// This is the simplest query you can think of.
@@ -69,10 +69,10 @@ fn main() -> tantivy::Result<()> {

let index = Index::create_in_ram(schema.clone());

let mut index_writer: IndexWriter = index.writer(50_000_000)?;
let mut index_writer = index.writer(50_000_000)?;

// Let's add a couple of documents, for the sake of the example.
let mut old_man_doc = TantivyDocument::default();
let mut old_man_doc = Document::default();
old_man_doc.add_text(title, "The Old Man and the Sea");
index_writer.add_document(doc!(
isbn => "978-0099908401",
@@ -94,7 +94,7 @@ fn main() -> tantivy::Result<()> {
// Oops our frankenstein doc seems misspelled
let frankenstein_doc_misspelled = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
assert_eq!(
frankenstein_doc_misspelled.to_json(&schema),
schema.to_json(&frankenstein_doc_misspelled),
r#"{"isbn":["978-9176370711"],"title":["Frankentein"]}"#,
);

@@ -136,7 +136,7 @@ fn main() -> tantivy::Result<()> {
// No more typo!
let frankenstein_new_doc = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
assert_eq!(
frankenstein_new_doc.to_json(&schema),
schema.to_json(&frankenstein_new_doc),
r#"{"isbn":["978-9176370711"],"title":["Frankenstein"]}"#,
);

@@ -17,7 +17,7 @@
use tantivy::collector::FacetCollector;
use tantivy::query::{AllQuery, TermQuery};
use tantivy::schema::*;
use tantivy::{doc, Index, IndexWriter};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the sake of this example
@@ -30,7 +30,7 @@ fn main() -> tantivy::Result<()> {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);

let mut index_writer: IndexWriter = index.writer(30_000_000)?;
let mut index_writer = index.writer(30_000_000)?;

// For convenience, tantivy also comes with a macro to
// reduce the boilerplate above.

@@ -12,7 +12,7 @@ use std::collections::HashSet;
use tantivy::collector::TopDocs;
use tantivy::query::BooleanQuery;
use tantivy::schema::*;
use tantivy::{doc, DocId, Index, IndexWriter, Score, SegmentReader};
use tantivy::{doc, DocId, Index, Score, SegmentReader};

fn main() -> tantivy::Result<()> {
let mut schema_builder = Schema::builder();
@@ -23,7 +23,7 @@ fn main() -> tantivy::Result<()> {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);

let mut index_writer: IndexWriter = index.writer(30_000_000)?;
let mut index_writer = index.writer(30_000_000)?;

index_writer.add_document(doc!(
title => "Fried egg",
@@ -91,10 +91,11 @@ fn main() -> tantivy::Result<()> {
.iter()
.map(|(_, doc_id)| {
searcher
.doc::<TantivyDocument>(*doc_id)
.doc(*doc_id)
.unwrap()
.get_first(title)
.and_then(|v| v.as_str())
.unwrap()
.as_text()
.unwrap()
.to_owned()
})

@@ -14,7 +14,7 @@
use tantivy::collector::{Count, TopDocs};
use tantivy::query::FuzzyTermQuery;
use tantivy::schema::*;
use tantivy::{doc, Index, IndexWriter, ReloadPolicy};
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;

fn main() -> tantivy::Result<()> {
@@ -66,7 +66,7 @@ fn main() -> tantivy::Result<()> {
// Here we give tantivy a budget of `50MB`.
// Using a bigger memory_arena for the indexer may increase
// throughput, but 50 MB is already plenty.
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
let mut index_writer = index.writer(50_000_000)?;

// Let's index our documents!
// We first need a handle on the title and the body field.
@@ -123,7 +123,7 @@ fn main() -> tantivy::Result<()> {
// will reload the index automatically after each commit.
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommitWithDelay)
.reload_policy(ReloadPolicy::OnCommit)
.try_into()?;

// We now need to acquire a searcher.
@@ -151,10 +151,10 @@ fn main() -> tantivy::Result<()> {
assert_eq!(count, 3);
assert_eq!(top_docs.len(), 3);
for (score, doc_address) in top_docs {
let retrieved_doc = searcher.doc(doc_address)?;
// Note that the score is not lower for the fuzzy hit.
// There's an issue open for that: https://github.com/quickwit-oss/tantivy/issues/563
let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
println!("score {score:?} doc {}", retrieved_doc.to_json(&schema));
println!("score {score:?} doc {}", schema.to_json(&retrieved_doc));
// score 1.0 doc {"title":["The Diary of Muadib"]}
//
// score 1.0 doc {"title":["The Diary of a Young Girl"]}

@@ -21,7 +21,7 @@ fn main() -> tantivy::Result<()> {
}"#;

// We can parse our document
let _mice_and_men_doc = TantivyDocument::parse_json(&schema, mice_and_men_doc_json)?;
let _mice_and_men_doc = schema.parse_document(mice_and_men_doc_json)?;

// Multi-valued field are allowed, they are
// expressed in JSON by an array.
@@ -30,7 +30,7 @@ fn main() -> tantivy::Result<()> {
"title": ["Frankenstein", "The Modern Prometheus"],
"year": 1818
}"#;
let _frankenstein_doc = TantivyDocument::parse_json(&schema, frankenstein_json)?;
let _frankenstein_doc = schema.parse_document(frankenstein_json)?;

// Note that the schema is saved in your index directory.
//

@@ -5,7 +5,7 @@
use tantivy::collector::Count;
use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INDEXED};
use tantivy::{doc, Index, IndexWriter, Result};
use tantivy::{doc, Index, Result};

fn main() -> Result<()> {
// For the sake of simplicity, this schema will only have 1 field
@@ -17,7 +17,7 @@ fn main() -> Result<()> {
let index = Index::create_in_ram(schema);
let reader = index.reader()?;
{
let mut index_writer: IndexWriter = index.writer_with_num_threads(1, 6_000_000)?;
let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
for year in 1950u64..2019u64 {
index_writer.add_document(doc!(year_field => year))?;
}

@@ -6,7 +6,7 @@
use tantivy::collector::{Count, TopDocs};
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, INDEXED, STORED, STRING};
use tantivy::{Index, IndexWriter, TantivyDocument};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
// # Defining the schema
@@ -22,22 +22,20 @@ fn main() -> tantivy::Result<()> {
// # Indexing documents
let index = Index::create_in_ram(schema.clone());

let mut index_writer: IndexWriter = index.writer(50_000_000)?;
let mut index_writer = index.writer(50_000_000)?;

// ### IPv4
// Adding documents that contain an IPv4 address. Notice that the IP addresses are passed as
// `String`. Since the field is of type ip, we parse the IP address from the string and store it
// internally as IPv6.
let doc = TantivyDocument::parse_json(
&schema,
let doc = schema.parse_document(
r#"{
"ip": "192.168.0.33",
"event_type": "login"
}"#,
)?;
index_writer.add_document(doc)?;
let doc = TantivyDocument::parse_json(
&schema,
let doc = schema.parse_document(
r#"{
"ip": "192.168.0.80",
"event_type": "checkout"
@@ -46,8 +44,7 @@ fn main() -> tantivy::Result<()> {
index_writer.add_document(doc)?;
// ### IPv6
// Adding a document that contains an IPv6 address.
let doc = TantivyDocument::parse_json(
&schema,
let doc = schema.parse_document(
r#"{
"ip": "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
"event_type": "checkout"

@@ -10,7 +10,7 @@
// ---
// Importing tantivy...
use tantivy::schema::*;
use tantivy::{doc, DocSet, Index, IndexWriter, Postings, TERMINATED};
use tantivy::{doc, DocSet, Index, Postings, TERMINATED};

fn main() -> tantivy::Result<()> {
// We first create a schema for the sake of the
@@ -24,7 +24,7 @@ fn main() -> tantivy::Result<()> {

let index = Index::create_in_ram(schema);

let mut index_writer: IndexWriter = index.writer_with_num_threads(1, 50_000_000)?;
let mut index_writer = index.writer_with_num_threads(1, 50_000_000)?;
index_writer.add_document(doc!(title => "The Old Man and the Sea"))?;
index_writer.add_document(doc!(title => "Of Mice and Men"))?;
index_writer.add_document(doc!(title => "The modern Promotheus"))?;

@@ -7,7 +7,7 @@
use tantivy::collector::{Count, TopDocs};
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, STORED, STRING, TEXT};
use tantivy::{Index, IndexWriter, TantivyDocument};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
// # Defining the schema
@@ -20,9 +20,8 @@ fn main() -> tantivy::Result<()> {
// # Indexing documents
let index = Index::create_in_ram(schema.clone());

let mut index_writer: IndexWriter = index.writer(50_000_000)?;
let doc = TantivyDocument::parse_json(
&schema,
let mut index_writer = index.writer(50_000_000)?;
let doc = schema.parse_document(
r#"{
"timestamp": "2022-02-22T23:20:50.53Z",
"event_type": "click",
@@ -34,8 +33,7 @@ fn main() -> tantivy::Result<()> {
}"#,
)?;
index_writer.add_document(doc)?;
let doc = TantivyDocument::parse_json(
&schema,
let doc = schema.parse_document(
r#"{
"timestamp": "2022-02-22T23:20:51.53Z",
"event_type": "click",

@@ -1,7 +1,7 @@
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::{doc, Index, IndexWriter, ReloadPolicy, Result};
use tantivy::{doc, Index, ReloadPolicy, Result};
use tempfile::TempDir;

fn main() -> Result<()> {
@@ -17,7 +17,7 @@ fn main() -> Result<()> {

let index = Index::create_in_dir(&index_path, schema)?;

let mut index_writer: IndexWriter = index.writer(50_000_000)?;
let mut index_writer = index.writer(50_000_000)?;

index_writer.add_document(doc!(
title => "The Old Man and the Sea",
@@ -51,7 +51,7 @@ fn main() -> Result<()> {

let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommitWithDelay)
.reload_policy(ReloadPolicy::OnCommit)
.try_into()?;

let searcher = reader.searcher();
@@ -67,12 +67,8 @@ fn main() -> Result<()> {
let mut titles = top_docs
.into_iter()
.map(|(_score, doc_address)| {
let doc = searcher.doc::<TantivyDocument>(doc_address)?;
let title = doc
.get_first(title)
.and_then(|v| v.as_str())
.unwrap()
.to_owned();
let doc = searcher.doc(doc_address)?;
let title = doc.get_first(title).unwrap().as_text().unwrap().to_owned();
Ok(title)
})
.collect::<Result<Vec<_>>>()?;

@@ -13,7 +13,7 @@ use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, TokenStream, Tokenizer};
use tantivy::{doc, Index, IndexWriter, ReloadPolicy};
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;

fn pre_tokenize_text(text: &str) -> Vec<Token> {
@@ -38,7 +38,7 @@ fn main() -> tantivy::Result<()> {

let index = Index::create_in_dir(&index_path, schema.clone())?;

let mut index_writer: IndexWriter = index.writer(50_000_000)?;
let mut index_writer = index.writer(50_000_000)?;

// We can create a document manually, by setting the fields
// one by one in a Document object.
@@ -83,7 +83,7 @@ fn main() -> tantivy::Result<()> {
}]
}"#;

let short_man_doc = TantivyDocument::parse_json(&schema, short_man_json)?;
let short_man_doc = schema.parse_document(short_man_json)?;

index_writer.add_document(short_man_doc)?;

@@ -94,7 +94,7 @@ fn main() -> tantivy::Result<()> {

let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommitWithDelay)
.reload_policy(ReloadPolicy::OnCommit)
.try_into()?;

let searcher = reader.searcher();
@@ -115,8 +115,8 @@ fn main() -> tantivy::Result<()> {
// Note that the tokens are not stored along with the original text
// in the document store
for (_score, doc_address) in top_docs {
let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
println!("{}", retrieved_doc.to_json(&schema));
let retrieved_doc = searcher.doc(doc_address)?;
println!("Document: {}", schema.to_json(&retrieved_doc));
}

// In contrary to the previous query, when we search for the "man" term we

@@ -10,8 +10,7 @@
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::snippet::{Snippet, SnippetGenerator};
use tantivy::{doc, Index, IndexWriter};
use tantivy::{doc, Index, Snippet, SnippetGenerator};
use tempfile::TempDir;

fn main() -> tantivy::Result<()> {
@@ -28,7 +27,7 @@ fn main() -> tantivy::Result<()> {
// # Indexing documents
let index = Index::create_in_dir(&index_path, schema)?;

let mut index_writer: IndexWriter = index.writer(50_000_000)?;
let mut index_writer = index.writer(50_000_000)?;

// we'll only need one doc for this example.
index_writer.add_document(doc!(
@@ -55,10 +54,13 @@ fn main() -> tantivy::Result<()> {
let snippet_generator = SnippetGenerator::create(&searcher, &*query, body)?;

for (score, doc_address) in top_docs {
let doc = searcher.doc::<TantivyDocument>(doc_address)?;
let doc = searcher.doc(doc_address)?;
let snippet = snippet_generator.snippet_from_doc(&doc);
println!("Document score {score}:");
println!("title: {}", doc.get_first(title).unwrap().as_str().unwrap());
println!(
"title: {}",
doc.get_first(title).unwrap().as_text().unwrap()
);
println!("snippet: {}", snippet.to_html());
println!("custom highlighting: {}", highlight(snippet));
}

@@ -15,7 +15,7 @@ use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::*;
use tantivy::{doc, Index, IndexWriter};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
// this example assumes you understand the content in `basic_search`
@@ -60,7 +60,7 @@ fn main() -> tantivy::Result<()> {

index.tokenizers().register("stoppy", tokenizer);

let mut index_writer: IndexWriter = index.writer(50_000_000)?;
let mut index_writer = index.writer(50_000_000)?;

let title = schema.get_field("title").unwrap();
let body = schema.get_field("body").unwrap();
@@ -105,9 +105,9 @@ fn main() -> tantivy::Result<()> {
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

for (score, doc_address) in top_docs {
let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
let retrieved_doc = searcher.doc(doc_address)?;
println!("\n==\nDocument score {score}:");
println!("{}", retrieved_doc.to_json(&schema));
println!("{}", schema.to_json(&retrieved_doc));
}

Ok(())

@@ -6,8 +6,8 @@ use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, TEXT};
use tantivy::{
doc, DocAddress, DocId, Index, IndexWriter, Opstamp, Searcher, SearcherGeneration, SegmentId,
SegmentReader, Warmer,
doc, DocAddress, DocId, Index, Opstamp, Searcher, SearcherGeneration, SegmentId, SegmentReader,
Warmer,
};

// This example shows how warmers can be used to
@@ -143,7 +143,7 @@ fn main() -> tantivy::Result<()> {
const SNEAKERS: ProductId = 23222;

let index = Index::create_in_ram(schema);
let mut writer: IndexWriter = index.writer_with_num_threads(1, 15_000_000)?;
let mut writer = index.writer_with_num_threads(1, 15_000_000)?;
writer.add_document(doc!(product_id=>OLIVE_OIL, text=>"cooking olive oil from greece"))?;
writer.add_document(doc!(product_id=>GLOVES, text=>"kitchen gloves, perfect for cooking"))?;
writer.add_document(doc!(product_id=>SNEAKERS, text=>"uber sweet sneakers"))?;

@@ -81,8 +81,8 @@ where
T: InputTakeAtPosition + Clone,
<T as InputTakeAtPosition>::Item: AsChar + Clone,
{
opt_i(nom::character::complete::multispace0)(input)
.map(|(left, (spaces, errors))| (left, (spaces.expect("multispace0 can't fail"), errors)))
opt_i(nom::character::complete::space0)(input)
.map(|(left, (spaces, errors))| (left, (spaces.expect("space0 can't fail"), errors)))
}

pub(crate) fn space1_infallible<T>(input: T) -> JResult<T, Option<T>>
@@ -90,7 +90,7 @@ where
T: InputTakeAtPosition + Clone + InputLength,
<T as InputTakeAtPosition>::Item: AsChar + Clone,
{
opt_i(nom::character::complete::multispace1)(input).map(|(left, (spaces, mut errors))| {
opt_i(nom::character::complete::space1)(input).map(|(left, (spaces, mut errors))| {
if spaces.is_none() {
errors.push(LenientErrorInternal {
pos: left.input_len(),
@@ -3,11 +3,11 @@ use std::iter::once;
|
||||
use nom::branch::alt;
|
||||
use nom::bytes::complete::tag;
|
||||
use nom::character::complete::{
|
||||
anychar, char, digit1, multispace0, multispace1, none_of, one_of, satisfy, u32,
|
||||
anychar, char, digit1, none_of, one_of, satisfy, space0, space1, u32,
|
||||
};
|
||||
use nom::combinator::{eof, map, map_res, opt, peek, recognize, value, verify};
|
||||
use nom::error::{Error, ErrorKind};
|
||||
use nom::multi::{many0, many1, separated_list0};
|
||||
use nom::multi::{many0, many1, separated_list0, separated_list1};
|
||||
use nom::sequence::{delimited, preceded, separated_pair, terminated, tuple};
|
||||
use nom::IResult;
|
||||
|
||||
@@ -24,7 +24,7 @@ const SPECIAL_CHARS: &[char] = &[
|
||||
|
||||
/// consume a field name followed by colon. Return the field name with escape sequence
|
||||
/// already interpreted
|
||||
fn field_name(inp: &str) -> IResult<&str, String> {
|
||||
fn field_name(i: &str) -> IResult<&str, String> {
|
||||
let simple_char = none_of(SPECIAL_CHARS);
|
||||
let first_char = verify(none_of(SPECIAL_CHARS), |c| *c != '-');
|
||||
let escape_sequence = || preceded(char('\\'), one_of(SPECIAL_CHARS));
|
||||
@@ -38,12 +38,12 @@ fn field_name(inp: &str) -> IResult<&str, String> {
|
||||
char(':'),
|
||||
),
|
||||
|(first_char, next)| once(first_char).chain(next).collect(),
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
/// Consume a word outside of any context.
|
||||
// TODO should support escape sequences
|
||||
fn word(inp: &str) -> IResult<&str, &str> {
|
||||
fn word(i: &str) -> IResult<&str, &str> {
|
||||
map_res(
|
||||
recognize(tuple((
|
||||
satisfy(|c| {
|
||||
@@ -55,45 +55,45 @@ fn word(inp: &str) -> IResult<&str, &str> {
|
||||
})),
|
||||
))),
|
||||
|s| match s {
|
||||
"OR" | "AND" | "NOT" | "IN" => Err(Error::new(inp, ErrorKind::Tag)),
|
||||
"OR" | "AND" | "NOT" | "IN" => Err(Error::new(i, ErrorKind::Tag)),
|
||||
_ => Ok(s),
|
||||
},
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
fn word_infallible(delimiter: &str) -> impl Fn(&str) -> JResult<&str, Option<&str>> + '_ {
|
||||
|inp| {
|
||||
|i| {
|
||||
opt_i_err(
|
||||
preceded(
|
||||
multispace0,
|
||||
space0,
|
||||
recognize(many1(satisfy(|c| {
|
||||
!c.is_whitespace() && !delimiter.contains(c)
|
||||
}))),
|
||||
),
|
||||
"expected word",
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
}
|
||||
|
||||
/// Consume a word inside a Range context. More values are allowed as they are
|
||||
/// not ambiguous in this context.
|
||||
fn relaxed_word(inp: &str) -> IResult<&str, &str> {
|
||||
fn relaxed_word(i: &str) -> IResult<&str, &str> {
|
||||
recognize(tuple((
|
||||
satisfy(|c| !c.is_whitespace() && !['`', '{', '}', '"', '[', ']', '(', ')'].contains(&c)),
|
||||
many0(satisfy(|c: char| {
|
||||
!c.is_whitespace() && !['{', '}', '"', '[', ']', '(', ')'].contains(&c)
|
||||
})),
|
||||
)))(inp)
|
||||
)))(i)
|
||||
}
|
||||
|
||||
fn negative_number(inp: &str) -> IResult<&str, &str> {
|
||||
fn negative_number(i: &str) -> IResult<&str, &str> {
|
||||
recognize(preceded(
|
||||
char('-'),
|
||||
tuple((digit1, opt(tuple((char('.'), digit1))))),
|
||||
))(inp)
|
||||
))(i)
|
||||
}
|
||||
|
||||
fn simple_term(inp: &str) -> IResult<&str, (Delimiter, String)> {
|
||||
fn simple_term(i: &str) -> IResult<&str, (Delimiter, String)> {
|
||||
let escaped_string = |delimiter| {
|
||||
// we need this because none_of can't accept an owned array of char.
|
||||
let not_delimiter = verify(anychar, move |parsed| *parsed != delimiter);
|
||||
@@ -123,13 +123,13 @@ fn simple_term(inp: &str) -> IResult<&str, (Delimiter, String)> {
|
||||
simple_quotes,
|
||||
double_quotes,
|
||||
text_no_delimiter,
|
||||
))(inp)
|
||||
))(i)
|
||||
}
|
||||
|
||||
fn simple_term_infallible(
|
||||
delimiter: &str,
|
||||
) -> impl Fn(&str) -> JResult<&str, Option<(Delimiter, String)>> + '_ {
|
||||
|inp| {
|
||||
|i| {
|
||||
let escaped_string = |delimiter| {
|
||||
// we need this because none_of can't accept an owned array of char.
|
||||
let not_delimiter = verify(anychar, move |parsed| *parsed != delimiter);
|
||||
@@ -162,11 +162,11 @@ fn simple_term_infallible(
|
||||
map(word_infallible(delimiter), |(text, errors)| {
|
||||
(text.map(|text| (Delimiter::None, text.to_string())), errors)
|
||||
}),
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
}
|
||||
|
||||
fn term_or_phrase(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
fn term_or_phrase(i: &str) -> IResult<&str, UserInputLeaf> {
|
||||
map(
|
||||
tuple((simple_term, fallible(slop_or_prefix_val))),
|
||||
|((delimiter, phrase), (slop, prefix))| {
|
||||
@@ -179,13 +179,13 @@ fn term_or_phrase(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
}
|
||||
.into()
|
||||
},
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
fn term_or_phrase_infallible(inp: &str) -> JResult<&str, Option<UserInputLeaf>> {
|
||||
fn term_or_phrase_infallible(i: &str) -> JResult<&str, Option<UserInputLeaf>> {
|
||||
map(
|
||||
// ~* for slop/prefix, ) inside group or ast tree, ^ if boost
|
||||
tuple_infallible((simple_term_infallible(")^"), slop_or_prefix_val)),
|
||||
tuple_infallible((simple_term_infallible("*)^"), slop_or_prefix_val)),
|
||||
|((delimiter_phrase, (slop, prefix)), errors)| {
|
||||
let leaf = if let Some((delimiter, phrase)) = delimiter_phrase {
|
||||
Some(
|
||||
@@ -214,10 +214,10 @@ fn term_or_phrase_infallible(inp: &str) -> JResult<&str, Option<UserInputLeaf>>
|
||||
};
|
||||
(leaf, errors)
|
||||
},
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
fn term_group(inp: &str) -> IResult<&str, UserInputAst> {
|
||||
fn term_group(i: &str) -> IResult<&str, UserInputAst> {
|
||||
let occur_symbol = alt((
|
||||
value(Occur::MustNot, char('-')),
|
||||
value(Occur::Must, char('+')),
|
||||
@@ -225,10 +225,10 @@ fn term_group(inp: &str) -> IResult<&str, UserInputAst> {
|
||||
|
||||
map(
|
||||
tuple((
|
||||
terminated(field_name, multispace0),
|
||||
terminated(field_name, space0),
|
||||
delimited(
|
||||
tuple((char('('), multispace0)),
|
||||
separated_list0(multispace1, tuple((opt(occur_symbol), term_or_phrase))),
|
||||
tuple((char('('), space0)),
|
||||
separated_list0(space1, tuple((opt(occur_symbol), term_or_phrase))),
|
||||
char(')'),
|
||||
),
|
||||
)),
|
||||
@@ -240,26 +240,26 @@ fn term_group(inp: &str) -> IResult<&str, UserInputAst> {
|
||||
.collect(),
|
||||
)
|
||||
},
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
// this is a precondition for term_group_infallible. Without it, term_group_infallible can fail
|
||||
// with a panic. It does not consume its input.
|
||||
fn term_group_precond(inp: &str) -> IResult<&str, (), ()> {
|
||||
fn term_group_precond(i: &str) -> IResult<&str, (), ()> {
|
||||
value(
|
||||
(),
|
||||
peek(tuple((
|
||||
field_name,
|
||||
multispace0,
|
||||
space0,
|
||||
char('('), // when we are here, we know it can't be anything but a term group
|
||||
))),
|
||||
)(inp)
|
||||
)(i)
|
||||
.map_err(|e| e.map(|_| ()))
|
||||
}
|
||||
|
||||
fn term_group_infallible(inp: &str) -> JResult<&str, UserInputAst> {
|
||||
let (mut inp, (field_name, _, _, _)) =
|
||||
tuple((field_name, multispace0, char('('), multispace0))(inp).expect("precondition failed");
|
||||
fn term_group_infallible(i: &str) -> JResult<&str, UserInputAst> {
|
||||
let (mut i, (field_name, _, _, _)) =
|
||||
tuple((field_name, space0, char('('), space0))(i).expect("precondition failed");
|
||||
|
||||
let mut terms = Vec::new();
|
||||
let mut errs = Vec::new();
|
||||
@@ -270,19 +270,19 @@ fn term_group_infallible(inp: &str) -> JResult<&str, UserInputAst> {
|
||||
first_round = false;
|
||||
Vec::new()
|
||||
} else {
|
||||
let (rest, (_, err)) = space1_infallible(inp)?;
|
||||
inp = rest;
|
||||
let (rest, (_, err)) = space1_infallible(i)?;
|
||||
i = rest;
|
||||
err
|
||||
};
|
||||
if inp.is_empty() {
|
||||
if i.is_empty() {
|
||||
errs.push(LenientErrorInternal {
|
||||
pos: inp.len(),
|
||||
pos: i.len(),
|
||||
message: "missing )".to_string(),
|
||||
});
|
||||
break Ok((inp, (UserInputAst::Clause(terms), errs)));
|
||||
break Ok((i, (UserInputAst::Clause(terms), errs)));
|
||||
}
|
||||
if let Some(inp) = inp.strip_prefix(')') {
|
||||
break Ok((inp, (UserInputAst::Clause(terms), errs)));
|
||||
if let Some(i) = i.strip_prefix(')') {
|
||||
break Ok((i, (UserInputAst::Clause(terms), errs)));
|
||||
}
|
||||
// only append missing space error if we did not reach the end of group
|
||||
errs.append(&mut space_error);
|
||||
@@ -291,57 +291,26 @@ fn term_group_infallible(inp: &str) -> JResult<&str, UserInputAst> {
|
||||
// first byte is not `)` or ' '. If it did not, we would end up looping.
|
||||
|
||||
let (rest, ((occur, leaf), mut err)) =
|
||||
tuple_infallible((occur_symbol, term_or_phrase_infallible))(inp)?;
|
||||
tuple_infallible((occur_symbol, term_or_phrase_infallible))(i)?;
|
||||
errs.append(&mut err);
|
||||
if let Some(leaf) = leaf {
|
||||
terms.push((occur, leaf.set_field(Some(field_name.clone())).into()));
|
||||
}
|
||||
inp = rest;
|
||||
i = rest;
|
||||
}
|
||||
}
|
||||
|
||||
fn exists(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
value(
|
||||
UserInputLeaf::Exists {
|
||||
field: String::new(),
|
||||
},
|
||||
tuple((multispace0, char('*'))),
|
||||
)(inp)
|
||||
}
|
||||
|
||||
fn exists_precond(inp: &str) -> IResult<&str, (), ()> {
|
||||
value(
|
||||
(),
|
||||
peek(tuple((
|
||||
field_name,
|
||||
multispace0,
|
||||
char('*'), // when we are here, we know it can't be anything but a exists
|
||||
))),
|
||||
)(inp)
|
||||
.map_err(|e| e.map(|_| ()))
|
||||
}
|
||||
|
||||
fn exists_infallible(inp: &str) -> JResult<&str, UserInputAst> {
|
||||
let (inp, (field_name, _, _)) =
|
||||
tuple((field_name, multispace0, char('*')))(inp).expect("precondition failed");
|
||||
|
||||
let exists = UserInputLeaf::Exists { field: field_name }.into();
|
||||
Ok((inp, (exists, Vec::new())))
|
||||
}
|
||||
|
||||
fn literal(inp: &str) -> IResult<&str, UserInputAst> {
|
||||
// * alone is already parsed by our caller, so if `exists` succeed, we can be confident
|
||||
// something (a field name) got parsed before
|
||||
fn literal(i: &str) -> IResult<&str, UserInputAst> {
|
||||
alt((
|
||||
map(
|
||||
tuple((opt(field_name), alt((range, set, exists, term_or_phrase)))),
|
||||
tuple((opt(field_name), alt((range, set, term_or_phrase)))),
|
||||
|(field_name, leaf): (Option<String>, UserInputLeaf)| leaf.set_field(field_name).into(),
|
||||
),
|
||||
term_group,
|
||||
))(inp)
|
||||
))(i)
|
||||
}
|
||||
|
||||
fn literal_no_group_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>> {
|
||||
fn literal_no_group_infallible(i: &str) -> JResult<&str, Option<UserInputAst>> {
|
||||
map(
|
||||
tuple_infallible((
|
||||
opt_i(field_name),
|
||||
@@ -349,7 +318,7 @@ fn literal_no_group_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>>
|
||||
alt_infallible(
|
||||
(
|
||||
(
|
||||
value((), tuple((tag("IN"), multispace0, char('[')))),
|
||||
value((), tuple((tag("IN"), space0, char('[')))),
|
||||
map(set_infallible, |(set, errs)| (Some(set), errs)),
|
||||
),
|
||||
(
|
||||
@@ -368,7 +337,7 @@ fn literal_no_group_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>>
|
||||
&& field_name.is_none()
|
||||
{
|
||||
errors.push(LenientErrorInternal {
|
||||
pos: inp.len(),
|
||||
pos: i.len(),
|
||||
message: "parsed possible invalid field as term".to_string(),
|
||||
});
|
||||
}
|
||||
@@ -377,7 +346,7 @@ fn literal_no_group_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>>
|
||||
&& field_name.is_none()
|
||||
{
|
||||
errors.push(LenientErrorInternal {
|
||||
pos: inp.len(),
|
||||
pos: i.len(),
|
||||
message: "parsed keyword NOT as term. It should be quoted".to_string(),
|
||||
});
|
||||
}
|
||||
@@ -386,40 +355,34 @@ fn literal_no_group_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>>
|
||||
errors,
|
||||
)
|
||||
},
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
fn literal_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>> {
|
||||
fn literal_infallible(i: &str) -> JResult<&str, Option<UserInputAst>> {
|
||||
alt_infallible(
|
||||
(
|
||||
(
|
||||
term_group_precond,
|
||||
map(term_group_infallible, |(group, errs)| (Some(group), errs)),
|
||||
),
|
||||
(
|
||||
exists_precond,
|
||||
map(exists_infallible, |(exists, errs)| (Some(exists), errs)),
|
||||
),
|
||||
),
|
||||
((
|
||||
term_group_precond,
|
||||
map(term_group_infallible, |(group, errs)| (Some(group), errs)),
|
||||
),),
|
||||
literal_no_group_infallible,
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
fn slop_or_prefix_val(inp: &str) -> JResult<&str, (u32, bool)> {
|
||||
fn slop_or_prefix_val(i: &str) -> JResult<&str, (u32, bool)> {
|
||||
map(
|
||||
opt_i(alt((
|
||||
value((0, true), char('*')),
|
||||
map(preceded(char('~'), u32), |slop| (slop, false)),
|
||||
))),
|
||||
|(slop_or_prefix_opt, err)| (slop_or_prefix_opt.unwrap_or_default(), err),
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
/// Function that parses a range out of a Stream
|
||||
/// Supports ranges like:
|
||||
/// [5 TO 10], {5 TO 10}, [* TO 10], [10 TO *], {10 TO *], >5, <=10
|
||||
/// [a TO *], [a TO c], [abc TO bcd}
|
||||
fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
fn range(i: &str) -> IResult<&str, UserInputLeaf> {
|
||||
let range_term_val = || {
|
||||
map(
|
||||
alt((negative_number, relaxed_word, tag("*"))),
|
||||
@@ -430,8 +393,8 @@ fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
// check for unbounded range in the form of <5, <=10, >5, >=5
|
||||
let elastic_unbounded_range = map(
|
||||
tuple((
|
||||
preceded(multispace0, alt((tag(">="), tag("<="), tag("<"), tag(">")))),
|
||||
preceded(multispace0, range_term_val()),
|
||||
preceded(space0, alt((tag(">="), tag("<="), tag("<"), tag(">")))),
|
||||
preceded(space0, range_term_val()),
|
||||
)),
|
||||
|(comparison_sign, bound)| match comparison_sign {
|
||||
">=" => (UserInputBound::Inclusive(bound), UserInputBound::Unbounded),
|
||||
@@ -444,7 +407,7 @@ fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
);
|
||||
|
||||
let lower_bound = map(
|
||||
separated_pair(one_of("{["), multispace0, range_term_val()),
|
||||
separated_pair(one_of("{["), space0, range_term_val()),
|
||||
|(boundary_char, lower_bound)| {
|
||||
if lower_bound == "*" {
|
||||
UserInputBound::Unbounded
|
||||
@@ -457,7 +420,7 @@ fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
);
|
||||
|
||||
let upper_bound = map(
|
||||
separated_pair(range_term_val(), multispace0, one_of("}]")),
|
||||
separated_pair(range_term_val(), space0, one_of("}]")),
|
||||
|(upper_bound, boundary_char)| {
|
||||
if upper_bound == "*" {
|
||||
UserInputBound::Unbounded
|
||||
@@ -469,11 +432,8 @@ fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
},
|
||||
);
|
||||
|
||||
let lower_to_upper = separated_pair(
|
||||
lower_bound,
|
||||
tuple((multispace1, tag("TO"), multispace1)),
|
||||
upper_bound,
|
||||
);
|
||||
let lower_to_upper =
|
||||
separated_pair(lower_bound, tuple((space1, tag("TO"), space1)), upper_bound);
|
||||
|
||||
map(
|
||||
alt((elastic_unbounded_range, lower_to_upper)),
|
||||
@@ -482,10 +442,10 @@ fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
lower,
|
||||
upper,
|
||||
},
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
|
||||
fn range_infallible(i: &str) -> JResult<&str, UserInputLeaf> {
|
||||
let lower_to_upper = map(
|
||||
tuple_infallible((
|
||||
opt_i(anychar),
|
||||
@@ -493,16 +453,13 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
|
||||
word_infallible("]}"),
|
||||
space1_infallible,
|
||||
opt_i_err(
|
||||
terminated(tag("TO"), alt((value((), multispace1), value((), eof)))),
|
||||
terminated(tag("TO"), alt((value((), space1), value((), eof)))),
|
||||
"missing keyword TO",
|
||||
),
|
||||
word_infallible("]}"),
|
||||
opt_i_err(one_of("]}"), "missing range delimiter"),
|
||||
)),
|
||||
|(
|
||||
(lower_bound_kind, _multispace0, lower, _multispace1, to, upper, upper_bound_kind),
|
||||
errs,
|
||||
)| {
|
||||
|((lower_bound_kind, _space0, lower, _space1, to, upper, upper_bound_kind), errs)| {
|
||||
let lower_bound = match (lower_bound_kind, lower) {
|
||||
(_, Some("*")) => UserInputBound::Unbounded,
|
||||
(_, None) => UserInputBound::Unbounded,
|
||||
@@ -596,16 +553,16 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
|
||||
errors,
|
||||
)
|
||||
},
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
fn set(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
fn set(i: &str) -> IResult<&str, UserInputLeaf> {
|
||||
map(
|
||||
preceded(
|
||||
tuple((multispace0, tag("IN"), multispace1)),
|
||||
tuple((space0, tag("IN"), space1)),
|
||||
delimited(
|
||||
tuple((char('['), multispace0)),
|
||||
separated_list0(multispace1, map(simple_term, |(_, term)| term)),
|
||||
tuple((char('['), space0)),
|
||||
separated_list0(space1, map(simple_term, |(_, term)| term)),
|
||||
char(']'),
|
||||
),
|
||||
),
|
||||
@@ -613,10 +570,10 @@ fn set(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
field: None,
|
||||
elements,
|
||||
},
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
fn set_infallible(mut inp: &str) -> JResult<&str, UserInputLeaf> {
|
||||
fn set_infallible(mut i: &str) -> JResult<&str, UserInputLeaf> {
|
||||
// `IN [` has already been parsed when we enter, we only need to parse simple terms until we
|
||||
// find a `]`
|
||||
let mut elements = Vec::new();
|
||||
@@ -627,41 +584,41 @@ fn set_infallible(mut inp: &str) -> JResult<&str, UserInputLeaf> {
|
||||
first_round = false;
|
||||
Vec::new()
|
||||
} else {
|
||||
let (rest, (_, err)) = space1_infallible(inp)?;
|
||||
inp = rest;
|
||||
let (rest, (_, err)) = space1_infallible(i)?;
|
||||
i = rest;
|
||||
err
|
||||
};
|
||||
if inp.is_empty() {
|
||||
if i.is_empty() {
|
||||
// TODO push error about missing ]
|
||||
//
|
||||
errs.push(LenientErrorInternal {
|
||||
pos: inp.len(),
|
||||
pos: i.len(),
|
||||
message: "missing ]".to_string(),
|
||||
});
|
||||
let res = UserInputLeaf::Set {
|
||||
field: None,
|
||||
elements,
|
||||
};
|
||||
return Ok((inp, (res, errs)));
|
||||
return Ok((i, (res, errs)));
|
||||
}
|
||||
if let Some(inp) = inp.strip_prefix(']') {
|
||||
if let Some(i) = i.strip_prefix(']') {
|
||||
let res = UserInputLeaf::Set {
|
||||
field: None,
|
||||
elements,
|
||||
};
|
||||
return Ok((inp, (res, errs)));
|
||||
return Ok((i, (res, errs)));
|
||||
}
|
||||
errs.append(&mut space_error);
|
||||
// TODO
|
||||
// here we do the assumption term_or_phrase_infallible always consume something if the
|
||||
// first byte is not `)` or ' '. If it did not, we would end up looping.
|
||||
|
||||
let (rest, (delim_term, mut err)) = simple_term_infallible("]")(inp)?;
|
||||
let (rest, (delim_term, mut err)) = simple_term_infallible("]")(i)?;
|
||||
errs.append(&mut err);
|
||||
if let Some((_, term)) = delim_term {
|
||||
elements.push(term);
|
||||
}
|
||||
inp = rest;
|
||||
i = rest;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -669,16 +626,16 @@ fn negate(expr: UserInputAst) -> UserInputAst {
|
||||
expr.unary(Occur::MustNot)
|
||||
}
|
||||
|
||||
fn leaf(inp: &str) -> IResult<&str, UserInputAst> {
|
||||
fn leaf(i: &str) -> IResult<&str, UserInputAst> {
|
||||
alt((
|
||||
delimited(char('('), ast, char(')')),
|
||||
map(char('*'), |_| UserInputAst::from(UserInputLeaf::All)),
|
||||
map(preceded(tuple((tag("NOT"), multispace1)), leaf), negate),
|
||||
map(preceded(tuple((tag("NOT"), space1)), leaf), negate),
|
||||
literal,
|
||||
))(inp)
|
||||
))(i)
|
||||
}
|
||||
|
||||
fn leaf_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>> {
|
||||
fn leaf_infallible(i: &str) -> JResult<&str, Option<UserInputAst>> {
|
||||
alt_infallible(
|
||||
(
|
||||
(
|
||||
@@ -708,23 +665,23 @@ fn leaf_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>> {
|
||||
),
|
||||
),
|
||||
literal_infallible,
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
fn positive_float_number(inp: &str) -> IResult<&str, f64> {
|
||||
fn positive_float_number(i: &str) -> IResult<&str, f64> {
|
||||
map(
|
||||
recognize(tuple((digit1, opt(tuple((char('.'), digit1)))))),
|
||||
// TODO this is actually dangerous if the number is actually not representable as a f64
|
||||
// (too big for instance)
|
||||
|float_str: &str| float_str.parse::<f64>().unwrap(),
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
fn boost(inp: &str) -> JResult<&str, Option<f64>> {
|
||||
opt_i(preceded(char('^'), positive_float_number))(inp)
|
||||
fn boost(i: &str) -> JResult<&str, Option<f64>> {
|
||||
opt_i(preceded(char('^'), positive_float_number))(i)
|
||||
}
|
||||
|
||||
fn boosted_leaf(inp: &str) -> IResult<&str, UserInputAst> {
|
||||
fn boosted_leaf(i: &str) -> IResult<&str, UserInputAst> {
|
||||
map(
|
||||
tuple((leaf, fallible(boost))),
|
||||
|(leaf, boost_opt)| match boost_opt {
|
||||
@@ -733,10 +690,10 @@ fn boosted_leaf(inp: &str) -> IResult<&str, UserInputAst> {
|
||||
}
|
||||
_ => leaf,
|
||||
},
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
fn boosted_leaf_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>> {
|
||||
fn boosted_leaf_infallible(i: &str) -> JResult<&str, Option<UserInputAst>> {
|
||||
map(
|
||||
tuple_infallible((leaf_infallible, boost)),
|
||||
|((leaf, boost_opt), error)| match boost_opt {
|
||||
@@ -746,30 +703,30 @@ fn boosted_leaf_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>> {
|
||||
),
|
||||
_ => (leaf, error),
|
||||
},
|
||||
)(inp)
|
||||
)(i)
|
||||
}
|
||||
|
||||
fn occur_symbol(inp: &str) -> JResult<&str, Option<Occur>> {
|
||||
fn occur_symbol(i: &str) -> JResult<&str, Option<Occur>> {
|
||||
opt_i(alt((
|
||||
value(Occur::MustNot, char('-')),
|
||||
value(Occur::Must, char('+')),
|
||||
)))(inp)
|
||||
)))(i)
|
||||
}
|
||||
|
||||
fn occur_leaf(inp: &str) -> IResult<&str, (Option<Occur>, UserInputAst)> {
|
||||
tuple((fallible(occur_symbol), boosted_leaf))(inp)
|
||||
fn occur_leaf(i: &str) -> IResult<&str, (Option<Occur>, UserInputAst)> {
|
||||
tuple((fallible(occur_symbol), boosted_leaf))(i)
|
||||
}
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
fn operand_occur_leaf_infallible(
|
||||
inp: &str,
|
||||
i: &str,
|
||||
) -> JResult<&str, (Option<BinaryOperand>, Option<Occur>, Option<UserInputAst>)> {
|
||||
// TODO maybe this should support multiple chained AND/OR, and "fuse" them?
|
||||
tuple_infallible((
|
||||
delimited_infallible(nothing, opt_i(binary_operand), space0_infallible),
|
||||
occur_symbol,
|
||||
boosted_leaf_infallible,
|
||||
))(inp)
|
||||
))(i)
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
@@ -778,31 +735,35 @@ enum BinaryOperand {
|
||||
And,
|
||||
}
|
||||
|
||||
fn binary_operand(inp: &str) -> IResult<&str, BinaryOperand> {
|
||||
fn binary_operand(i: &str) -> IResult<&str, BinaryOperand> {
|
||||
alt((
|
||||
value(BinaryOperand::And, tag("AND ")),
|
||||
value(BinaryOperand::Or, tag("OR ")),
|
||||
))(inp)
|
||||
))(i)
|
||||
}
|
||||
|
||||
fn aggregate_binary_expressions(
|
||||
left: (Option<Occur>, UserInputAst),
|
||||
others: Vec<(Option<BinaryOperand>, Option<Occur>, UserInputAst)>,
|
||||
) -> Result<UserInputAst, LenientErrorInternal> {
|
||||
let mut leafs = Vec::with_capacity(others.len() + 1);
|
||||
leafs.push((None, left.0, Some(left.1)));
|
||||
leafs.extend(
|
||||
others
|
||||
.into_iter()
|
||||
.map(|(operand, occur, ast)| (operand, occur, Some(ast))),
|
||||
);
|
||||
// the parameters we pass should statically guarantee we can't get errors
|
||||
// (no prefix BinaryOperand is provided)
|
||||
let (res, mut errors) = aggregate_infallible_expressions(leafs);
|
||||
if errors.is_empty() {
|
||||
Ok(res)
|
||||
left: UserInputAst,
|
||||
others: Vec<(BinaryOperand, UserInputAst)>,
|
||||
) -> UserInputAst {
|
||||
let mut dnf: Vec<Vec<UserInputAst>> = vec![vec![left]];
|
||||
for (operator, operand_ast) in others {
|
||||
match operator {
|
||||
BinaryOperand::And => {
|
||||
if let Some(last) = dnf.last_mut() {
|
||||
last.push(operand_ast);
|
||||
}
|
||||
}
|
||||
BinaryOperand::Or => {
|
||||
dnf.push(vec![operand_ast]);
|
||||
}
|
||||
}
|
||||
}
|
||||
if dnf.len() == 1 {
|
||||
UserInputAst::and(dnf.into_iter().next().unwrap()) //< safe
|
||||
} else {
|
||||
Err(errors.swap_remove(0))
|
||||
let conjunctions = dnf.into_iter().map(UserInputAst::and).collect();
|
||||
UserInputAst::or(conjunctions)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -818,10 +779,30 @@ fn aggregate_infallible_expressions(
|
||||
return (UserInputAst::empty_query(), err);
|
||||
}
|
||||
|
||||
let use_operand = leafs.iter().any(|(operand, _, _)| operand.is_some());
|
||||
let all_operand = leafs
|
||||
.iter()
|
||||
.skip(1)
|
||||
.all(|(operand, _, _)| operand.is_some());
|
||||
let early_operand = leafs
|
||||
.iter()
|
||||
.take(1)
|
||||
.all(|(operand, _, _)| operand.is_some());
|
||||
let use_occur = leafs.iter().any(|(_, occur, _)| occur.is_some());
|
||||
|
||||
if use_operand && use_occur {
|
||||
err.push(LenientErrorInternal {
|
||||
pos: 0,
|
||||
message: "Use of mixed occur and boolean operator".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
if use_operand && !all_operand {
|
||||
err.push(LenientErrorInternal {
|
||||
pos: 0,
|
||||
message: "Missing boolean operator".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
if early_operand {
|
||||
err.push(LenientErrorInternal {
|
||||
@@ -848,15 +829,7 @@ fn aggregate_infallible_expressions(
|
||||
Some(BinaryOperand::And) => Some(Occur::Must),
|
||||
_ => Some(Occur::Should),
|
||||
};
|
||||
if occur == &Some(Occur::MustNot) && default_op == Some(Occur::Should) {
|
||||
// if occur is MustNot *and* operation is OR, we synthetize a ShouldNot
|
||||
clauses.push(vec![(
|
||||
Some(Occur::Should),
|
||||
ast.clone().unary(Occur::MustNot),
|
||||
)])
|
||||
} else {
|
||||
clauses.push(vec![(occur.or(default_op), ast.clone())]);
|
||||
}
|
||||
clauses.push(vec![(occur.or(default_op), ast.clone())]);
|
||||
}
|
||||
None => {
|
||||
let default_op = match next_operator {
|
||||
@@ -864,15 +837,7 @@ fn aggregate_infallible_expressions(
|
||||
Some(BinaryOperand::Or) => Some(Occur::Should),
|
||||
None => None,
|
||||
};
|
||||
if occur == &Some(Occur::MustNot) && default_op == Some(Occur::Should) {
|
||||
// if occur is MustNot *and* operation is OR, we synthetize a ShouldNot
|
||||
clauses.push(vec![(
|
||||
Some(Occur::Should),
|
||||
ast.clone().unary(Occur::MustNot),
|
||||
)])
|
||||
} else {
|
||||
clauses.push(vec![(occur.or(default_op), ast.clone())])
|
||||
}
|
||||
clauses.push(vec![(occur.or(default_op), ast.clone())])
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -889,12 +854,7 @@ fn aggregate_infallible_expressions(
|
||||
}
|
||||
}
|
||||
Some(BinaryOperand::Or) => {
|
||||
if last_occur == Some(Occur::MustNot) {
|
||||
// if occur is MustNot *and* operation is OR, we synthetize a ShouldNot
|
||||
clauses.push(vec![(Some(Occur::Should), last_ast.unary(Occur::MustNot))]);
|
||||
} else {
|
||||
clauses.push(vec![(last_occur.or(Some(Occur::Should)), last_ast)]);
|
||||
}
|
||||
clauses.push(vec![(last_occur.or(Some(Occur::Should)), last_ast)]);
|
||||
}
|
||||
None => clauses.push(vec![(last_occur, last_ast)]),
|
||||
}
|
||||
@@ -920,32 +880,38 @@ fn aggregate_infallible_expressions(
|
||||
}
|
||||
}
|
||||
|
||||
fn operand_leaf(inp: &str) -> IResult<&str, (Option<BinaryOperand>, Option<Occur>, UserInputAst)> {
|
||||
map(
|
||||
tuple((
|
||||
terminated(opt(binary_operand), multispace0),
|
||||
terminated(occur_leaf, multispace0),
|
||||
)),
|
||||
|(operand, (occur, ast))| (operand, occur, ast),
|
||||
)(inp)
|
||||
fn operand_leaf(i: &str) -> IResult<&str, (BinaryOperand, UserInputAst)> {
|
||||
tuple((
|
||||
terminated(binary_operand, space0),
|
||||
terminated(boosted_leaf, space0),
|
||||
))(i)
|
||||
}
|
||||
|
||||
fn ast(inp: &str) -> IResult<&str, UserInputAst> {
|
||||
let boolean_expr = map_res(
|
||||
separated_pair(occur_leaf, multispace1, many1(operand_leaf)),
|
||||
fn ast(i: &str) -> IResult<&str, UserInputAst> {
|
||||
let boolean_expr = map(
|
||||
separated_pair(boosted_leaf, space1, many1(operand_leaf)),
|
||||
|(left, right)| aggregate_binary_expressions(left, right),
|
||||
);
|
||||
let single_leaf = map(occur_leaf, |(occur, ast)| {
|
||||
if occur == Some(Occur::MustNot) {
|
||||
ast.unary(Occur::MustNot)
|
||||
let whitespace_separated_leaves = map(separated_list1(space1, occur_leaf), |subqueries| {
|
||||
if subqueries.len() == 1 {
|
||||
let (occur_opt, ast) = subqueries.into_iter().next().unwrap();
|
||||
match occur_opt.unwrap_or(Occur::Should) {
|
||||
Occur::Must | Occur::Should => ast,
|
||||
Occur::MustNot => UserInputAst::Clause(vec![(Some(Occur::MustNot), ast)]),
|
||||
}
|
||||
} else {
|
||||
ast
|
||||
UserInputAst::Clause(subqueries.into_iter().collect())
|
||||
}
|
||||
});
|
||||
delimited(multispace0, alt((boolean_expr, single_leaf)), multispace0)(inp)
|
||||
|
||||
delimited(
|
||||
space0,
|
||||
alt((boolean_expr, whitespace_separated_leaves)),
|
||||
space0,
|
||||
)(i)
|
||||
}
|
||||
|
||||
fn ast_infallible(inp: &str) -> JResult<&str, UserInputAst> {
|
||||
fn ast_infallible(i: &str) -> JResult<&str, UserInputAst> {
|
||||
// ast() parse either `term AND term OR term` or `+term term -term`
|
||||
// both are locally ambiguous, and as we allow error, it's hard to permit backtracking.
|
||||
// Instead, we allow a mix of both syntaxes, trying to make sense of what a user meant.
|
||||
@@ -962,13 +928,13 @@ fn ast_infallible(inp: &str) -> JResult<&str, UserInputAst> {
|
||||
},
|
||||
);
|
||||
|
||||
delimited_infallible(space0_infallible, expression, space0_infallible)(inp)
|
||||
delimited_infallible(space0_infallible, expression, space0_infallible)(i)
|
||||
}
|
||||
|
||||
pub fn parse_to_ast(inp: &str) -> IResult<&str, UserInputAst> {
|
||||
map(delimited(multispace0, opt(ast), eof), |opt_ast| {
|
||||
pub fn parse_to_ast(i: &str) -> IResult<&str, UserInputAst> {
|
||||
map(delimited(space0, opt(ast), eof), |opt_ast| {
|
||||
rewrite_ast(opt_ast.unwrap_or_else(UserInputAst::empty_query))
|
||||
})(inp)
|
||||
})(i)
|
||||
}
|
||||
|
||||
pub fn parse_to_ast_lenient(query_str: &str) -> (UserInputAst, Vec<LenientError>) {
|
||||
@@ -1110,9 +1076,6 @@ mod test {
|
||||
test_parse_query_to_ast_helper("'www-form-encoded'", "'www-form-encoded'");
|
||||
test_parse_query_to_ast_helper("www-form-encoded", "www-form-encoded");
|
||||
test_parse_query_to_ast_helper("www-form-encoded", "www-form-encoded");
|
||||
test_parse_query_to_ast_helper("mr james bo?d", "(*mr *james *bo?d)");
|
||||
test_parse_query_to_ast_helper("mr james bo*", "(*mr *james *bo*)");
|
||||
test_parse_query_to_ast_helper("mr james b*d", "(*mr *james *b*d)");
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1142,43 +1105,24 @@ mod test {
|
||||
#[test]
|
||||
fn test_parse_query_to_ast_binary_op() {
|
||||
test_parse_query_to_ast_helper("a AND b", "(+a +b)");
|
||||
test_parse_query_to_ast_helper("a\nAND b", "(+a +b)");
|
||||
test_parse_query_to_ast_helper("a OR b", "(?a ?b)");
|
||||
test_parse_query_to_ast_helper("a OR b AND c", "(?a ?(+b +c))");
|
||||
test_parse_query_to_ast_helper("a AND b AND c", "(+a +b +c)");
|
||||
test_parse_query_to_ast_helper("a OR b aaa", "(?a ?b *aaa)");
|
||||
test_parse_query_to_ast_helper("a AND b aaa", "(?(+a +b) *aaa)");
|
||||
test_parse_query_to_ast_helper("aaa a OR b ", "(*aaa ?a ?b)");
|
||||
test_parse_query_to_ast_helper("aaa ccc a OR b ", "(*aaa *ccc ?a ?b)");
|
||||
test_parse_query_to_ast_helper("aaa a AND b ", "(*aaa ?(+a +b))");
|
||||
test_parse_query_to_ast_helper("aaa ccc a AND b ", "(*aaa *ccc ?(+a +b))");
|
||||
test_is_parse_err("a OR b aaa", "(?a ?b *aaa)");
|
||||
test_is_parse_err("a AND b aaa", "(?(+a +b) *aaa)");
|
||||
test_is_parse_err("aaa a OR b ", "(*aaa ?a ?b)");
|
||||
test_is_parse_err("aaa ccc a OR b ", "(*aaa *ccc ?a ?b)");
|
||||
test_is_parse_err("aaa a AND b ", "(*aaa ?(+a +b))");
|
||||
test_is_parse_err("aaa ccc a AND b ", "(*aaa *ccc ?(+a +b))");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_mixed_bool_occur() {
|
||||
test_parse_query_to_ast_helper("+a OR +b", "(+a +b)");
|
||||
|
||||
test_parse_query_to_ast_helper("a AND -b", "(+a -b)");
|
||||
test_parse_query_to_ast_helper("-a AND b", "(-a +b)");
|
||||
test_parse_query_to_ast_helper("a AND NOT b", "(+a +(-b))");
|
||||
test_parse_query_to_ast_helper("NOT a AND b", "(+(-a) +b)");
|
||||
|
||||
test_parse_query_to_ast_helper("a AND NOT b AND c", "(+a +(-b) +c)");
|
||||
test_parse_query_to_ast_helper("a AND -b AND c", "(+a -b +c)");
|
||||
|
||||
test_parse_query_to_ast_helper("a OR -b", "(?a ?(-b))");
|
||||
test_parse_query_to_ast_helper("-a OR b", "(?(-a) ?b)");
|
||||
test_parse_query_to_ast_helper("a OR NOT b", "(?a ?(-b))");
|
||||
test_parse_query_to_ast_helper("NOT a OR b", "(?(-a) ?b)");
|
||||
|
||||
test_parse_query_to_ast_helper("a OR NOT b OR c", "(?a ?(-b) ?c)");
|
||||
test_parse_query_to_ast_helper("a OR -b OR c", "(?a ?(-b) ?c)");
|
||||
|
||||
test_parse_query_to_ast_helper("a OR b +aaa", "(?a ?b +aaa)");
|
||||
test_parse_query_to_ast_helper("a AND b -aaa", "(?(+a +b) -aaa)");
|
||||
test_parse_query_to_ast_helper("+a OR +b aaa", "(+a +b *aaa)");
|
||||
test_parse_query_to_ast_helper("-a AND -b aaa", "(?(-a -b) *aaa)");
|
||||
test_parse_query_to_ast_helper("-aaa +ccc -a OR b ", "(-aaa +ccc ?(-a) ?b)");
|
||||
test_is_parse_err("a OR b +aaa", "(?a ?b +aaa)");
|
||||
test_is_parse_err("a AND b -aaa", "(?(+a +b) -aaa)");
|
||||
test_is_parse_err("+a OR +b aaa", "(+a +b *aaa)");
|
||||
test_is_parse_err("-a AND -b aaa", "(?(-a -b) *aaa)");
|
||||
test_is_parse_err("-aaa +ccc -a OR b ", "(-aaa +ccc -a ?b)");
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1594,17 +1538,6 @@ mod test {
|
||||
test_parse_query_to_ast_helper("foo:\"\"*", "\"foo\":\"\"*");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_exist_query() {
|
||||
test_parse_query_to_ast_helper("a:*", "\"a\":*");
|
||||
test_parse_query_to_ast_helper("a: *", "\"a\":*");
|
||||
// an exist followed by default term being b
|
||||
test_is_parse_err("a:*b", "(*\"a\":* *b)");
|
||||
|
||||
// this is a term query (not a phrase prefix)
|
||||
test_parse_query_to_ast_helper("a:b*", "\"a\":b*");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_not_queries_are_consistent() {
|
||||
test_parse_query_to_ast_helper("tata -toto", "(*tata -toto)");
|
||||
|
||||
@@ -16,9 +16,6 @@ pub enum UserInputLeaf {
|
||||
field: Option<String>,
|
||||
elements: Vec<String>,
|
||||
},
|
||||
Exists {
|
||||
field: String,
|
||||
},
|
||||
}
|
||||
|
||||
impl UserInputLeaf {
|
||||
@@ -39,9 +36,6 @@ impl UserInputLeaf {
|
||||
upper,
|
||||
},
|
||||
UserInputLeaf::Set { field: _, elements } => UserInputLeaf::Set { field, elements },
|
||||
UserInputLeaf::Exists { field: _ } => UserInputLeaf::Exists {
|
||||
field: field.expect("Exist query without a field isn't allowed"),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -80,9 +74,6 @@ impl Debug for UserInputLeaf {
|
||||
write!(formatter, "]")
|
||||
}
|
||||
UserInputLeaf::All => write!(formatter, "*"),
|
||||
UserInputLeaf::Exists { field } => {
|
||||
write!(formatter, "\"{field}\":*")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -48,7 +48,7 @@ mod bench {
|
||||
let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
|
||||
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
|
||||
let index = Index::create_from_tempdir(schema_builder.build())?;
|
||||
let few_terms_data = ["INFO", "ERROR", "WARN", "DEBUG"];
|
||||
let few_terms_data = vec!["INFO", "ERROR", "WARN", "DEBUG"];
|
||||
|
||||
let lg_norm = rand_distr::LogNormal::new(2.996f64, 0.979f64).unwrap();
|
||||
|
||||
@@ -85,7 +85,7 @@ mod bench {
|
||||
if cardinality == Cardinality::Sparse {
|
||||
doc_with_value /= 20;
|
||||
}
|
||||
let _val_max = 1_000_000.0;
|
||||
let val_max = 1_000_000.0;
|
||||
for _ in 0..doc_with_value {
|
||||
let val: f64 = rng.gen_range(0.0..1_000_000.0);
|
||||
let json = if rng.gen_bool(0.1) {
|
||||
@@ -290,41 +290,6 @@ mod bench {
|
||||
});
|
||||
}
|
||||
|
||||
bench_all_cardinalities!(bench_aggregation_terms_many_with_top_hits_agg);
|
||||
|
||||
fn bench_aggregation_terms_many_with_top_hits_agg_card(
|
||||
b: &mut Bencher,
|
||||
cardinality: Cardinality,
|
||||
) {
|
||||
let index = get_test_index_bench(cardinality).unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let agg_req: Aggregations = serde_json::from_value(json!({
|
||||
"my_texts": {
|
||||
"terms": { "field": "text_many_terms" },
|
||||
"aggs": {
|
||||
"top_hits": { "top_hits":
|
||||
{
|
||||
"sort": [
|
||||
{ "score": "desc" }
|
||||
],
|
||||
"size": 2,
|
||||
"doc_value_fields": ["score_f64"]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let collector = get_collector(agg_req);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&AllQuery, &collector).unwrap()
|
||||
});
|
||||
}
|
||||
|
||||
bench_all_cardinalities!(bench_aggregation_terms_many_with_sub_agg);
|
||||
|
||||
fn bench_aggregation_terms_many_with_sub_agg_card(b: &mut Bencher, cardinality: Cardinality) {
|
||||
|
||||
@@ -73,9 +73,9 @@ impl AggregationLimits {
|
||||
/// Create a new ResourceLimitGuard, that will release the memory when dropped.
|
||||
pub fn new_guard(&self) -> ResourceLimitGuard {
|
||||
ResourceLimitGuard {
|
||||
// The counter which is shared between the aggregations for one request.
|
||||
/// The counter which is shared between the aggregations for one request.
|
||||
memory_consumption: Arc::clone(&self.memory_consumption),
|
||||
// The memory_limit in bytes
|
||||
/// The memory_limit in bytes
|
||||
memory_limit: self.memory_limit,
|
||||
allocated_with_the_guard: 0,
|
||||
}
|
||||
@@ -134,142 +134,3 @@ impl Drop for ResourceLimitGuard {
|
||||
.fetch_sub(self.allocated_with_the_guard, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::aggregation::tests::exec_request_with_query;
|
||||
|
||||
// https://github.com/quickwit-oss/quickwit/issues/3837
|
||||
#[test]
|
||||
fn test_agg_limits_with_empty_merge() {
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::bucket::tests::get_test_index_from_docs;
|
||||
|
||||
let docs = vec![
|
||||
vec![r#"{ "date": "2015-01-02T00:00:00Z", "text": "bbb", "text2": "bbb" }"#],
|
||||
vec![r#"{ "text": "aaa", "text2": "bbb" }"#],
|
||||
];
|
||||
let index = get_test_index_from_docs(false, &docs).unwrap();
|
||||
|
||||
{
|
||||
let elasticsearch_compatible_json = json!(
|
||||
{
|
||||
"1": {
|
||||
"terms": {"field": "text2", "min_doc_count": 0},
|
||||
"aggs": {
|
||||
"2":{
|
||||
"date_histogram": {
|
||||
"field": "date",
|
||||
"fixed_interval": "1d",
|
||||
"extended_bounds": {
|
||||
"min": "2015-01-01T00:00:00Z",
|
||||
"max": "2015-01-10T00:00:00Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
let agg_req: Aggregations = serde_json::from_str(
|
||||
&serde_json::to_string(&elasticsearch_compatible_json).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
let res = exec_request_with_query(agg_req, &index, Some(("text", "bbb"))).unwrap();
|
||||
let expected_res = json!({
|
||||
"1": {
|
||||
"buckets": [
|
||||
{
|
||||
"2": {
|
||||
"buckets": [
|
||||
{ "doc_count": 0, "key": 1420070400000.0, "key_as_string": "2015-01-01T00:00:00Z" },
|
||||
{ "doc_count": 1, "key": 1420156800000.0, "key_as_string": "2015-01-02T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420243200000.0, "key_as_string": "2015-01-03T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420329600000.0, "key_as_string": "2015-01-04T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420416000000.0, "key_as_string": "2015-01-05T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420502400000.0, "key_as_string": "2015-01-06T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420588800000.0, "key_as_string": "2015-01-07T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420675200000.0, "key_as_string": "2015-01-08T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420761600000.0, "key_as_string": "2015-01-09T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420848000000.0, "key_as_string": "2015-01-10T00:00:00Z" }
|
||||
]
|
||||
},
|
||||
"doc_count": 1,
|
||||
"key": "bbb"
|
||||
}
|
||||
],
|
||||
"doc_count_error_upper_bound": 0,
|
||||
"sum_other_doc_count": 0
|
||||
}
|
||||
});
|
||||
assert_eq!(res, expected_res);
|
||||
}
|
||||
}
|
||||
|
||||
// https://github.com/quickwit-oss/quickwit/issues/3837
|
||||
#[test]
|
||||
fn test_agg_limits_with_empty_data() {
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::bucket::tests::get_test_index_from_docs;
|
||||
|
||||
let docs = vec![vec![r#"{ "text": "aaa", "text2": "bbb" }"#]];
|
||||
let index = get_test_index_from_docs(false, &docs).unwrap();
|
||||
|
||||
{
|
||||
// Empty result since there is no doc with dates
|
||||
let elasticsearch_compatible_json = json!(
|
||||
{
|
||||
"1": {
|
||||
"terms": {"field": "text2", "min_doc_count": 0},
|
||||
"aggs": {
|
||||
"2":{
|
||||
"date_histogram": {
|
||||
"field": "date",
|
||||
"fixed_interval": "1d",
|
||||
"extended_bounds": {
|
||||
"min": "2015-01-01T00:00:00Z",
|
||||
"max": "2015-01-10T00:00:00Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
let agg_req: Aggregations = serde_json::from_str(
|
||||
&serde_json::to_string(&elasticsearch_compatible_json).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
let res = exec_request_with_query(agg_req, &index, Some(("text", "bbb"))).unwrap();
|
||||
let expected_res = json!({
|
||||
"1": {
|
||||
"buckets": [
|
||||
{
|
||||
"2": {
|
||||
"buckets": [
|
||||
{ "doc_count": 0, "key": 1420070400000.0, "key_as_string": "2015-01-01T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420156800000.0, "key_as_string": "2015-01-02T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420243200000.0, "key_as_string": "2015-01-03T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420329600000.0, "key_as_string": "2015-01-04T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420416000000.0, "key_as_string": "2015-01-05T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420502400000.0, "key_as_string": "2015-01-06T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420588800000.0, "key_as_string": "2015-01-07T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420675200000.0, "key_as_string": "2015-01-08T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420761600000.0, "key_as_string": "2015-01-09T00:00:00Z" },
|
||||
{ "doc_count": 0, "key": 1420848000000.0, "key_as_string": "2015-01-10T00:00:00Z" }
|
||||
]
|
||||
},
|
||||
"doc_count": 0,
|
||||
"key": "bbb"
|
||||
}
|
||||
],
|
||||
"doc_count_error_upper_bound": 0,
|
||||
"sum_other_doc_count": 0
|
||||
}
|
||||
});
|
||||
assert_eq!(res, expected_res);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,7 +35,7 @@ use super::bucket::{
|
||||
};
|
||||
use super::metric::{
|
||||
AverageAggregation, CountAggregation, MaxAggregation, MinAggregation,
|
||||
PercentilesAggregationReq, StatsAggregation, SumAggregation, TopHitsAggregation,
|
||||
PercentilesAggregationReq, StatsAggregation, SumAggregation,
|
||||
};
|
||||
|
||||
/// The top-level aggregation request structure, which contains [`Aggregation`] and their user
|
||||
@@ -93,12 +93,7 @@ impl Aggregation {
|
||||
}
|
||||
|
||||
fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
|
||||
fast_field_names.extend(
|
||||
self.agg
|
||||
.get_fast_field_names()
|
||||
.iter()
|
||||
.map(|s| s.to_string()),
|
||||
);
|
||||
fast_field_names.insert(self.agg.get_fast_field_name().to_string());
|
||||
fast_field_names.extend(get_fast_field_names(&self.sub_aggregation));
|
||||
}
|
||||
}
|
||||
@@ -152,27 +147,23 @@ pub enum AggregationVariants {
|
||||
/// Computes the sum of the extracted values.
|
||||
#[serde(rename = "percentiles")]
|
||||
Percentiles(PercentilesAggregationReq),
|
||||
/// Finds the top k values matching some order
|
||||
#[serde(rename = "top_hits")]
|
||||
TopHits(TopHitsAggregation),
|
||||
}
|
||||
|
||||
impl AggregationVariants {
|
||||
/// Returns the name of the fields used by the aggregation.
|
||||
pub fn get_fast_field_names(&self) -> Vec<&str> {
|
||||
/// Returns the name of the field used by the aggregation.
|
||||
pub fn get_fast_field_name(&self) -> &str {
|
||||
match self {
|
||||
AggregationVariants::Terms(terms) => vec![terms.field.as_str()],
|
||||
AggregationVariants::Range(range) => vec![range.field.as_str()],
|
||||
AggregationVariants::Histogram(histogram) => vec![histogram.field.as_str()],
|
||||
AggregationVariants::DateHistogram(histogram) => vec![histogram.field.as_str()],
|
||||
AggregationVariants::Average(avg) => vec![avg.field_name()],
|
||||
AggregationVariants::Count(count) => vec![count.field_name()],
|
||||
AggregationVariants::Max(max) => vec![max.field_name()],
|
||||
AggregationVariants::Min(min) => vec![min.field_name()],
|
||||
AggregationVariants::Stats(stats) => vec![stats.field_name()],
|
||||
AggregationVariants::Sum(sum) => vec![sum.field_name()],
|
||||
AggregationVariants::Percentiles(per) => vec![per.field_name()],
|
||||
AggregationVariants::TopHits(top_hits) => top_hits.field_names(),
|
||||
AggregationVariants::Terms(terms) => terms.field.as_str(),
|
||||
AggregationVariants::Range(range) => range.field.as_str(),
|
||||
AggregationVariants::Histogram(histogram) => histogram.field.as_str(),
|
||||
AggregationVariants::DateHistogram(histogram) => histogram.field.as_str(),
|
||||
AggregationVariants::Average(avg) => avg.field_name(),
|
||||
AggregationVariants::Count(count) => count.field_name(),
|
||||
AggregationVariants::Max(max) => max.field_name(),
|
||||
AggregationVariants::Min(min) => min.field_name(),
|
||||
AggregationVariants::Stats(stats) => stats.field_name(),
|
||||
AggregationVariants::Sum(sum) => sum.field_name(),
|
||||
AggregationVariants::Percentiles(per) => per.field_name(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
//! This will enhance the request tree with access to the fastfield and metadata.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::io;
|
||||
|
||||
use columnar::{Column, ColumnBlockAccessor, ColumnType, DynamicColumn, StrColumn};
|
||||
use columnar::{Column, ColumnBlockAccessor, ColumnType, StrColumn};
|
||||
|
||||
use super::agg_limits::ResourceLimitGuard;
|
||||
use super::agg_req::{Aggregation, AggregationVariants, Aggregations};
|
||||
@@ -17,7 +14,7 @@ use super::metric::{
|
||||
use super::segment_agg_result::AggregationLimits;
|
||||
use super::VecWithNames;
|
||||
use crate::aggregation::{f64_to_fastfield_u64, Key};
|
||||
use crate::{SegmentOrdinal, SegmentReader};
|
||||
use crate::SegmentReader;
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct AggregationsWithAccessor {
|
||||
@@ -35,7 +32,6 @@ impl AggregationsWithAccessor {
|
||||
}
|
||||
|
||||
pub struct AggregationWithAccessor {
|
||||
pub(crate) segment_ordinal: SegmentOrdinal,
|
||||
/// In general there can be buckets without fast field access, e.g. buckets that are created
|
||||
/// based on search terms. That is not that case currently, but eventually this needs to be
|
||||
/// Option or moved.
|
||||
@@ -48,16 +44,10 @@ pub struct AggregationWithAccessor {
|
||||
pub(crate) limits: ResourceLimitGuard,
|
||||
pub(crate) column_block_accessor: ColumnBlockAccessor<u64>,
|
||||
/// Used for missing term aggregation, which checks all columns for existence.
|
||||
/// And also for `top_hits` aggregation, which may sort on multiple fields.
|
||||
/// By convention the missing aggregation is chosen, when this property is set
|
||||
/// (instead bein set in `agg`).
|
||||
/// If this needs to used by other aggregations, we need to refactor this.
|
||||
// NOTE: we can make all other aggregations use this instead of the `accessor` and `field_type`
|
||||
// (making them obsolete) But will it have a performance impact?
|
||||
pub(crate) accessors: Vec<(Column<u64>, ColumnType)>,
|
||||
/// Map field names to all associated column accessors.
|
||||
/// This field is used for `docvalue_fields`, which is currently only supported for `top_hits`.
|
||||
pub(crate) value_accessors: HashMap<String, Vec<DynamicColumn>>,
|
||||
pub(crate) accessors: Vec<Column<u64>>,
|
||||
pub(crate) agg: Aggregation,
|
||||
}
|
||||
|
||||
@@ -67,55 +57,19 @@ impl AggregationWithAccessor {
|
||||
agg: &Aggregation,
|
||||
sub_aggregation: &Aggregations,
|
||||
reader: &SegmentReader,
|
||||
segment_ordinal: SegmentOrdinal,
|
||||
limits: AggregationLimits,
|
||||
) -> crate::Result<Vec<AggregationWithAccessor>> {
|
||||
let mut agg = agg.clone();
|
||||
|
||||
let add_agg_with_accessor = |agg: &Aggregation,
|
||||
accessor: Column<u64>,
|
||||
let add_agg_with_accessor = |accessor: Column<u64>,
|
||||
column_type: ColumnType,
|
||||
aggs: &mut Vec<AggregationWithAccessor>|
|
||||
-> crate::Result<()> {
|
||||
let res = AggregationWithAccessor {
|
||||
segment_ordinal,
|
||||
accessor,
|
||||
accessors: Default::default(),
|
||||
value_accessors: Default::default(),
|
||||
accessors: Vec::new(),
|
||||
field_type: column_type,
|
||||
sub_aggregation: get_aggs_with_segment_accessor_and_validate(
|
||||
sub_aggregation,
|
||||
reader,
|
||||
segment_ordinal,
|
||||
&limits,
|
||||
)?,
|
||||
agg: agg.clone(),
|
||||
limits: limits.new_guard(),
|
||||
missing_value_for_accessor: None,
|
||||
str_dict_column: None,
|
||||
column_block_accessor: Default::default(),
|
||||
};
|
||||
aggs.push(res);
|
||||
Ok(())
|
||||
};
|
||||
|
||||
let add_agg_with_accessors = |agg: &Aggregation,
|
||||
accessors: Vec<(Column<u64>, ColumnType)>,
|
||||
aggs: &mut Vec<AggregationWithAccessor>,
|
||||
value_accessors: HashMap<String, Vec<DynamicColumn>>|
|
||||
-> crate::Result<()> {
|
||||
let (accessor, field_type) = accessors.first().expect("at least one accessor");
|
||||
let res = AggregationWithAccessor {
|
||||
segment_ordinal,
|
||||
// TODO: We should do away with the `accessor` field altogether
|
||||
accessor: accessor.clone(),
|
||||
value_accessors,
|
||||
field_type: *field_type,
|
||||
accessors,
|
||||
sub_aggregation: get_aggs_with_segment_accessor_and_validate(
|
||||
sub_aggregation,
|
||||
reader,
|
||||
segment_ordinal,
|
||||
&limits,
|
||||
)?,
|
||||
agg: agg.clone(),
|
||||
@@ -130,36 +84,31 @@ impl AggregationWithAccessor {
|
||||
|
||||
let mut res: Vec<AggregationWithAccessor> = Vec::new();
|
||||
use AggregationVariants::*;
|
||||
|
||||
match agg.agg {
|
||||
match &agg.agg {
|
||||
Range(RangeAggregation {
|
||||
field: ref field_name,
|
||||
..
|
||||
field: field_name, ..
|
||||
}) => {
|
||||
let (accessor, column_type) =
|
||||
get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
|
||||
add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
|
||||
add_agg_with_accessor(accessor, column_type, &mut res)?;
|
||||
}
|
||||
Histogram(HistogramAggregation {
|
||||
field: ref field_name,
|
||||
..
|
||||
field: field_name, ..
|
||||
}) => {
|
||||
let (accessor, column_type) =
|
||||
get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
|
||||
add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
|
||||
add_agg_with_accessor(accessor, column_type, &mut res)?;
|
||||
}
|
||||
DateHistogram(DateHistogramAggregationReq {
|
||||
field: ref field_name,
|
||||
..
|
||||
field: field_name, ..
|
||||
}) => {
|
||||
let (accessor, column_type) =
|
||||
// Only DateTime is supported for DateHistogram
|
||||
get_ff_reader(reader, field_name, Some(&[ColumnType::DateTime]))?;
|
||||
add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
|
||||
get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
|
||||
add_agg_with_accessor(accessor, column_type, &mut res)?;
|
||||
}
|
||||
Terms(TermsAggregation {
|
||||
field: ref field_name,
|
||||
ref missing,
|
||||
field: field_name,
|
||||
missing,
|
||||
..
|
||||
}) => {
|
||||
let str_dict_column = reader.fast_fields().str(field_name)?;
|
||||
@@ -168,10 +117,10 @@ impl AggregationWithAccessor {
|
||||
ColumnType::U64,
|
||||
ColumnType::F64,
|
||||
ColumnType::Str,
|
||||
ColumnType::DateTime,
|
||||
ColumnType::Bool,
|
||||
// ColumnType::Bytes Unsupported
|
||||
// ColumnType::Bool Unsupported
|
||||
// ColumnType::IpAddr Unsupported
|
||||
// ColumnType::DateTime Unsupported
|
||||
];
|
||||
|
||||
// In case the column is empty we want the shim column to match the missing type
|
||||
@@ -196,27 +145,29 @@ impl AggregationWithAccessor {
|
||||
.map(|m| matches!(m, Key::Str(_)))
|
||||
.unwrap_or(false);
|
||||
|
||||
// Actually we could convert the text to a number and have the fast path, if it is
|
||||
// provided in Rfc3339 format. But this use case is probably common
|
||||
// enough to justify the effort.
|
||||
let text_on_date_col = column_and_types.len() == 1
|
||||
&& column_and_types[0].1 == ColumnType::DateTime
|
||||
&& missing
|
||||
.as_ref()
|
||||
.map(|m| matches!(m, Key::Str(_)))
|
||||
.unwrap_or(false);
|
||||
|
||||
let use_special_missing_agg =
|
||||
missing_and_more_than_one_col || text_on_non_text_col || text_on_date_col;
|
||||
let use_special_missing_agg = missing_and_more_than_one_col || text_on_non_text_col;
|
||||
if use_special_missing_agg {
|
||||
let column_and_types =
|
||||
get_all_ff_reader_or_empty(reader, field_name, None, fallback_type)?;
|
||||
|
||||
let accessors = column_and_types
|
||||
.iter()
|
||||
.map(|c_t| (c_t.0.clone(), c_t.1))
|
||||
.collect();
|
||||
add_agg_with_accessors(&agg, accessors, &mut res, Default::default())?;
|
||||
let accessors: Vec<Column> =
|
||||
column_and_types.iter().map(|(a, _)| a.clone()).collect();
|
||||
let agg_wit_acc = AggregationWithAccessor {
|
||||
missing_value_for_accessor: None,
|
||||
accessor: accessors[0].clone(),
|
||||
accessors,
|
||||
field_type: ColumnType::U64,
|
||||
sub_aggregation: get_aggs_with_segment_accessor_and_validate(
|
||||
sub_aggregation,
|
||||
reader,
|
||||
&limits,
|
||||
)?,
|
||||
agg: agg.clone(),
|
||||
str_dict_column: str_dict_column.clone(),
|
||||
limits: limits.new_guard(),
|
||||
column_block_accessor: Default::default(),
|
||||
};
|
||||
res.push(agg_wit_acc);
|
||||
}
|
||||
|
||||
for (accessor, column_type) in column_and_types {
|
||||
@@ -226,25 +177,21 @@ impl AggregationWithAccessor {
|
||||
missing.clone()
|
||||
};
|
||||
|
||||
let missing_value_for_accessor = if let Some(missing) =
|
||||
missing_value_term_agg.as_ref()
|
||||
{
|
||||
get_missing_val(column_type, missing, agg.agg.get_fast_field_names()[0])?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let missing_value_for_accessor =
|
||||
if let Some(missing) = missing_value_term_agg.as_ref() {
|
||||
get_missing_val(column_type, missing, agg.agg.get_fast_field_name())?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let agg = AggregationWithAccessor {
|
||||
segment_ordinal,
|
||||
missing_value_for_accessor,
|
||||
accessor,
|
||||
accessors: Default::default(),
|
||||
value_accessors: Default::default(),
|
||||
accessors: Vec::new(),
|
||||
field_type: column_type,
|
||||
sub_aggregation: get_aggs_with_segment_accessor_and_validate(
|
||||
sub_aggregation,
|
||||
reader,
|
||||
segment_ordinal,
|
||||
&limits,
|
||||
)?,
|
||||
agg: agg.clone(),
|
||||
@@ -256,63 +203,34 @@ impl AggregationWithAccessor {
|
||||
}
|
||||
}
|
||||
Average(AverageAggregation {
|
||||
field: ref field_name,
|
||||
..
|
||||
field: field_name, ..
|
||||
})
|
||||
| Count(CountAggregation {
|
||||
field: ref field_name,
|
||||
..
|
||||
field: field_name, ..
|
||||
})
|
||||
| Max(MaxAggregation {
|
||||
field: ref field_name,
|
||||
..
|
||||
field: field_name, ..
|
||||
})
|
||||
| Min(MinAggregation {
|
||||
field: ref field_name,
|
||||
..
|
||||
field: field_name, ..
|
||||
})
|
||||
| Stats(StatsAggregation {
|
||||
field: ref field_name,
|
||||
..
|
||||
field: field_name, ..
|
||||
})
|
||||
| Sum(SumAggregation {
|
||||
field: ref field_name,
|
||||
..
|
||||
field: field_name, ..
|
||||
}) => {
|
||||
let (accessor, column_type) =
|
||||
get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
|
||||
add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
|
||||
add_agg_with_accessor(accessor, column_type, &mut res)?;
|
||||
}
|
||||
Percentiles(ref percentiles) => {
|
||||
Percentiles(percentiles) => {
|
||||
let (accessor, column_type) = get_ff_reader(
|
||||
reader,
|
||||
percentiles.field_name(),
|
||||
Some(get_numeric_or_date_column_types()),
|
||||
)?;
|
||||
add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
|
||||
}
|
||||
TopHits(ref mut top_hits) => {
|
||||
top_hits.validate_and_resolve(reader.fast_fields().columnar())?;
|
||||
let accessors: Vec<(Column<u64>, ColumnType)> = top_hits
|
||||
.field_names()
|
||||
.iter()
|
||||
.map(|field| {
|
||||
get_ff_reader(reader, field, Some(get_numeric_or_date_column_types()))
|
||||
})
|
||||
.collect::<crate::Result<_>>()?;
|
||||
|
||||
let value_accessors = top_hits
|
||||
.value_field_names()
|
||||
.iter()
|
||||
.map(|field_name| {
|
||||
Ok((
|
||||
field_name.to_string(),
|
||||
get_dynamic_columns(reader, field_name)?,
|
||||
))
|
||||
})
|
||||
.collect::<crate::Result<_>>()?;
|
||||
|
||||
add_agg_with_accessors(&agg, accessors, &mut res, value_accessors)?;
|
||||
add_agg_with_accessor(accessor, column_type, &mut res)?;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -354,7 +272,6 @@ fn get_numeric_or_date_column_types() -> &'static [ColumnType] {
|
||||
pub(crate) fn get_aggs_with_segment_accessor_and_validate(
|
||||
aggs: &Aggregations,
|
||||
reader: &SegmentReader,
|
||||
segment_ordinal: SegmentOrdinal,
|
||||
limits: &AggregationLimits,
|
||||
) -> crate::Result<AggregationsWithAccessor> {
|
||||
let mut aggss = Vec::new();
|
||||
@@ -363,7 +280,6 @@ pub(crate) fn get_aggs_with_segment_accessor_and_validate(
|
||||
agg,
|
||||
agg.sub_aggregation(),
|
||||
reader,
|
||||
segment_ordinal,
|
||||
limits.clone(),
|
||||
)?;
|
||||
for agg in aggs {
|
||||
@@ -393,19 +309,6 @@ fn get_ff_reader(
|
||||
Ok(ff_field_with_type)
|
||||
}
|
||||
|
||||
fn get_dynamic_columns(
|
||||
reader: &SegmentReader,
|
||||
field_name: &str,
|
||||
) -> crate::Result<Vec<columnar::DynamicColumn>> {
|
||||
let ff_fields = reader.fast_fields().dynamic_column_handles(field_name)?;
|
||||
let cols = ff_fields
|
||||
.iter()
|
||||
.map(|h| h.open())
|
||||
.collect::<io::Result<_>>()?;
|
||||
assert!(!ff_fields.is_empty(), "field {} not found", field_name);
|
||||
Ok(cols)
|
||||
}
|
||||
|
||||
/// Get all fast field reader or empty as default.
|
||||
///
|
||||
/// Is guaranteed to return at least one column.
|
||||
|
||||
@@ -8,7 +8,7 @@ use rustc_hash::FxHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::bucket::GetDocCount;
|
||||
use super::metric::{PercentilesMetricResult, SingleMetricResult, Stats, TopHitsMetricResult};
|
||||
use super::metric::{PercentilesMetricResult, SingleMetricResult, Stats};
|
||||
use super::{AggregationError, Key};
|
||||
use crate::TantivyError;
|
||||
|
||||
@@ -90,10 +90,8 @@ pub enum MetricResult {
|
||||
Stats(Stats),
|
||||
/// Sum metric result.
|
||||
Sum(SingleMetricResult),
|
||||
/// Percentiles metric result.
|
||||
/// Sum metric result.
|
||||
Percentiles(PercentilesMetricResult),
|
||||
/// Top hits metric result
|
||||
TopHits(TopHitsMetricResult),
|
||||
}
|
||||
|
||||
impl MetricResult {
|
||||
@@ -108,9 +106,6 @@ impl MetricResult {
|
||||
MetricResult::Percentiles(_) => Err(TantivyError::AggregationError(
|
||||
AggregationError::InvalidRequest("percentiles can't be used to order".to_string()),
|
||||
)),
|
||||
MetricResult::TopHits(_) => Err(TantivyError::AggregationError(
|
||||
AggregationError::InvalidRequest("top_hits can't be used to order".to_string()),
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ use crate::aggregation::tests::{get_test_index_2_segments, get_test_index_from_v
|
||||
use crate::aggregation::DistributedAggregationCollector;
|
||||
use crate::query::{AllQuery, TermQuery};
|
||||
use crate::schema::{IndexRecordOption, Schema, FAST};
|
||||
use crate::{Index, IndexWriter, Term};
|
||||
use crate::{Index, Term};
|
||||
|
||||
fn get_avg_req(field_name: &str) -> Aggregation {
|
||||
serde_json::from_value(json!({
|
||||
@@ -586,10 +586,7 @@ fn test_aggregation_on_json_object() {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"color": "red"})))
|
||||
.unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"color": "red"})))
|
||||
.unwrap();
|
||||
@@ -617,74 +614,12 @@ fn test_aggregation_on_json_object() {
|
||||
&serde_json::json!({
|
||||
"jsonagg": {
|
||||
"buckets": [
|
||||
{"doc_count": 2, "key": "red"},
|
||||
{"doc_count": 1, "key": "blue"},
|
||||
],
|
||||
"doc_count_error_upper_bound": 0,
|
||||
"sum_other_doc_count": 0
|
||||
}
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregation_on_nested_json_object() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let json = schema_builder.add_json_field("json.blub", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"color.dot": "red", "color": {"nested":"red"} })))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"color.dot": "blue", "color": {"nested":"blue"} })))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"color.dot": "blue", "color": {"nested":"blue"} })))
|
||||
.unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
|
||||
let agg: Aggregations = serde_json::from_value(json!({
|
||||
"jsonagg1": {
|
||||
"terms": {
|
||||
"field": "json\\.blub.color\\.dot",
|
||||
}
|
||||
},
|
||||
"jsonagg2": {
|
||||
"terms": {
|
||||
"field": "json\\.blub.color.nested",
|
||||
}
|
||||
}
|
||||
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let aggregation_collector = get_collector(agg);
|
||||
let aggregation_results = searcher.search(&AllQuery, &aggregation_collector).unwrap();
|
||||
let aggregation_res_json = serde_json::to_value(aggregation_results).unwrap();
|
||||
assert_eq!(
|
||||
&aggregation_res_json,
|
||||
&serde_json::json!({
|
||||
"jsonagg1": {
|
||||
"buckets": [
|
||||
{"doc_count": 2, "key": "blue"},
|
||||
{"doc_count": 1, "key": "red"}
|
||||
],
|
||||
"doc_count_error_upper_bound": 0,
|
||||
"sum_other_doc_count": 0
|
||||
},
|
||||
"jsonagg2": {
|
||||
"buckets": [
|
||||
{"doc_count": 2, "key": "blue"},
|
||||
{"doc_count": 1, "key": "red"}
|
||||
],
|
||||
"doc_count_error_upper_bound": 0,
|
||||
"sum_other_doc_count": 0
|
||||
}
|
||||
|
||||
})
|
||||
);
|
||||
}
|
||||
@@ -695,7 +630,7 @@ fn test_aggregation_on_json_object_empty_columns() {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
// => Empty column when accessing color
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"price": 10.0})))
|
||||
@@ -813,19 +748,13 @@ fn test_aggregation_on_json_object_mixed_types() {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
// => Segment with all values numeric
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": 10.0})))
|
||||
.unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
// => Segment with all values text
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": "blue"})))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": "blue"})))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": "blue"})))
|
||||
.unwrap();
|
||||
@@ -837,9 +766,6 @@ fn test_aggregation_on_json_object_mixed_types() {
|
||||
index_writer.commit().unwrap();
|
||||
|
||||
// => Segment with mixed values
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": "red"})))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": "red"})))
|
||||
.unwrap();
|
||||
@@ -885,8 +811,6 @@ fn test_aggregation_on_json_object_mixed_types() {
|
||||
|
||||
let aggregation_results = searcher.search(&AllQuery, &aggregation_collector).unwrap();
|
||||
let aggregation_res_json = serde_json::to_value(aggregation_results).unwrap();
|
||||
// pretty print as json
|
||||
use pretty_assertions::assert_eq;
|
||||
assert_eq!(
|
||||
&aggregation_res_json,
|
||||
&serde_json::json!({
|
||||
@@ -902,9 +826,9 @@ fn test_aggregation_on_json_object_mixed_types() {
|
||||
"buckets": [
|
||||
{ "doc_count": 1, "key": 10.0, "min_price": { "value": 10.0 } },
|
||||
{ "doc_count": 1, "key": -20.5, "min_price": { "value": -20.5 } },
|
||||
{ "doc_count": 2, "key": "red", "min_price": { "value": null } },
|
||||
{ "doc_count": 2, "key": 1.0, "key_as_string": "true", "min_price": { "value": null } },
|
||||
{ "doc_count": 3, "key": "blue", "min_price": { "value": null } },
|
||||
// TODO bool is also not yet handled in aggregation
|
||||
{ "doc_count": 1, "key": "blue", "min_price": { "value": null } },
|
||||
{ "doc_count": 1, "key": "red", "min_price": { "value": null } },
|
||||
],
|
||||
"sum_other_doc_count": 0
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::{HistogramAggregation, HistogramBounds};
|
||||
use crate::aggregation::*;
|
||||
use crate::aggregation::AggregationError;
|
||||
|
||||
/// DateHistogramAggregation is similar to `HistogramAggregation`, but it can only be used with date
|
||||
/// type.
|
||||
@@ -132,7 +132,6 @@ impl DateHistogramAggregationReq {
|
||||
hard_bounds: self.hard_bounds,
|
||||
extended_bounds: self.extended_bounds,
|
||||
keyed: self.keyed,
|
||||
is_normalized_to_ns: false,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -244,15 +243,15 @@ fn parse_into_milliseconds(input: &str) -> Result<i64, AggregationError> {
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
mod tests {
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::tests::exec_request;
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::schema::{Schema, FAST, STRING};
|
||||
use crate::{Index, IndexWriter, TantivyDocument};
|
||||
use crate::schema::{Schema, FAST};
|
||||
use crate::Index;
|
||||
|
||||
#[test]
|
||||
fn test_parse_into_millisecs() {
|
||||
@@ -307,9 +306,7 @@ pub mod tests {
|
||||
) -> crate::Result<Index> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_date_field("date", FAST);
|
||||
schema_builder.add_json_field("mixed", FAST);
|
||||
schema_builder.add_text_field("text", FAST | STRING);
|
||||
schema_builder.add_text_field("text2", FAST | STRING);
|
||||
schema_builder.add_text_field("text", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
{
|
||||
@@ -317,7 +314,7 @@ pub mod tests {
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
for values in segment_and_docs {
|
||||
for doc_str in values {
|
||||
let doc = TantivyDocument::parse_json(&schema, doc_str)?;
|
||||
let doc = schema.parse_document(doc_str)?;
|
||||
index_writer.add_document(doc)?;
|
||||
}
|
||||
// writing the segment
|
||||
@@ -329,7 +326,7 @@ pub mod tests {
|
||||
.searchable_segment_ids()
|
||||
.expect("Searchable segments failed.");
|
||||
if segment_ids.len() > 1 {
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.merge(&segment_ids).wait()?;
|
||||
index_writer.wait_merging_threads()?;
|
||||
}
|
||||
@@ -352,10 +349,8 @@ pub mod tests {
|
||||
let docs = vec![
|
||||
vec![r#"{ "date": "2015-01-01T12:10:30Z", "text": "aaa" }"#],
|
||||
vec![r#"{ "date": "2015-01-01T11:11:30Z", "text": "bbb" }"#],
|
||||
vec![r#"{ "date": "2015-01-01T11:11:30Z", "text": "bbb" }"#],
|
||||
vec![r#"{ "date": "2015-01-02T00:00:00Z", "text": "bbb" }"#],
|
||||
vec![r#"{ "date": "2015-01-06T00:00:00Z", "text": "ccc" }"#],
|
||||
vec![r#"{ "date": "2015-01-06T00:00:00Z", "text": "ccc" }"#],
|
||||
];
|
||||
let index = get_test_index_from_docs(merge_segments, &docs).unwrap();
|
||||
|
||||
@@ -384,7 +379,7 @@ pub mod tests {
|
||||
{
|
||||
"key_as_string" : "2015-01-01T00:00:00Z",
|
||||
"key" : 1420070400000.0,
|
||||
"doc_count" : 6
|
||||
"doc_count" : 4
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -422,15 +417,15 @@ pub mod tests {
|
||||
{
|
||||
"key_as_string" : "2015-01-01T00:00:00Z",
|
||||
"key" : 1420070400000.0,
|
||||
"doc_count" : 6,
|
||||
"doc_count" : 4,
|
||||
"texts": {
|
||||
"buckets": [
|
||||
{
|
||||
"doc_count": 3,
|
||||
"doc_count": 2,
|
||||
"key": "bbb"
|
||||
},
|
||||
{
|
||||
"doc_count": 2,
|
||||
"doc_count": 1,
|
||||
"key": "ccc"
|
||||
},
|
||||
{
|
||||
@@ -469,7 +464,7 @@ pub mod tests {
|
||||
"sales_over_time": {
|
||||
"buckets": [
|
||||
{
|
||||
"doc_count": 3,
|
||||
"doc_count": 2,
|
||||
"key": 1420070400000.0,
|
||||
"key_as_string": "2015-01-01T00:00:00Z"
|
||||
},
|
||||
@@ -494,7 +489,7 @@ pub mod tests {
|
||||
"key_as_string": "2015-01-05T00:00:00Z"
|
||||
},
|
||||
{
|
||||
"doc_count": 2,
|
||||
"doc_count": 1,
|
||||
"key": 1420502400000.0,
|
||||
"key_as_string": "2015-01-06T00:00:00Z"
|
||||
}
|
||||
@@ -535,7 +530,7 @@ pub mod tests {
|
||||
"key_as_string": "2014-12-31T00:00:00Z"
|
||||
},
|
||||
{
|
||||
"doc_count": 3,
|
||||
"doc_count": 2,
|
||||
"key": 1420070400000.0,
|
||||
"key_as_string": "2015-01-01T00:00:00Z"
|
||||
},
|
||||
@@ -560,7 +555,7 @@ pub mod tests {
|
||||
"key_as_string": "2015-01-05T00:00:00Z"
|
||||
},
|
||||
{
|
||||
"doc_count": 2,
|
||||
"doc_count": 1,
|
||||
"key": 1420502400000.0,
|
||||
"key_as_string": "2015-01-06T00:00:00Z"
|
||||
},
|
||||
|
||||
@@ -20,7 +20,7 @@ use crate::aggregation::intermediate_agg_result::{
|
||||
use crate::aggregation::segment_agg_result::{
|
||||
build_segment_agg_collector, AggregationLimits, SegmentAggregationCollector,
|
||||
};
|
||||
use crate::aggregation::*;
|
||||
use crate::aggregation::{f64_from_fastfield_u64, format_date};
|
||||
use crate::TantivyError;
|
||||
|
||||
/// Histogram is a bucket aggregation, where buckets are created dynamically for given `interval`.
|
||||
@@ -73,7 +73,6 @@ pub struct HistogramAggregation {
|
||||
pub field: String,
|
||||
/// The interval to chunk your data range. Each bucket spans a value range of [0..interval).
|
||||
/// Must be a positive value.
|
||||
#[serde(deserialize_with = "deserialize_f64")]
|
||||
pub interval: f64,
|
||||
/// Intervals implicitly defines an absolute grid of buckets `[interval * k, interval * (k +
|
||||
/// 1))`.
|
||||
@@ -86,7 +85,6 @@ pub struct HistogramAggregation {
|
||||
/// fall into the buckets with the key 0 and 10.
|
||||
/// With offset 5 and interval 10, they would both fall into the bucket with they key 5 and the
|
||||
/// range [5..15)
|
||||
#[serde(default, deserialize_with = "deserialize_option_f64")]
|
||||
pub offset: Option<f64>,
|
||||
/// The minimum number of documents in a bucket to be returned. Defaults to 0.
|
||||
pub min_doc_count: Option<u64>,
|
||||
@@ -124,14 +122,11 @@ pub struct HistogramAggregation {
|
||||
/// Whether to return the buckets as a hash map
|
||||
#[serde(default)]
|
||||
pub keyed: bool,
|
||||
/// Whether the values are normalized to ns for date time values. Defaults to false.
|
||||
#[serde(default)]
|
||||
pub is_normalized_to_ns: bool,
|
||||
}
|
||||
|
||||
impl HistogramAggregation {
|
||||
pub(crate) fn normalize_date_time(&mut self) {
|
||||
if !self.is_normalized_to_ns {
|
||||
pub(crate) fn normalize(&mut self, column_type: ColumnType) {
|
||||
if column_type.is_date_time() {
|
||||
// values are provided in ms, but the fastfield is in nano seconds
|
||||
self.interval *= 1_000_000.0;
|
||||
self.offset = self.offset.map(|off| off * 1_000_000.0);
|
||||
@@ -143,7 +138,6 @@ impl HistogramAggregation {
|
||||
min: bounds.min * 1_000_000.0,
|
||||
max: bounds.max * 1_000_000.0,
|
||||
});
|
||||
self.is_normalized_to_ns = true;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -376,7 +370,7 @@ impl SegmentHistogramCollector {
|
||||
|
||||
Ok(IntermediateBucketResult::Histogram {
|
||||
buckets,
|
||||
is_date_agg: self.column_type == ColumnType::DateTime,
|
||||
column_type: Some(self.column_type),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -387,9 +381,7 @@ impl SegmentHistogramCollector {
|
||||
accessor_idx: usize,
|
||||
) -> crate::Result<Self> {
|
||||
req.validate()?;
|
||||
if field_type == ColumnType::DateTime {
|
||||
req.normalize_date_time();
|
||||
}
|
||||
req.normalize(field_type);
|
||||
|
||||
let sub_aggregation_blueprint = if sub_aggregation.is_empty() {
|
||||
None
|
||||
@@ -447,7 +439,6 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
|
||||
// memory check upfront
|
||||
let (_, first_bucket_num, last_bucket_num) =
|
||||
generate_bucket_pos_with_opt_minmax(histogram_req, min_max);
|
||||
|
||||
// It's based on user input, so we need to account for overflows
|
||||
let added_buckets = ((last_bucket_num.saturating_sub(first_bucket_num)).max(0) as u64)
|
||||
.saturating_sub(buckets.len() as u64);
|
||||
@@ -491,7 +482,7 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
|
||||
// Convert to BucketEntry
|
||||
pub(crate) fn intermediate_histogram_buckets_to_final_buckets(
|
||||
buckets: Vec<IntermediateHistogramBucketEntry>,
|
||||
is_date_agg: bool,
|
||||
column_type: Option<ColumnType>,
|
||||
histogram_req: &HistogramAggregation,
|
||||
sub_aggregation: &Aggregations,
|
||||
limits: &AggregationLimits,
|
||||
@@ -500,8 +491,8 @@ pub(crate) fn intermediate_histogram_buckets_to_final_buckets(
|
||||
// The request used in the the call to final is not yet be normalized.
|
||||
// Normalization is changing the precision from milliseconds to nanoseconds.
|
||||
let mut histogram_req = histogram_req.clone();
|
||||
if is_date_agg {
|
||||
histogram_req.normalize_date_time();
|
||||
if let Some(column_type) = column_type {
|
||||
histogram_req.normalize(column_type);
|
||||
}
|
||||
let mut buckets = if histogram_req.min_doc_count() == 0 {
|
||||
// With min_doc_count != 0, we may need to add buckets, so that there are no
|
||||
@@ -525,7 +516,7 @@ pub(crate) fn intermediate_histogram_buckets_to_final_buckets(
|
||||
|
||||
// If we have a date type on the histogram buckets, we add the `key_as_string` field as rfc339
|
||||
// and normalize from nanoseconds to milliseconds
|
||||
if is_date_agg {
|
||||
if column_type == Some(ColumnType::DateTime) {
|
||||
for bucket in buckets.iter_mut() {
|
||||
if let crate::aggregation::Key::F64(ref mut val) = bucket.key {
|
||||
let key_as_string = format_date(*val as i64)?;
|
||||
@@ -598,13 +589,10 @@ mod tests {
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::agg_result::AggregationResults;
|
||||
use crate::aggregation::tests::{
|
||||
exec_request, exec_request_with_query, exec_request_with_query_and_memory_limit,
|
||||
get_test_index_2_segments, get_test_index_from_values, get_test_index_with_num_docs,
|
||||
};
|
||||
use crate::aggregation::AggregationCollector;
|
||||
use crate::query::AllQuery;
|
||||
|
||||
#[test]
|
||||
fn histogram_test_crooked_values() -> crate::Result<()> {
|
||||
@@ -1356,35 +1344,6 @@ mod tests {
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
#[test]
|
||||
fn test_aggregation_histogram_empty_index() -> crate::Result<()> {
|
||||
// test index without segments
|
||||
let values = vec![];
|
||||
|
||||
let index = get_test_index_from_values(false, &values)?;
|
||||
|
||||
let agg_req_1: Aggregations = serde_json::from_value(json!({
|
||||
"myhisto": {
|
||||
"histogram": {
|
||||
"field": "score",
|
||||
"interval": 10.0
|
||||
},
|
||||
}
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, Default::default());
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
|
||||
|
||||
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
|
||||
// Make sure the result structure is correct
|
||||
assert_eq!(res["myhisto"]["buckets"].as_array().unwrap().len(), 0);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,7 +14,9 @@ use crate::aggregation::intermediate_agg_result::{
|
||||
use crate::aggregation::segment_agg_result::{
|
||||
build_segment_agg_collector, SegmentAggregationCollector,
|
||||
};
|
||||
use crate::aggregation::*;
|
||||
use crate::aggregation::{
|
||||
f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey,
|
||||
};
|
||||
use crate::TantivyError;
|
||||
|
||||
/// Provide user-defined buckets to aggregate on.
|
||||
@@ -70,19 +72,11 @@ pub struct RangeAggregationRange {
|
||||
pub key: Option<String>,
|
||||
/// The from range value, which is inclusive in the range.
|
||||
/// `None` equals to an open ended interval.
|
||||
#[serde(
|
||||
skip_serializing_if = "Option::is_none",
|
||||
default,
|
||||
deserialize_with = "deserialize_option_f64"
|
||||
)]
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub from: Option<f64>,
|
||||
/// The to range value, which is not inclusive in the range.
|
||||
/// `None` equals to an open ended interval.
|
||||
#[serde(
|
||||
skip_serializing_if = "Option::is_none",
|
||||
default,
|
||||
deserialize_with = "deserialize_option_f64"
|
||||
)]
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub to: Option<f64>,
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::fmt::Debug;
|
||||
|
||||
use columnar::{BytesColumn, ColumnType, MonotonicallyMappableToU64, StrColumn};
|
||||
use columnar::{BytesColumn, ColumnType, StrColumn};
|
||||
use rustc_hash::FxHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
@@ -16,7 +16,7 @@ use crate::aggregation::intermediate_agg_result::{
|
||||
use crate::aggregation::segment_agg_result::{
|
||||
build_segment_agg_collector, SegmentAggregationCollector,
|
||||
};
|
||||
use crate::aggregation::{f64_from_fastfield_u64, format_date, Key};
|
||||
use crate::aggregation::{f64_from_fastfield_u64, Key};
|
||||
use crate::error::DataCorruption;
|
||||
use crate::TantivyError;
|
||||
|
||||
@@ -99,15 +99,24 @@ pub struct TermsAggregation {
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub size: Option<u32>,
|
||||
|
||||
/// To get more accurate results, we fetch more than `size` from each segment.
|
||||
/// Unused by tantivy.
|
||||
///
|
||||
/// Since tantivy doesn't know shards, this parameter is merely there to be used by consumers
|
||||
/// of tantivy. shard_size is the number of terms returned by each shard.
|
||||
/// The default value in elasticsearch is size * 1.5 + 10.
|
||||
///
|
||||
/// Should never be smaller than size.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
#[serde(alias = "shard_size")]
|
||||
pub split_size: Option<u32>,
|
||||
|
||||
/// The get more accurate results, we fetch more than `size` from each segment.
|
||||
///
|
||||
/// Increasing this value is will increase the cost for more accuracy.
|
||||
///
|
||||
/// Defaults to 10 * size.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
#[serde(alias = "segment_size")]
|
||||
#[serde(alias = "split_size")]
|
||||
pub shard_size: Option<u32>,
|
||||
pub segment_size: Option<u32>,
|
||||
|
||||
/// If you set the `show_term_doc_count_error` parameter to true, the terms aggregation will
|
||||
/// include doc_count_error_upper_bound, which is an upper bound to the error on the
|
||||
@@ -196,7 +205,7 @@ impl TermsAggregationInternal {
|
||||
pub(crate) fn from_req(req: &TermsAggregation) -> Self {
|
||||
let size = req.size.unwrap_or(10);
|
||||
|
||||
let mut segment_size = req.shard_size.unwrap_or(size * 10);
|
||||
let mut segment_size = req.segment_size.unwrap_or(size * 10);
|
||||
|
||||
let order = req.order.clone().unwrap_or_default();
|
||||
segment_size = segment_size.max(size);
|
||||
@@ -247,7 +256,7 @@ pub struct SegmentTermCollector {
|
||||
term_buckets: TermBuckets,
|
||||
req: TermsAggregationInternal,
|
||||
blueprint: Option<Box<dyn SegmentAggregationCollector>>,
|
||||
column_type: ColumnType,
|
||||
field_type: ColumnType,
|
||||
accessor_idx: usize,
|
||||
}
|
||||
|
||||
@@ -346,7 +355,7 @@ impl SegmentTermCollector {
|
||||
field_type: ColumnType,
|
||||
accessor_idx: usize,
|
||||
) -> crate::Result<Self> {
|
||||
if field_type == ColumnType::Bytes {
|
||||
if field_type == ColumnType::Bytes || field_type == ColumnType::Bool {
|
||||
return Err(TantivyError::InvalidArgument(format!(
|
||||
"terms aggregation is not supported for column type {:?}",
|
||||
field_type
|
||||
@@ -380,7 +389,7 @@ impl SegmentTermCollector {
|
||||
req: TermsAggregationInternal::from_req(req),
|
||||
term_buckets,
|
||||
blueprint,
|
||||
column_type: field_type,
|
||||
field_type,
|
||||
accessor_idx,
|
||||
})
|
||||
}
|
||||
@@ -457,7 +466,7 @@ impl SegmentTermCollector {
|
||||
Ok(intermediate_entry)
|
||||
};
|
||||
|
||||
if self.column_type == ColumnType::Str {
|
||||
if self.field_type == ColumnType::Str {
|
||||
let term_dict = agg_with_accessor
|
||||
.str_dict_column
|
||||
.as_ref()
|
||||
@@ -522,34 +531,21 @@ impl SegmentTermCollector {
|
||||
});
|
||||
}
|
||||
}
|
||||
} else if self.column_type == ColumnType::DateTime {
|
||||
for (val, doc_count) in entries {
|
||||
let intermediate_entry = into_intermediate_bucket_entry(val, doc_count)?;
|
||||
let val = i64::from_u64(val);
|
||||
let date = format_date(val)?;
|
||||
dict.insert(IntermediateKey::Str(date), intermediate_entry);
|
||||
}
|
||||
} else if self.column_type == ColumnType::Bool {
|
||||
for (val, doc_count) in entries {
|
||||
let intermediate_entry = into_intermediate_bucket_entry(val, doc_count)?;
|
||||
let val = bool::from_u64(val);
|
||||
dict.insert(IntermediateKey::Bool(val), intermediate_entry);
|
||||
}
|
||||
} else {
|
||||
for (val, doc_count) in entries {
|
||||
let intermediate_entry = into_intermediate_bucket_entry(val, doc_count)?;
|
||||
let val = f64_from_fastfield_u64(val, &self.column_type);
|
||||
let val = f64_from_fastfield_u64(val, &self.field_type);
|
||||
dict.insert(IntermediateKey::F64(val), intermediate_entry);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(IntermediateBucketResult::Terms {
|
||||
buckets: IntermediateTermBucketResult {
|
||||
Ok(IntermediateBucketResult::Terms(
|
||||
IntermediateTermBucketResult {
|
||||
entries: dict,
|
||||
sum_other_doc_count,
|
||||
doc_count_error_upper_bound: term_doc_count_before_cutoff,
|
||||
},
|
||||
})
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -587,9 +583,6 @@ pub(crate) fn cut_off_buckets<T: GetDocCount + Debug>(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common::DateTime;
|
||||
use time::{Date, Month};
|
||||
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::tests::{
|
||||
exec_request, exec_request_with_query, exec_request_with_query_and_memory_limit,
|
||||
@@ -598,7 +591,7 @@ mod tests {
|
||||
use crate::aggregation::AggregationLimits;
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::schema::{Schema, FAST, STRING};
|
||||
use crate::{Index, IndexWriter};
|
||||
use crate::Index;
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_test_single_segment() -> crate::Result<()> {
|
||||
@@ -1362,7 +1355,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_different_tokenizer_on_ff_test() -> crate::Result<()> {
|
||||
let terms = vec!["Hello Hello", "Hallo Hallo", "Hallo Hallo"];
|
||||
let terms = vec!["Hello Hello", "Hallo Hallo"];
|
||||
|
||||
let index = get_test_index_from_terms(true, &[terms])?;
|
||||
|
||||
@@ -1380,7 +1373,7 @@ mod tests {
|
||||
println!("{}", serde_json::to_string_pretty(&res).unwrap());
|
||||
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["key"], "Hallo Hallo");
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 2);
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 1);
|
||||
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["key"], "Hello Hello");
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 1);
|
||||
@@ -1470,7 +1463,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
// => Segment with empty json
|
||||
index_writer.add_document(doc!()).unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
@@ -1820,111 +1813,4 @@ mod tests {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_date() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let date_field = schema_builder.add_date_field("date_field", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut writer = index.writer_with_num_threads(1, 15_000_000)?;
|
||||
writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1982, Month::September, 17)?.with_hms(0, 0, 0)?)))?;
|
||||
writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1982, Month::September, 17)?.with_hms(0, 0, 0)?)))?;
|
||||
writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1983, Month::September, 27)?.with_hms(0, 0, 0)?)))?;
|
||||
writer.commit()?;
|
||||
}
|
||||
|
||||
let agg_req: Aggregations = serde_json::from_value(json!({
|
||||
"my_date": {
|
||||
"terms": {
|
||||
"field": "date_field"
|
||||
},
|
||||
}
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
|
||||
// date_field field
|
||||
assert_eq!(res["my_date"]["buckets"][0]["key"], "1982-09-17T00:00:00Z");
|
||||
assert_eq!(res["my_date"]["buckets"][0]["doc_count"], 2);
|
||||
assert_eq!(res["my_date"]["buckets"][1]["key"], "1983-09-27T00:00:00Z");
|
||||
assert_eq!(res["my_date"]["buckets"][1]["doc_count"], 1);
|
||||
assert_eq!(res["my_date"]["buckets"][2]["key"], serde_json::Value::Null);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
#[test]
|
||||
fn terms_aggregation_date_missing() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let date_field = schema_builder.add_date_field("date_field", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut writer = index.writer_with_num_threads(1, 15_000_000)?;
|
||||
writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1982, Month::September, 17)?.with_hms(0, 0, 0)?)))?;
|
||||
writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1982, Month::September, 17)?.with_hms(0, 0, 0)?)))?;
|
||||
writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1983, Month::September, 27)?.with_hms(0, 0, 0)?)))?;
|
||||
writer.add_document(doc!())?;
|
||||
writer.commit()?;
|
||||
}
|
||||
|
||||
let agg_req: Aggregations = serde_json::from_value(json!({
|
||||
"my_date": {
|
||||
"terms": {
|
||||
"field": "date_field",
|
||||
"missing": "1982-09-17T00:00:00Z"
|
||||
},
|
||||
}
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
|
||||
// date_field field
|
||||
assert_eq!(res["my_date"]["buckets"][0]["key"], "1982-09-17T00:00:00Z");
|
||||
assert_eq!(res["my_date"]["buckets"][0]["doc_count"], 3);
|
||||
assert_eq!(res["my_date"]["buckets"][1]["key"], "1983-09-27T00:00:00Z");
|
||||
assert_eq!(res["my_date"]["buckets"][1]["doc_count"], 1);
|
||||
assert_eq!(res["my_date"]["buckets"][2]["key"], serde_json::Value::Null);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_bool() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_bool_field("bool_field", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut writer = index.writer_with_num_threads(1, 15_000_000)?;
|
||||
writer.add_document(doc!(field=>true))?;
|
||||
writer.add_document(doc!(field=>false))?;
|
||||
writer.add_document(doc!(field=>true))?;
|
||||
writer.commit()?;
|
||||
}
|
||||
|
||||
let agg_req: Aggregations = serde_json::from_value(json!({
|
||||
"my_bool": {
|
||||
"terms": {
|
||||
"field": "bool_field"
|
||||
},
|
||||
}
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
|
||||
assert_eq!(res["my_bool"]["buckets"][0]["key"], 1.0);
|
||||
assert_eq!(res["my_bool"]["buckets"][0]["key_as_string"], "true");
|
||||
assert_eq!(res["my_bool"]["buckets"][0]["doc_count"], 2);
|
||||
assert_eq!(res["my_bool"]["buckets"][1]["key"], 0.0);
|
||||
assert_eq!(res["my_bool"]["buckets"][1]["key_as_string"], "false");
|
||||
assert_eq!(res["my_bool"]["buckets"][1]["doc_count"], 1);
|
||||
assert_eq!(res["my_bool"]["buckets"][2]["key"], serde_json::Value::Null);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
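For orientation, the flow these date and bool terms tests exercise can also be driven through the public API rather than the `exec_request_with_query` helper. A minimal sketch, assuming the 0.21-era entry points that appear elsewhere in this diff (`AggregationCollector::from_aggs`, `AllQuery`, `writer_with_num_threads`) plus `serde_json` and the `time` crate; field names and values simply mirror the tests above:

```rust
// Minimal sketch: terms aggregation over a date fast field with a "missing"
// fallback, mirroring terms_aggregation_date_missing above but going through
// the public collector API instead of the test helpers.
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;
use tantivy::schema::{Schema, FAST};
use tantivy::{doc, DateTime, Index};
use time::{Date, Month};

fn terms_with_missing_date() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let date_field = schema_builder.add_date_field("date_field", FAST);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer_with_num_threads(1, 15_000_000)?;
    let day = Date::from_calendar_date(1982, Month::September, 17)
        .expect("valid date")
        .with_hms(0, 0, 0)
        .expect("valid time");
    writer.add_document(doc!(date_field => DateTime::from_primitive(day)))?;
    writer.add_document(doc!())?; // no value: counted under the "missing" key
    writer.commit()?;

    let agg_req: Aggregations = serde_json::from_value(serde_json::json!({
        "my_date": {
            "terms": { "field": "date_field", "missing": "1982-09-17T00:00:00Z" }
        }
    }))?;
    let collector = AggregationCollector::from_aggs(agg_req, Default::default());
    let searcher = index.reader()?.searcher();
    let res = searcher.search(&AllQuery, &collector)?;
    println!("{}", serde_json::to_string_pretty(&res).unwrap());
    Ok(())
}
```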
@@ -73,13 +73,11 @@ impl SegmentAggregationCollector for TermMissingAgg {
|
||||
|
||||
entries.insert(missing.into(), missing_entry);
|
||||
|
||||
let bucket = IntermediateBucketResult::Terms {
|
||||
buckets: IntermediateTermBucketResult {
|
||||
entries,
|
||||
sum_other_doc_count: 0,
|
||||
doc_count_error_upper_bound: 0,
|
||||
},
|
||||
};
|
||||
let bucket = IntermediateBucketResult::Terms(IntermediateTermBucketResult {
|
||||
entries,
|
||||
sum_other_doc_count: 0,
|
||||
doc_count_error_upper_bound: 0,
|
||||
});
|
||||
|
||||
results.push(name, IntermediateAggregationResult::Bucket(bucket))?;
|
||||
|
||||
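This hunk (and several below) switches `IntermediateBucketResult::Terms` from a struct variant to a newtype variant. As a generic illustration of what that changes at construction and match sites (stand-in types, not tantivy's own):

```rust
// Stand-in types only; shows the struct-variant -> newtype-variant change.
struct TermBuckets { entries: Vec<(String, u64)> }

enum Old { Terms { buckets: TermBuckets } }
enum New { Terms(TermBuckets) }

fn len_old(r: &Old) -> usize {
    match r { Old::Terms { buckets } => buckets.entries.len() }
}

fn len_new(r: &New) -> usize {
    match r { New::Terms(buckets) => buckets.entries.len() }
}
```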
@@ -92,10 +90,7 @@ impl SegmentAggregationCollector for TermMissingAgg {
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
) -> crate::Result<()> {
|
||||
let agg = &mut agg_with_accessor.aggs.values[self.accessor_idx];
|
||||
let has_value = agg
|
||||
.accessors
|
||||
.iter()
|
||||
.any(|(acc, _)| acc.index.has_value(doc));
|
||||
let has_value = agg.accessors.iter().any(|acc| acc.index.has_value(doc));
|
||||
if !has_value {
|
||||
self.missing_count += 1;
|
||||
if let Some(sub_agg) = self.sub_agg.as_mut() {
|
||||
@@ -122,7 +117,7 @@ mod tests {
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::tests::exec_request_with_query;
|
||||
use crate::schema::{Schema, FAST};
|
||||
use crate::{Index, IndexWriter};
|
||||
use crate::Index;
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_missing_mixed_type_mult_seg_sub_agg() -> crate::Result<()> {
|
||||
@@ -131,7 +126,7 @@ mod tests {
|
||||
let score = schema_builder.add_f64_field("score", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
// => Segment with all values numeric
|
||||
index_writer
|
||||
.add_document(doc!(score => 1.0, json => json!({"mixed_type": 10.0})))
|
||||
@@ -191,7 +186,7 @@ mod tests {
|
||||
let score = schema_builder.add_f64_field("score", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
// => Segment with all values numeric
|
||||
index_writer.add_document(doc!(score => 1.0, json => json!({"mixed_type": 10.0})))?;
|
||||
index_writer.add_document(doc!(score => 5.0))?;
|
||||
@@ -236,7 +231,7 @@ mod tests {
|
||||
let score = schema_builder.add_f64_field("score", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
|
||||
index_writer.add_document(doc!(score => 5.0))?;
|
||||
index_writer.commit().unwrap();
|
||||
@@ -283,7 +278,7 @@ mod tests {
|
||||
let score = schema_builder.add_f64_field("score", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
|
||||
index_writer.add_document(doc!(score => 5.0))?;
|
||||
index_writer.add_document(doc!(score => 5.0))?;
|
||||
@@ -328,7 +323,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
// => Segment with all values numeric
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": 10.0})))
|
||||
@@ -390,7 +385,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
// => Segment with all values numeric
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": 10.0})))
|
||||
@@ -432,7 +427,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
// => Segment with all values numeric
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": 10.0})))
|
||||
|
||||
@@ -8,7 +8,7 @@ use super::segment_agg_result::{
|
||||
};
|
||||
use crate::aggregation::agg_req_with_accessor::get_aggs_with_segment_accessor_and_validate;
|
||||
use crate::collector::{Collector, SegmentCollector};
|
||||
use crate::{DocId, SegmentOrdinal, SegmentReader, TantivyError};
|
||||
use crate::{DocId, SegmentReader, TantivyError};
|
||||
|
||||
/// The default max bucket count, before the aggregation fails.
|
||||
pub const DEFAULT_BUCKET_LIMIT: u32 = 65000;
|
||||
@@ -64,15 +64,10 @@ impl Collector for DistributedAggregationCollector {
|
||||
|
||||
fn for_segment(
|
||||
&self,
|
||||
segment_local_id: crate::SegmentOrdinal,
|
||||
_segment_local_id: crate::SegmentOrdinal,
|
||||
reader: &crate::SegmentReader,
|
||||
) -> crate::Result<Self::Child> {
|
||||
AggregationSegmentCollector::from_agg_req_and_reader(
|
||||
&self.agg,
|
||||
reader,
|
||||
segment_local_id,
|
||||
&self.limits,
|
||||
)
|
||||
AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader, &self.limits)
|
||||
}
|
||||
|
||||
fn requires_scoring(&self) -> bool {
|
||||
@@ -94,15 +89,10 @@ impl Collector for AggregationCollector {
|
||||
|
||||
fn for_segment(
|
||||
&self,
|
||||
segment_local_id: crate::SegmentOrdinal,
|
||||
_segment_local_id: crate::SegmentOrdinal,
|
||||
reader: &crate::SegmentReader,
|
||||
) -> crate::Result<Self::Child> {
|
||||
AggregationSegmentCollector::from_agg_req_and_reader(
|
||||
&self.agg,
|
||||
reader,
|
||||
segment_local_id,
|
||||
&self.limits,
|
||||
)
|
||||
AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader, &self.limits)
|
||||
}
|
||||
|
||||
fn requires_scoring(&self) -> bool {
|
||||
@@ -145,11 +135,10 @@ impl AggregationSegmentCollector {
|
||||
pub fn from_agg_req_and_reader(
|
||||
agg: &Aggregations,
|
||||
reader: &SegmentReader,
|
||||
segment_ordinal: SegmentOrdinal,
|
||||
limits: &AggregationLimits,
|
||||
) -> crate::Result<Self> {
|
||||
let mut aggs_with_accessor =
|
||||
get_aggs_with_segment_accessor_and_validate(agg, reader, segment_ordinal, limits)?;
|
||||
get_aggs_with_segment_accessor_and_validate(agg, reader, limits)?;
|
||||
let result =
|
||||
BufAggregationCollector::new(build_segment_agg_collector(&mut aggs_with_accessor)?);
|
||||
Ok(AggregationSegmentCollector {
|
||||
|
||||
@@ -19,7 +19,7 @@ use super::bucket::{
|
||||
};
|
||||
use super::metric::{
|
||||
IntermediateAverage, IntermediateCount, IntermediateMax, IntermediateMin, IntermediateStats,
|
||||
IntermediateSum, PercentilesCollector, TopHitsCollector,
|
||||
IntermediateSum, PercentilesCollector,
|
||||
};
|
||||
use super::segment_agg_result::AggregationLimits;
|
||||
use super::{format_date, AggregationError, Key, SerializedKey};
|
||||
@@ -41,8 +41,6 @@ pub struct IntermediateAggregationResults {
|
||||
/// This might seem redundant with `Key`, but the point is to have a different
|
||||
/// Serialize implementation.
|
||||
pub enum IntermediateKey {
|
||||
/// Bool key
|
||||
Bool(bool),
|
||||
/// String key
|
||||
Str(String),
|
||||
/// `f64` key
|
||||
@@ -61,7 +59,6 @@ impl From<IntermediateKey> for Key {
|
||||
match value {
|
||||
IntermediateKey::Str(s) => Self::Str(s),
|
||||
IntermediateKey::F64(f) => Self::F64(f),
|
||||
IntermediateKey::Bool(f) => Self::F64(f as u64 as f64),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -74,7 +71,6 @@ impl std::hash::Hash for IntermediateKey {
|
||||
match self {
|
||||
IntermediateKey::Str(text) => text.hash(state),
|
||||
IntermediateKey::F64(val) => val.to_bits().hash(state),
|
||||
IntermediateKey::Bool(val) => val.hash(state),
|
||||
}
|
||||
}
|
||||
}
|
||||
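One detail worth calling out from the `Hash` impl above: `f64` does not implement `Hash`, so float keys are hashed through their bit pattern. A standalone sketch of the same pattern, using a stand-in type rather than tantivy's `IntermediateKey`:

```rust
use std::hash::{Hash, Hasher};

// Stand-in for a float-keyed bucket key; f64 itself is not Hash, so we hash
// the raw bit pattern, mirroring `val.to_bits().hash(state)` above.
struct F64Key(f64);

impl Hash for F64Key {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.to_bits().hash(state);
    }
}

impl PartialEq for F64Key {
    fn eq(&self, other: &Self) -> bool {
        self.0.to_bits() == other.0.to_bits()
    }
}
impl Eq for F64Key {}
```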
@@ -170,22 +166,16 @@ impl IntermediateAggregationResults {
|
||||
pub(crate) fn empty_from_req(req: &Aggregation) -> IntermediateAggregationResult {
|
||||
use AggregationVariants::*;
|
||||
match req.agg {
|
||||
Terms(_) => IntermediateAggregationResult::Bucket(IntermediateBucketResult::Terms {
|
||||
buckets: Default::default(),
|
||||
}),
|
||||
Terms(_) => IntermediateAggregationResult::Bucket(IntermediateBucketResult::Terms(
|
||||
Default::default(),
|
||||
)),
|
||||
Range(_) => IntermediateAggregationResult::Bucket(IntermediateBucketResult::Range(
|
||||
Default::default(),
|
||||
)),
|
||||
Histogram(_) => {
|
||||
Histogram(_) | DateHistogram(_) => {
|
||||
IntermediateAggregationResult::Bucket(IntermediateBucketResult::Histogram {
|
||||
buckets: Vec::new(),
|
||||
is_date_agg: false,
|
||||
})
|
||||
}
|
||||
DateHistogram(_) => {
|
||||
IntermediateAggregationResult::Bucket(IntermediateBucketResult::Histogram {
|
||||
buckets: Vec::new(),
|
||||
is_date_agg: true,
|
||||
column_type: None,
|
||||
})
|
||||
}
|
||||
Average(_) => IntermediateAggregationResult::Metric(IntermediateMetricResult::Average(
|
||||
@@ -209,9 +199,6 @@ pub(crate) fn empty_from_req(req: &Aggregation) -> IntermediateAggregationResult
|
||||
Percentiles(_) => IntermediateAggregationResult::Metric(
|
||||
IntermediateMetricResult::Percentiles(PercentilesCollector::default()),
|
||||
),
|
||||
TopHits(_) => IntermediateAggregationResult::Metric(IntermediateMetricResult::TopHits(
|
||||
TopHitsCollector::default(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -272,8 +259,6 @@ pub enum IntermediateMetricResult {
|
||||
Stats(IntermediateStats),
|
||||
/// Intermediate sum result.
|
||||
Sum(IntermediateSum),
|
||||
/// Intermediate top_hits result
|
||||
TopHits(TopHitsCollector),
|
||||
}
|
||||
|
||||
impl IntermediateMetricResult {
|
||||
@@ -301,13 +286,9 @@ impl IntermediateMetricResult {
|
||||
percentiles
|
||||
.into_final_result(req.agg.as_percentile().expect("unexpected metric type")),
|
||||
),
|
||||
IntermediateMetricResult::TopHits(top_hits) => {
|
||||
MetricResult::TopHits(top_hits.finalize())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: this is our top-of-the-chain fruit merge mech
|
||||
fn merge_fruits(&mut self, other: IntermediateMetricResult) -> crate::Result<()> {
|
||||
match (self, other) {
|
||||
(
|
||||
@@ -343,9 +324,6 @@ impl IntermediateMetricResult {
|
||||
) => {
|
||||
left.merge_fruits(right)?;
|
||||
}
|
||||
(IntermediateMetricResult::TopHits(left), IntermediateMetricResult::TopHits(right)) => {
|
||||
left.merge_fruits(right)?;
|
||||
}
|
||||
_ => {
|
||||
panic!("incompatible fruit types in tree or missing merge_fruits handler");
|
||||
}
|
||||
@@ -365,16 +343,13 @@ pub enum IntermediateBucketResult {
|
||||
/// This is the histogram entry for a bucket, which contains a key, count, and optionally
|
||||
/// sub_aggregations.
|
||||
Histogram {
|
||||
/// The column_type of the underlying `Column` is DateTime
|
||||
is_date_agg: bool,
|
||||
/// The histogram buckets
|
||||
/// The column_type of the underlying `Column`
|
||||
column_type: Option<ColumnType>,
|
||||
/// The buckets
|
||||
buckets: Vec<IntermediateHistogramBucketEntry>,
|
||||
},
|
||||
/// Term aggregation
|
||||
Terms {
|
||||
/// The term buckets
|
||||
buckets: IntermediateTermBucketResult,
|
||||
},
|
||||
Terms(IntermediateTermBucketResult),
|
||||
}
|
||||
|
||||
impl IntermediateBucketResult {
|
||||
@@ -424,7 +399,7 @@ impl IntermediateBucketResult {
|
||||
Ok(BucketResult::Range { buckets })
|
||||
}
|
||||
IntermediateBucketResult::Histogram {
|
||||
is_date_agg,
|
||||
column_type,
|
||||
buckets,
|
||||
} => {
|
||||
let histogram_req = &req
|
||||
@@ -433,7 +408,7 @@ impl IntermediateBucketResult {
|
||||
.expect("unexpected aggregation, expected histogram aggregation");
|
||||
let buckets = intermediate_histogram_buckets_to_final_buckets(
|
||||
buckets,
|
||||
is_date_agg,
|
||||
column_type,
|
||||
histogram_req,
|
||||
req.sub_aggregation(),
|
||||
limits,
|
||||
@@ -451,7 +426,7 @@ impl IntermediateBucketResult {
|
||||
};
|
||||
Ok(BucketResult::Histogram { buckets })
|
||||
}
|
||||
IntermediateBucketResult::Terms { buckets: terms } => terms.into_final_result(
|
||||
IntermediateBucketResult::Terms(terms) => terms.into_final_result(
|
||||
req.agg
|
||||
.as_term()
|
||||
.expect("unexpected aggregation, expected term aggregation"),
|
||||
@@ -464,12 +439,8 @@ impl IntermediateBucketResult {
|
||||
fn merge_fruits(&mut self, other: IntermediateBucketResult) -> crate::Result<()> {
|
||||
match (self, other) {
|
||||
(
|
||||
IntermediateBucketResult::Terms {
|
||||
buckets: term_res_left,
|
||||
},
|
||||
IntermediateBucketResult::Terms {
|
||||
buckets: term_res_right,
|
||||
},
|
||||
IntermediateBucketResult::Terms(term_res_left),
|
||||
IntermediateBucketResult::Terms(term_res_right),
|
||||
) => {
|
||||
merge_maps(&mut term_res_left.entries, term_res_right.entries)?;
|
||||
term_res_left.sum_other_doc_count += term_res_right.sum_other_doc_count;
|
||||
@@ -486,11 +457,11 @@ impl IntermediateBucketResult {
|
||||
(
|
||||
IntermediateBucketResult::Histogram {
|
||||
buckets: buckets_left,
|
||||
is_date_agg: _,
|
||||
..
|
||||
},
|
||||
IntermediateBucketResult::Histogram {
|
||||
buckets: buckets_right,
|
||||
is_date_agg: _,
|
||||
..
|
||||
},
|
||||
) => {
|
||||
let buckets: Result<Vec<IntermediateHistogramBucketEntry>, TantivyError> =
|
||||
@@ -553,15 +524,8 @@ impl IntermediateTermBucketResult {
|
||||
.into_iter()
|
||||
.filter(|bucket| bucket.1.doc_count as u64 >= req.min_doc_count)
|
||||
.map(|(key, entry)| {
|
||||
let key_as_string = match key {
|
||||
IntermediateKey::Bool(key) => {
|
||||
let val = if key { "true" } else { "false" };
|
||||
Some(val.to_string())
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
Ok(BucketEntry {
|
||||
key_as_string,
|
||||
key_as_string: None,
|
||||
key: key.into(),
|
||||
doc_count: entry.doc_count as u64,
|
||||
sub_aggregation: entry
|
||||
|
||||
@@ -2,8 +2,7 @@ use std::fmt::Debug;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::*;
|
||||
use super::{IntermediateStats, SegmentStatsCollector};
|
||||
|
||||
/// A single-value metric aggregation that computes the average of numeric values that are
|
||||
/// extracted from the aggregated documents.
|
||||
@@ -25,7 +24,7 @@ pub struct AverageAggregation {
|
||||
/// By default they will be ignored but it is also possible to treat them as if they had a
|
||||
/// value. Examples in JSON format:
|
||||
/// { "field": "my_numbers", "missing": "10.0" }
|
||||
#[serde(default, deserialize_with = "deserialize_option_f64")]
|
||||
#[serde(default)]
|
||||
pub missing: Option<f64>,
|
||||
}
|
||||
|
||||
@@ -66,71 +65,3 @@ impl IntermediateAverage {
|
||||
self.stats.finalize().avg
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn deserialization_with_missing_test1() {
|
||||
let json = r#"{
|
||||
"field": "score",
|
||||
"missing": "10.0"
|
||||
}"#;
|
||||
let avg: AverageAggregation = serde_json::from_str(json).unwrap();
|
||||
assert_eq!(avg.field, "score");
|
||||
assert_eq!(avg.missing, Some(10.0));
|
||||
// no dot
|
||||
let json = r#"{
|
||||
"field": "score",
|
||||
"missing": "10"
|
||||
}"#;
|
||||
let avg: AverageAggregation = serde_json::from_str(json).unwrap();
|
||||
assert_eq!(avg.field, "score");
|
||||
assert_eq!(avg.missing, Some(10.0));
|
||||
|
||||
// from value
|
||||
let avg: AverageAggregation = serde_json::from_value(json!({
|
||||
"field": "score_f64",
|
||||
"missing": 10u64,
|
||||
}))
|
||||
.unwrap();
|
||||
assert_eq!(avg.missing, Some(10.0));
|
||||
// from value
|
||||
let avg: AverageAggregation = serde_json::from_value(json!({
|
||||
"field": "score_f64",
|
||||
"missing": 10u32,
|
||||
}))
|
||||
.unwrap();
|
||||
assert_eq!(avg.missing, Some(10.0));
|
||||
let avg: AverageAggregation = serde_json::from_value(json!({
|
||||
"field": "score_f64",
|
||||
"missing": 10i8,
|
||||
}))
|
||||
.unwrap();
|
||||
assert_eq!(avg.missing, Some(10.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deserialization_with_missing_test_fail() {
|
||||
let json = r#"{
|
||||
"field": "score",
|
||||
"missing": "a"
|
||||
}"#;
|
||||
let avg: Result<AverageAggregation, _> = serde_json::from_str(json);
|
||||
assert!(avg.is_err());
|
||||
assert!(avg
|
||||
.unwrap_err()
|
||||
.to_string()
|
||||
.contains("Failed to parse f64 from string: \"a\""));
|
||||
|
||||
// Disallow NaN
|
||||
let json = r#"{
|
||||
"field": "score",
|
||||
"missing": "NaN"
|
||||
}"#;
|
||||
let avg: Result<AverageAggregation, _> = serde_json::from_str(json);
|
||||
assert!(avg.is_err());
|
||||
assert!(avg.unwrap_err().to_string().contains("NaN"));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,8 +2,7 @@ use std::fmt::Debug;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::*;
|
||||
use super::{IntermediateStats, SegmentStatsCollector};
|
||||
|
||||
/// A single-value metric aggregation that counts the number of values that are
|
||||
/// extracted from the aggregated documents.
|
||||
@@ -25,7 +24,7 @@ pub struct CountAggregation {
|
||||
/// By default they will be ignored but it is also possible to treat them as if they had a
|
||||
/// value. Examples in JSON format:
|
||||
/// { "field": "my_numbers", "missing": "10.0" }
|
||||
#[serde(default, deserialize_with = "deserialize_option_f64")]
|
||||
#[serde(default)]
|
||||
pub missing: Option<f64>,
|
||||
}
|
||||
|
||||
|
||||
@@ -2,8 +2,7 @@ use std::fmt::Debug;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::*;
|
||||
use super::{IntermediateStats, SegmentStatsCollector};
|
||||
|
||||
/// A single-value metric aggregation that computes the maximum of numeric values that are
|
||||
/// extracted from the aggregated documents.
|
||||
@@ -25,7 +24,7 @@ pub struct MaxAggregation {
|
||||
/// By default they will be ignored but it is also possible to treat them as if they had a
|
||||
/// value. Examples in JSON format:
|
||||
/// { "field": "my_numbers", "missing": "10.0" }
|
||||
#[serde(default, deserialize_with = "deserialize_option_f64")]
|
||||
#[serde(default)]
|
||||
pub missing: Option<f64>,
|
||||
}
|
||||
|
||||
@@ -72,7 +71,7 @@ mod tests {
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::tests::exec_request_with_query;
|
||||
use crate::schema::{Schema, FAST};
|
||||
use crate::{Index, IndexWriter};
|
||||
use crate::Index;
|
||||
|
||||
#[test]
|
||||
fn test_max_agg_with_missing() -> crate::Result<()> {
|
||||
@@ -80,7 +79,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
// => Segment with empty json
|
||||
index_writer.add_document(doc!()).unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
|
||||
@@ -2,8 +2,7 @@ use std::fmt::Debug;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::*;
|
||||
use super::{IntermediateStats, SegmentStatsCollector};
|
||||
|
||||
/// A single-value metric aggregation that computes the minimum of numeric values that are
|
||||
/// extracted from the aggregated documents.
|
||||
@@ -25,7 +24,7 @@ pub struct MinAggregation {
|
||||
/// By default they will be ignored but it is also possible to treat them as if they had a
|
||||
/// value. Examples in JSON format:
|
||||
/// { "field": "my_numbers", "missing": "10.0" }
|
||||
#[serde(default, deserialize_with = "deserialize_option_f64")]
|
||||
#[serde(default)]
|
||||
pub missing: Option<f64>,
|
||||
}
|
||||
|
||||
|
||||
@@ -23,8 +23,6 @@ mod min;
|
||||
mod percentiles;
|
||||
mod stats;
|
||||
mod sum;
|
||||
mod top_hits;
|
||||
|
||||
pub use average::*;
|
||||
pub use count::*;
|
||||
pub use max::*;
|
||||
@@ -34,7 +32,6 @@ use rustc_hash::FxHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
pub use stats::*;
|
||||
pub use sum::*;
|
||||
pub use top_hits::*;
|
||||
|
||||
/// Single-metric aggregations use this common result structure.
|
||||
///
|
||||
@@ -84,27 +81,6 @@ pub struct PercentilesMetricResult {
|
||||
pub values: PercentileValues,
|
||||
}
|
||||
|
||||
/// The top_hits metric results entry
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct TopHitsVecEntry {
|
||||
/// The sort values of the document, depending on the sort criteria in the request.
|
||||
pub sort: Vec<Option<u64>>,
|
||||
|
||||
/// Search results, for queries that include field retrieval requests
|
||||
/// (`docvalue_fields`).
|
||||
#[serde(flatten)]
|
||||
pub search_results: FieldRetrivalResult,
|
||||
}
|
||||
|
||||
/// The top_hits metric aggregation returns a list of top hits by sort criteria.
|
||||
///
|
||||
/// The main reason for wrapping it in `hits` is to match elasticsearch output structure.
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct TopHitsMetricResult {
|
||||
/// The result of the top_hits metric.
|
||||
pub hits: Vec<TopHitsVecEntry>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
@@ -112,7 +88,7 @@ mod tests {
|
||||
use crate::aggregation::AggregationCollector;
|
||||
use crate::query::AllQuery;
|
||||
use crate::schema::{NumericOptions, Schema};
|
||||
use crate::{Index, IndexWriter};
|
||||
use crate::Index;
|
||||
|
||||
#[test]
|
||||
fn test_metric_aggregations() {
|
||||
@@ -120,7 +96,7 @@ mod tests {
|
||||
let field_options = NumericOptions::default().set_fast();
|
||||
let field = schema_builder.add_f64_field("price", field_options);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
|
||||
for i in 0..3 {
|
||||
index_writer
|
||||
|
||||
@@ -11,7 +11,7 @@ use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateAggregationResults, IntermediateMetricResult,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||
use crate::aggregation::*;
|
||||
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, AggregationError};
|
||||
use crate::{DocId, TantivyError};
|
||||
|
||||
/// # Percentiles
|
||||
@@ -84,11 +84,7 @@ pub struct PercentilesAggregationReq {
|
||||
/// By default they will be ignored but it is also possible to treat them as if they had a
|
||||
/// value. Examples in JSON format:
|
||||
/// { "field": "my_numbers", "missing": "10.0" }
|
||||
#[serde(
|
||||
skip_serializing_if = "Option::is_none",
|
||||
default,
|
||||
deserialize_with = "deserialize_option_f64"
|
||||
)]
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub missing: Option<f64>,
|
||||
}
|
||||
fn default_percentiles() -> &'static [f64] {
|
||||
@@ -137,6 +133,7 @@ pub(crate) struct SegmentPercentilesCollector {
|
||||
field_type: ColumnType,
|
||||
pub(crate) percentiles: PercentilesCollector,
|
||||
pub(crate) accessor_idx: usize,
|
||||
val_cache: Vec<u64>,
|
||||
missing: Option<u64>,
|
||||
}
|
||||
|
||||
@@ -246,6 +243,7 @@ impl SegmentPercentilesCollector {
|
||||
field_type,
|
||||
percentiles: PercentilesCollector::new(),
|
||||
accessor_idx,
|
||||
val_cache: Default::default(),
|
||||
missing,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateAggregationResults, IntermediateMetricResult,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||
use crate::aggregation::*;
|
||||
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64};
|
||||
use crate::{DocId, TantivyError};
|
||||
|
||||
/// A multi-value metric aggregation that computes a collection of statistics on numeric values that
|
||||
@@ -33,7 +33,7 @@ pub struct StatsAggregation {
|
||||
/// By default they will be ignored but it is also possible to treat them as if they had a
|
||||
/// value. Examples in JSON format:
|
||||
/// { "field": "my_numbers", "missing": "10.0" }
|
||||
#[serde(default, deserialize_with = "deserialize_option_f64")]
|
||||
#[serde(default)]
|
||||
pub missing: Option<f64>,
|
||||
}
|
||||
|
||||
@@ -300,7 +300,7 @@ mod tests {
|
||||
use crate::aggregation::AggregationCollector;
|
||||
use crate::query::{AllQuery, TermQuery};
|
||||
use crate::schema::{IndexRecordOption, Schema, FAST};
|
||||
use crate::{Index, IndexWriter, Term};
|
||||
use crate::{Index, Term};
|
||||
|
||||
#[test]
|
||||
fn test_aggregation_stats_empty_index() -> crate::Result<()> {
|
||||
@@ -494,7 +494,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
// => Segment with empty json
|
||||
index_writer.add_document(doc!()).unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
@@ -541,7 +541,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
// => Segment with empty json
|
||||
index_writer.add_document(doc!()).unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
@@ -580,30 +580,6 @@ mod tests {
|
||||
})
|
||||
);
|
||||
|
||||
// From string
|
||||
let agg_req: Aggregations = serde_json::from_value(json!({
|
||||
"my_stats": {
|
||||
"stats": {
|
||||
"field": "json.partially_empty",
|
||||
"missing": "0.0"
|
||||
},
|
||||
}
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
|
||||
assert_eq!(
|
||||
res["my_stats"],
|
||||
json!({
|
||||
"avg": 2.5,
|
||||
"count": 4,
|
||||
"max": 10.0,
|
||||
"min": 0.0,
|
||||
"sum": 10.0
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -2,8 +2,7 @@ use std::fmt::Debug;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::*;
|
||||
use super::{IntermediateStats, SegmentStatsCollector};
|
||||
|
||||
/// A single-value metric aggregation that sums up numeric values that are
|
||||
/// extracted from the aggregated documents.
|
||||
@@ -25,7 +24,7 @@ pub struct SumAggregation {
|
||||
/// By default they will be ignored but it is also possible to treat them as if they had a
|
||||
/// value. Examples in JSON format:
|
||||
/// { "field": "my_numbers", "missing": "10.0" }
|
||||
#[serde(default, deserialize_with = "deserialize_option_f64")]
|
||||
#[serde(default)]
|
||||
pub missing: Option<f64>,
|
||||
}
|
||||
|
||||
|
||||
@@ -1,837 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Formatter;
|
||||
|
||||
use columnar::{ColumnarReader, DynamicColumn};
|
||||
use regex::Regex;
|
||||
use serde::ser::SerializeMap;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
|
||||
use super::{TopHitsMetricResult, TopHitsVecEntry};
|
||||
use crate::aggregation::bucket::Order;
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateMetricResult,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||
use crate::collector::TopNComputer;
|
||||
use crate::schema::term::JSON_PATH_SEGMENT_SEP_STR;
|
||||
use crate::schema::OwnedValue;
|
||||
use crate::{DocAddress, DocId, SegmentOrdinal};
|
||||
|
||||
/// # Top Hits
|
||||
///
|
||||
/// The top hits aggregation is a useful tool to answer questions like:
|
||||
/// - "What are the most recent posts by each author?"
|
||||
/// - "What are the most popular items in each category?"
|
||||
///
|
||||
/// It does so by keeping track of the most relevant document being aggregated,
|
||||
/// in terms of a sort criterion that can consist of multiple fields and their
|
||||
/// sort-orders (ascending or descending).
|
||||
///
|
||||
/// `top_hits` should not be used as a top-level aggregation. It is intended to be
|
||||
/// used as a sub-aggregation, inside a `terms` aggregation or a `filters` aggregation,
|
||||
/// for example.
|
||||
///
|
||||
/// Note that this aggregator does not return the actual document addresses, but
|
||||
/// rather a list of the values of the fields that were requested to be retrieved.
|
||||
/// These values can be specified in the `docvalue_fields` parameter, which can include
|
||||
/// a list of fast fields to be retrieved. At the moment, only fast fields are supported
|
||||
/// but it is possible that we support the `fields` parameter to retrieve any stored
|
||||
/// field in the future.
|
||||
///
|
||||
/// The following example demonstrates a request for the top_hits aggregation:
|
||||
/// ```JSON
|
||||
/// {
|
||||
/// "aggs": {
|
||||
/// "top_authors": {
|
||||
/// "terms": {
|
||||
/// "field": "author",
|
||||
/// "size": 5
|
||||
/// }
|
||||
/// },
|
||||
/// "aggs": {
|
||||
/// "top_hits": {
|
||||
/// "size": 2,
|
||||
/// "from": 0
|
||||
/// "sort": [
|
||||
/// { "date": "desc" }
|
||||
/// ],
|
||||
/// "docvalue_fields": ["date", "title", "iden"]
|
||||
/// }
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// This request will return an object containing the top two documents, sorted
|
||||
/// by the `date` field in descending order. You can also sort by multiple fields, which
|
||||
/// helps to resolve ties. The aggregation object for each bucket will look like:
|
||||
/// ```JSON
|
||||
/// {
|
||||
/// "hits": [
|
||||
/// {
|
||||
/// "sort": [<time_u64>],
|
||||
/// "docvalue_fields": {
|
||||
/// "date": "<date_RFC3339>",
|
||||
/// "title": "<title>",
|
||||
/// "iden": "<iden>"
|
||||
/// }
|
||||
/// },
|
||||
/// {
|
||||
/// "sort": [<time_u64>]
|
||||
/// "docvalue_fields": {
|
||||
/// "date": "<date_RFC3339>",
|
||||
/// "title": "<title>",
|
||||
/// "iden": "<iden>"
|
||||
/// }
|
||||
/// }
|
||||
/// ]
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
|
||||
pub struct TopHitsAggregation {
|
||||
sort: Vec<KeyOrder>,
|
||||
size: usize,
|
||||
from: Option<usize>,
|
||||
|
||||
#[serde(flatten)]
|
||||
retrieval: RetrievalFields,
|
||||
}
|
||||
|
||||
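Putting the doc comment above into a runnable shape, here is a sketch of issuing such a request through the aggregation framework's public entry points, much as the tests at the bottom of this file do. It only applies on the side of the diff that still ships `top_hits`; the index handle and the docvalue field names are placeholders:

```rust
// Sketch: run a top-level top_hits request against an existing index, as the
// tests in this file do. `index` is assumed to already contain a fast "date"
// field; the docvalue_fields entries are illustrative.
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;

fn run_top_hits(index: &tantivy::Index) -> tantivy::Result<serde_json::Value> {
    let agg_req: Aggregations = serde_json::from_value(serde_json::json!({
        "top_hits_req": {
            "top_hits": {
                "size": 2,
                "from": 0,
                "sort": [ { "date": "desc" } ],
                "docvalue_fields": ["date", "text*"]
            }
        }
    }))?;
    let collector = AggregationCollector::from_aggs(agg_req, Default::default());
    let searcher = index.reader()?.searcher();
    let agg_res = searcher.search(&AllQuery, &collector)?;
    Ok(serde_json::to_value(agg_res).expect("serializable aggregation result"))
}
```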
const fn default_doc_value_fields() -> Vec<String> {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
/// Search query spec for each matched document
|
||||
/// TODO: move this to a common module
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
|
||||
pub struct RetrievalFields {
|
||||
/// The fast fields to return for each hit.
|
||||
/// This is the only variant supported for now.
|
||||
/// TODO: support the {field, format} variant for custom formatting.
|
||||
#[serde(rename = "docvalue_fields")]
|
||||
#[serde(default = "default_doc_value_fields")]
|
||||
pub doc_value_fields: Vec<String>,
|
||||
}
|
||||
|
||||
/// Search query result for each matched document
|
||||
/// TODO: move this to a common module
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
|
||||
pub struct FieldRetrivalResult {
|
||||
/// The fast fields returned for each hit.
|
||||
#[serde(rename = "docvalue_fields")]
|
||||
#[serde(skip_serializing_if = "HashMap::is_empty")]
|
||||
pub doc_value_fields: HashMap<String, OwnedValue>,
|
||||
}
|
||||
|
||||
impl RetrievalFields {
|
||||
fn get_field_names(&self) -> Vec<&str> {
|
||||
self.doc_value_fields.iter().map(|s| s.as_str()).collect()
|
||||
}
|
||||
|
||||
fn resolve_field_names(&mut self, reader: &ColumnarReader) -> crate::Result<()> {
|
||||
// Transform a glob (`pattern*`, for example) into a regex::Regex (`^pattern.*$`)
|
||||
let globbed_string_to_regex = |glob: &str| {
|
||||
// Replace `*` glob with `.*` regex
|
||||
let sanitized = format!("^{}$", regex::escape(glob).replace(r"\*", ".*"));
|
||||
Regex::new(&sanitized.replace('*', ".*")).map_err(|e| {
|
||||
crate::TantivyError::SchemaError(format!(
|
||||
"Invalid regex '{}' in docvalue_fields: {}",
|
||||
glob, e
|
||||
))
|
||||
})
|
||||
};
|
||||
self.doc_value_fields = self
|
||||
.doc_value_fields
|
||||
.iter()
|
||||
.map(|field| {
|
||||
if !field.contains('*')
|
||||
&& reader
|
||||
.iter_columns()?
|
||||
.any(|(name, _)| name.as_str() == field)
|
||||
{
|
||||
return Ok(vec![field.to_owned()]);
|
||||
}
|
||||
|
||||
let pattern = globbed_string_to_regex(field)?;
|
||||
let fields = reader
|
||||
.iter_columns()?
|
||||
.map(|(name, _)| {
|
||||
// normalize path from internal fast field repr
|
||||
name.replace(JSON_PATH_SEGMENT_SEP_STR, ".")
|
||||
})
|
||||
.filter(|name| pattern.is_match(name))
|
||||
.collect::<Vec<_>>();
|
||||
assert!(
|
||||
!fields.is_empty(),
|
||||
"No fields matched the glob '{}' in docvalue_fields",
|
||||
field
|
||||
);
|
||||
Ok(fields)
|
||||
})
|
||||
.collect::<crate::Result<Vec<_>>>()?
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
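The glob handling in `resolve_field_names` above boils down to escaping the pattern and widening the escaped `*` back into `.*`. A simplified standalone sketch of that transformation (it omits the second `replace` the original applies), using only the `regex` crate the file already depends on:

```rust
use regex::Regex;

// Escape regex metacharacters, then turn the escaped `\*` back into `.*`.
fn glob_to_regex(glob: &str) -> Result<Regex, regex::Error> {
    let pattern = format!("^{}$", regex::escape(glob).replace(r"\*", ".*"));
    Regex::new(&pattern)
}

fn main() {
    let re = glob_to_regex("tex*").unwrap();
    assert!(re.is_match("text"));
    assert!(re.is_match("text2"));
    assert!(!re.is_match("date"));

    // Dots in a field path are escaped, so "mixed.*" needs a literal dot.
    let re = glob_to_regex("mixed.*").unwrap();
    assert!(re.is_match("mixed.dyn_arr"));
    assert!(!re.is_match("mixed_other"));
}
```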
fn get_document_field_data(
|
||||
&self,
|
||||
accessors: &HashMap<String, Vec<DynamicColumn>>,
|
||||
doc_id: DocId,
|
||||
) -> FieldRetrivalResult {
|
||||
let dvf = self
|
||||
.doc_value_fields
|
||||
.iter()
|
||||
.map(|field| {
|
||||
let accessors = accessors
|
||||
.get(field)
|
||||
.unwrap_or_else(|| panic!("field '{}' not found in accessors", field));
|
||||
|
||||
let values: Vec<OwnedValue> = accessors
|
||||
.iter()
|
||||
.flat_map(|accessor| match accessor {
|
||||
DynamicColumn::U64(accessor) => accessor
|
||||
.values_for_doc(doc_id)
|
||||
.map(OwnedValue::U64)
|
||||
.collect::<Vec<_>>(),
|
||||
DynamicColumn::I64(accessor) => accessor
|
||||
.values_for_doc(doc_id)
|
||||
.map(OwnedValue::I64)
|
||||
.collect::<Vec<_>>(),
|
||||
DynamicColumn::F64(accessor) => accessor
|
||||
.values_for_doc(doc_id)
|
||||
.map(OwnedValue::F64)
|
||||
.collect::<Vec<_>>(),
|
||||
DynamicColumn::Bytes(accessor) => accessor
|
||||
.term_ords(doc_id)
|
||||
.map(|term_ord| {
|
||||
let mut buffer = vec![];
|
||||
assert!(
|
||||
accessor
|
||||
.ord_to_bytes(term_ord, &mut buffer)
|
||||
.expect("could not read term dictionary"),
|
||||
"term corresponding to term_ord does not exist"
|
||||
);
|
||||
OwnedValue::Bytes(buffer)
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
DynamicColumn::Str(accessor) => accessor
|
||||
.term_ords(doc_id)
|
||||
.map(|term_ord| {
|
||||
let mut buffer = vec![];
|
||||
assert!(
|
||||
accessor
|
||||
.ord_to_bytes(term_ord, &mut buffer)
|
||||
.expect("could not read term dictionary"),
|
||||
"term corresponding to term_ord does not exist"
|
||||
);
|
||||
OwnedValue::Str(String::from_utf8(buffer).unwrap())
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
DynamicColumn::Bool(accessor) => accessor
|
||||
.values_for_doc(doc_id)
|
||||
.map(OwnedValue::Bool)
|
||||
.collect::<Vec<_>>(),
|
||||
DynamicColumn::IpAddr(accessor) => accessor
|
||||
.values_for_doc(doc_id)
|
||||
.map(OwnedValue::IpAddr)
|
||||
.collect::<Vec<_>>(),
|
||||
DynamicColumn::DateTime(accessor) => accessor
|
||||
.values_for_doc(doc_id)
|
||||
.map(OwnedValue::Date)
|
||||
.collect::<Vec<_>>(),
|
||||
})
|
||||
.collect();
|
||||
|
||||
(field.to_owned(), OwnedValue::Array(values))
|
||||
})
|
||||
.collect();
|
||||
FieldRetrivalResult {
|
||||
doc_value_fields: dvf,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Default)]
|
||||
struct KeyOrder {
|
||||
field: String,
|
||||
order: Order,
|
||||
}
|
||||
|
||||
impl Serialize for KeyOrder {
|
||||
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
|
||||
let KeyOrder { field, order } = self;
|
||||
let mut map = serializer.serialize_map(Some(1))?;
|
||||
map.serialize_entry(field, order)?;
|
||||
map.end()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for KeyOrder {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where D: Deserializer<'de> {
|
||||
let mut k_o = <HashMap<String, Order>>::deserialize(deserializer)?.into_iter();
|
||||
let (k, v) = k_o.next().ok_or(serde::de::Error::custom(
|
||||
"Expected exactly one key-value pair in KeyOrder, found none",
|
||||
))?;
|
||||
if k_o.next().is_some() {
|
||||
return Err(serde::de::Error::custom(
|
||||
"Expected exactly one key-value pair in KeyOrder, found more",
|
||||
));
|
||||
}
|
||||
Ok(Self { field: k, order: v })
|
||||
}
|
||||
}
|
||||
|
||||
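For reference, the sort entries accepted by the pair of impls above are single-entry maps such as `{"date": "desc"}`, and the deserializer rejects entries with zero keys or more than one. A stand-in sketch of the accepted wire shape, not the private `KeyOrder` type itself:

```rust
// Stand-in for the accepted "sort" shape: a list of single-entry maps.
use std::collections::HashMap;
use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum SortOrder { Asc, Desc }

type SortSpec = Vec<HashMap<String, SortOrder>>;

fn main() {
    let spec: SortSpec = serde_json::from_str(r#"[{ "date": "desc" }]"#).unwrap();
    assert_eq!(spec[0]["date"], SortOrder::Desc);
}
```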
impl TopHitsAggregation {
|
||||
/// Validate and resolve field retrieval parameters
|
||||
pub fn validate_and_resolve(&mut self, reader: &ColumnarReader) -> crate::Result<()> {
|
||||
self.retrieval.resolve_field_names(reader)
|
||||
}
|
||||
|
||||
/// Return fields accessed by the aggregator, in order.
|
||||
pub fn field_names(&self) -> Vec<&str> {
|
||||
self.sort
|
||||
.iter()
|
||||
.map(|KeyOrder { field, .. }| field.as_str())
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Return fields accessed by the aggregator's value retrieval.
|
||||
pub fn value_field_names(&self) -> Vec<&str> {
|
||||
self.retrieval.get_field_names()
|
||||
}
|
||||
}
|
||||
|
||||
/// Holds a single comparable doc feature, and the order in which it should be sorted.
|
||||
#[derive(Clone, Serialize, Deserialize, Debug)]
|
||||
struct ComparableDocFeature {
|
||||
/// Stores any u64-mappable feature.
|
||||
value: Option<u64>,
|
||||
/// Sort order for the doc feature
|
||||
order: Order,
|
||||
}
|
||||
|
||||
impl Ord for ComparableDocFeature {
|
||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||
let invert = |cmp: std::cmp::Ordering| match self.order {
|
||||
Order::Asc => cmp,
|
||||
Order::Desc => cmp.reverse(),
|
||||
};
|
||||
|
||||
match (self.value, other.value) {
|
||||
(Some(self_value), Some(other_value)) => invert(self_value.cmp(&other_value)),
|
||||
(Some(_), None) => std::cmp::Ordering::Greater,
|
||||
(None, Some(_)) => std::cmp::Ordering::Less,
|
||||
(None, None) => std::cmp::Ordering::Equal,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for ComparableDocFeature {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for ComparableDocFeature {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.value.cmp(&other.value) == std::cmp::Ordering::Equal
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for ComparableDocFeature {}
|
||||
|
||||
#[derive(Clone, Serialize, Deserialize, Debug)]
|
||||
struct ComparableDocFeatures(Vec<ComparableDocFeature>, FieldRetrivalResult);
|
||||
|
||||
impl Ord for ComparableDocFeatures {
|
||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||
for (self_feature, other_feature) in self.0.iter().zip(other.0.iter()) {
|
||||
let cmp = self_feature.cmp(other_feature);
|
||||
if cmp != std::cmp::Ordering::Equal {
|
||||
return cmp;
|
||||
}
|
||||
}
|
||||
std::cmp::Ordering::Equal
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for ComparableDocFeatures {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for ComparableDocFeatures {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.cmp(other) == std::cmp::Ordering::Equal
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for ComparableDocFeatures {}
|
||||
|
||||
/// The TopHitsCollector used for collecting over segments and merging results.
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct TopHitsCollector {
|
||||
req: TopHitsAggregation,
|
||||
top_n: TopNComputer<ComparableDocFeatures, DocAddress, false>,
|
||||
}
|
||||
|
||||
impl Default for TopHitsCollector {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
req: TopHitsAggregation::default(),
|
||||
top_n: TopNComputer::new(1),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for TopHitsCollector {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("TopHitsCollector")
|
||||
.field("req", &self.req)
|
||||
.field("top_n_threshold", &self.top_n.threshold)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl std::cmp::PartialEq for TopHitsCollector {
|
||||
fn eq(&self, _other: &Self) -> bool {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
impl TopHitsCollector {
|
||||
fn collect(&mut self, features: ComparableDocFeatures, doc: DocAddress) {
|
||||
self.top_n.push(features, doc);
|
||||
}
|
||||
|
||||
pub(crate) fn merge_fruits(&mut self, other_fruit: Self) -> crate::Result<()> {
|
||||
for doc in other_fruit.top_n.into_vec() {
|
||||
self.collect(doc.feature, doc.doc);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Finalize by converting self into the final result form
|
||||
pub fn finalize(self) -> TopHitsMetricResult {
|
||||
let mut hits: Vec<TopHitsVecEntry> = self
|
||||
.top_n
|
||||
.into_sorted_vec()
|
||||
.into_iter()
|
||||
.map(|doc| TopHitsVecEntry {
|
||||
sort: doc.feature.0.iter().map(|f| f.value).collect(),
|
||||
search_results: doc.feature.1,
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Remove the first `from` elements
|
||||
// Truncating from end would be more efficient, but we need to truncate from the front
|
||||
// because `into_sorted_vec` yields a descending order due to the inverted
|
||||
// `Ord` semantics of the heap elements.
|
||||
hits.drain(..self.req.from.unwrap_or(0));
|
||||
TopHitsMetricResult { hits }
|
||||
}
|
||||
}
|
||||
|
||||
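The collector above leans on `TopNComputer` from `crate::collector`. Below is a small sketch of that usage pattern with plain `u64` features instead of `ComparableDocFeatures`; it assumes the same API surface used above (`new`, `push`, `into_sorted_vec`, the const ordering parameter, and the `feature` / `doc` fields) is reachable from your code as it is from this module:

```rust
// Sketch of the TopNComputer pattern used by TopHitsCollector, with plain
// u64 features and u32 doc ids; keeps the top 2 entries.
use tantivy::collector::TopNComputer;

fn main() {
    let mut top_n: TopNComputer<u64, u32, false> = TopNComputer::new(2);
    for (feature, doc_id) in [(3u64, 0u32), (1, 1), (5, 2), (4, 3)] {
        top_n.push(feature, doc_id);
    }
    for hit in top_n.into_sorted_vec() {
        println!("doc {} feature {}", hit.doc, hit.feature);
    }
}
```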
#[derive(Clone)]
|
||||
pub(crate) struct SegmentTopHitsCollector {
|
||||
segment_ordinal: SegmentOrdinal,
|
||||
accessor_idx: usize,
|
||||
inner_collector: TopHitsCollector,
|
||||
}
|
||||
|
||||
impl SegmentTopHitsCollector {
|
||||
pub fn from_req(
|
||||
req: &TopHitsAggregation,
|
||||
accessor_idx: usize,
|
||||
segment_ordinal: SegmentOrdinal,
|
||||
) -> Self {
|
||||
Self {
|
||||
inner_collector: TopHitsCollector {
|
||||
req: req.clone(),
|
||||
top_n: TopNComputer::new(req.size + req.from.unwrap_or(0)),
|
||||
},
|
||||
segment_ordinal,
|
||||
accessor_idx,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for SegmentTopHitsCollector {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("SegmentTopHitsCollector")
|
||||
.field("segment_id", &self.segment_ordinal)
|
||||
.field("accessor_idx", &self.accessor_idx)
|
||||
.field("inner_collector", &self.inner_collector)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl SegmentAggregationCollector for SegmentTopHitsCollector {
|
||||
fn add_intermediate_aggregation_result(
|
||||
self: Box<Self>,
|
||||
agg_with_accessor: &crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
|
||||
results: &mut crate::aggregation::intermediate_agg_result::IntermediateAggregationResults,
|
||||
) -> crate::Result<()> {
|
||||
let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();
|
||||
let intermediate_result = IntermediateMetricResult::TopHits(self.inner_collector);
|
||||
results.push(
|
||||
name,
|
||||
IntermediateAggregationResult::Metric(intermediate_result),
|
||||
)
|
||||
}
|
||||
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc_id: crate::DocId,
|
||||
agg_with_accessor: &mut crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
|
||||
) -> crate::Result<()> {
|
||||
let accessors = &agg_with_accessor.aggs.values[self.accessor_idx].accessors;
|
||||
let value_accessors = &agg_with_accessor.aggs.values[self.accessor_idx].value_accessors;
|
||||
let features: Vec<ComparableDocFeature> = self
|
||||
.inner_collector
|
||||
.req
|
||||
.sort
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(idx, KeyOrder { order, .. })| {
|
||||
let order = *order;
|
||||
let value = accessors
|
||||
.get(idx)
|
||||
.expect("could not find field in accessors")
|
||||
.0
|
||||
.values_for_doc(doc_id)
|
||||
.next();
|
||||
ComparableDocFeature { value, order }
|
||||
})
|
||||
.collect();
|
||||
|
||||
let retrieval_result = self
|
||||
.inner_collector
|
||||
.req
|
||||
.retrieval
|
||||
.get_document_field_data(value_accessors, doc_id);
|
||||
|
||||
self.inner_collector.collect(
|
||||
ComparableDocFeatures(features, retrieval_result),
|
||||
DocAddress {
|
||||
segment_ord: self.segment_ordinal,
|
||||
doc_id,
|
||||
},
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn collect_block(
|
||||
&mut self,
|
||||
docs: &[crate::DocId],
|
||||
agg_with_accessor: &mut crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
|
||||
) -> crate::Result<()> {
|
||||
// TODO: Consider getting fields with the column block accessor and refactor this.
|
||||
// ---
|
||||
// Would the additional complexity of getting fields with the column_block_accessor
|
||||
// make sense here? Probably yes, but I want to get a first-pass review
|
||||
// before proceeding.
|
||||
for doc in docs {
|
||||
self.collect(*doc, agg_with_accessor)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common::DateTime;
|
||||
use pretty_assertions::assert_eq;
|
||||
use serde_json::Value;
|
||||
use time::macros::datetime;
|
||||
|
||||
use super::{ComparableDocFeature, ComparableDocFeatures, Order};
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::agg_result::AggregationResults;
|
||||
use crate::aggregation::bucket::tests::get_test_index_from_docs;
|
||||
use crate::aggregation::tests::get_test_index_from_values;
|
||||
use crate::aggregation::AggregationCollector;
|
||||
use crate::collector::ComparableDoc;
|
||||
use crate::query::AllQuery;
|
||||
use crate::schema::OwnedValue as SchemaValue;
|
||||
|
||||
fn invert_order(cmp_feature: ComparableDocFeature) -> ComparableDocFeature {
|
||||
let ComparableDocFeature { value, order } = cmp_feature;
|
||||
let order = match order {
|
||||
Order::Asc => Order::Desc,
|
||||
Order::Desc => Order::Asc,
|
||||
};
|
||||
ComparableDocFeature { value, order }
|
||||
}
|
||||
|
||||
fn collector_with_capacity(capacity: usize) -> super::TopHitsCollector {
|
||||
super::TopHitsCollector {
|
||||
top_n: super::TopNComputer::new(capacity),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
fn invert_order_features(cmp_features: ComparableDocFeatures) -> ComparableDocFeatures {
|
||||
let ComparableDocFeatures(cmp_features, search_results) = cmp_features;
|
||||
let cmp_features = cmp_features
|
||||
.into_iter()
|
||||
.map(invert_order)
|
||||
.collect::<Vec<_>>();
|
||||
ComparableDocFeatures(cmp_features, search_results)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_comparable_doc_feature() -> crate::Result<()> {
|
||||
let small = ComparableDocFeature {
|
||||
value: Some(1),
|
||||
order: Order::Asc,
|
||||
};
|
||||
let big = ComparableDocFeature {
|
||||
value: Some(2),
|
||||
order: Order::Asc,
|
||||
};
|
||||
let none = ComparableDocFeature {
|
||||
value: None,
|
||||
order: Order::Asc,
|
||||
};
|
||||
|
||||
assert!(small < big);
|
||||
assert!(none < small);
|
||||
assert!(none < big);
|
||||
|
||||
let small = invert_order(small);
|
||||
let big = invert_order(big);
|
||||
let none = invert_order(none);
|
||||
|
||||
assert!(small > big);
|
||||
assert!(none < small);
|
||||
assert!(none < big);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_comparable_doc_features() -> crate::Result<()> {
|
||||
let features_1 = ComparableDocFeatures(
|
||||
vec![ComparableDocFeature {
|
||||
value: Some(1),
|
||||
order: Order::Asc,
|
||||
}],
|
||||
Default::default(),
|
||||
);
|
||||
|
||||
let features_2 = ComparableDocFeatures(
|
||||
vec![ComparableDocFeature {
|
||||
value: Some(2),
|
||||
order: Order::Asc,
|
||||
}],
|
||||
Default::default(),
|
||||
);
|
||||
|
||||
assert!(features_1 < features_2);
|
||||
|
||||
assert!(invert_order_features(features_1.clone()) > invert_order_features(features_2));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregation_top_hits_empty_index() -> crate::Result<()> {
|
||||
let values = vec![];
|
||||
|
||||
let index = get_test_index_from_values(false, &values)?;
|
||||
|
||||
let d: Aggregations = serde_json::from_value(json!({
|
||||
"top_hits_req": {
|
||||
"top_hits": {
|
||||
"size": 2,
|
||||
"sort": [
|
||||
{ "date": "desc" }
|
||||
],
|
||||
"from": 0,
|
||||
}
|
||||
}
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(d, Default::default());
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
|
||||
|
||||
let res: Value = serde_json::from_str(
|
||||
&serde_json::to_string(&agg_res).expect("JSON serialization failed"),
|
||||
)
|
||||
.expect("JSON parsing failed");
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
json!({
|
||||
"top_hits_req": {
|
||||
"hits": []
|
||||
}
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_hits_collector_single_feature() -> crate::Result<()> {
|
||||
let docs = vec![
|
||||
ComparableDoc::<_, _, false> {
|
||||
doc: crate::DocAddress {
|
||||
segment_ord: 0,
|
||||
doc_id: 0,
|
||||
},
|
||||
feature: ComparableDocFeatures(
|
||||
vec![ComparableDocFeature {
|
||||
value: Some(1),
|
||||
order: Order::Asc,
|
||||
}],
|
||||
Default::default(),
|
||||
),
|
||||
},
|
||||
ComparableDoc {
|
||||
doc: crate::DocAddress {
|
||||
segment_ord: 0,
|
||||
doc_id: 2,
|
||||
},
|
||||
feature: ComparableDocFeatures(
|
||||
vec![ComparableDocFeature {
|
||||
value: Some(3),
|
||||
order: Order::Asc,
|
||||
}],
|
||||
Default::default(),
|
||||
),
|
||||
},
|
||||
ComparableDoc {
|
||||
doc: crate::DocAddress {
|
||||
segment_ord: 0,
|
||||
doc_id: 1,
|
||||
},
|
||||
feature: ComparableDocFeatures(
|
||||
vec![ComparableDocFeature {
|
||||
value: Some(5),
|
||||
order: Order::Asc,
|
||||
}],
|
||||
Default::default(),
|
||||
),
|
||||
},
|
||||
];
|
||||
|
||||
let mut collector = collector_with_capacity(3);
|
||||
for doc in docs.clone() {
|
||||
collector.collect(doc.feature, doc.doc);
|
||||
}
|
||||
|
||||
let res = collector.finalize();
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
super::TopHitsMetricResult {
|
||||
hits: vec![
|
||||
super::TopHitsVecEntry {
|
||||
sort: vec![docs[0].feature.0[0].value],
|
||||
search_results: Default::default(),
|
||||
},
|
||||
super::TopHitsVecEntry {
|
||||
sort: vec![docs[1].feature.0[0].value],
|
||||
search_results: Default::default(),
|
||||
},
|
||||
super::TopHitsVecEntry {
|
||||
sort: vec![docs[2].feature.0[0].value],
|
||||
search_results: Default::default(),
|
||||
},
|
||||
]
|
||||
}
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_aggregation_top_hits(merge_segments: bool) -> crate::Result<()> {
|
||||
let docs = vec![
|
||||
vec![
|
||||
r#"{ "date": "2015-01-02T00:00:00Z", "text": "bbb", "text2": "bbb", "mixed": { "dyn_arr": [1, "2"] } }"#,
|
||||
r#"{ "date": "2017-06-15T00:00:00Z", "text": "ccc", "text2": "ddd", "mixed": { "dyn_arr": [3, "4"] } }"#,
|
||||
],
|
||||
vec![
|
||||
r#"{ "text": "aaa", "text2": "bbb", "date": "2018-01-02T00:00:00Z", "mixed": { "dyn_arr": ["9", 8] } }"#,
|
||||
r#"{ "text": "aaa", "text2": "bbb", "date": "2016-01-02T00:00:00Z", "mixed": { "dyn_arr": ["7", 6] } }"#,
|
||||
],
|
||||
];
|
||||
|
||||
let index = get_test_index_from_docs(merge_segments, &docs)?;
|
||||
|
||||
let d: Aggregations = serde_json::from_value(json!({
|
||||
"top_hits_req": {
|
||||
"top_hits": {
|
||||
"size": 2,
|
||||
"sort": [
|
||||
{ "date": "desc" }
|
||||
],
|
||||
"from": 1,
|
||||
"docvalue_fields": [
|
||||
"date",
|
||||
"tex*",
|
||||
"mixed.*",
|
||||
],
|
||||
}
|
||||
}
|
||||
}))?;
|
||||
|
||||
let collector = AggregationCollector::from_aggs(d, Default::default());
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
|
||||
let agg_res =
|
||||
serde_json::to_value(searcher.search(&AllQuery, &collector).unwrap()).unwrap();
|
||||
|
||||
let date_2017 = datetime!(2017-06-15 00:00:00 UTC);
|
||||
let date_2016 = datetime!(2016-01-02 00:00:00 UTC);
|
||||
|
||||
assert_eq!(
|
||||
agg_res["top_hits_req"],
|
||||
json!({
|
||||
"hits": [
|
||||
{
|
||||
"sort": [common::i64_to_u64(date_2017.unix_timestamp_nanos() as i64)],
|
||||
"docvalue_fields": {
|
||||
"date": [ SchemaValue::Date(DateTime::from_utc(date_2017)) ],
|
||||
"text": [ "ccc" ],
|
||||
"text2": [ "ddd" ],
|
||||
"mixed.dyn_arr": [ 3, "4" ],
|
||||
}
|
||||
},
|
||||
{
|
||||
"sort": [common::i64_to_u64(date_2016.unix_timestamp_nanos() as i64)],
|
||||
"docvalue_fields": {
|
||||
"date": [ SchemaValue::Date(DateTime::from_utc(date_2016)) ],
|
||||
"text": [ "aaa" ],
|
||||
"text2": [ "bbb" ],
|
||||
"mixed.dyn_arr": [ 6, "7" ],
|
||||
}
|
||||
}
|
||||
]
|
||||
}),
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregation_top_hits_single_segment() -> crate::Result<()> {
|
||||
test_aggregation_top_hits(true)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregation_top_hits_multi_segment() -> crate::Result<()> {
|
||||
test_aggregation_top_hits(false)
|
||||
}
|
||||
}
|
||||
@@ -145,8 +145,6 @@ mod agg_tests;

mod agg_bench;

use core::fmt;

pub use agg_limits::AggregationLimits;
pub use collector::{
    AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector,
@@ -156,106 +154,7 @@ use columnar::{ColumnType, MonotonicallyMappableToU64};
pub(crate) use date::format_date;
pub use error::AggregationError;
use itertools::Itertools;
use serde::de::{self, Visitor};
use serde::{Deserialize, Deserializer, Serialize};

fn parse_str_into_f64<E: de::Error>(value: &str) -> Result<f64, E> {
    let parsed = value.parse::<f64>().map_err(|_err| {
        de::Error::custom(format!("Failed to parse f64 from string: {:?}", value))
    })?;

    // Check if the parsed value is NaN or infinity
    if parsed.is_nan() || parsed.is_infinite() {
        Err(de::Error::custom(format!(
            "Value is not a valid f64 (NaN or Infinity): {:?}",
            value
        )))
    } else {
        Ok(parsed)
    }
}

/// deserialize Option<f64> from string or float
|
||||
pub(crate) fn deserialize_option_f64<'de, D>(deserializer: D) -> Result<Option<f64>, D::Error>
|
||||
where D: Deserializer<'de> {
|
||||
struct StringOrFloatVisitor;
|
||||
|
||||
impl<'de> Visitor<'de> for StringOrFloatVisitor {
|
||||
type Value = Option<f64>;
|
||||
|
||||
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
formatter.write_str("a string or a float")
|
||||
}
|
||||
|
||||
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
|
||||
where E: de::Error {
|
||||
parse_str_into_f64(value).map(Some)
|
||||
}
|
||||
|
||||
fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E>
|
||||
where E: de::Error {
|
||||
Ok(Some(value))
|
||||
}
|
||||
|
||||
fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
|
||||
where E: de::Error {
|
||||
Ok(Some(value as f64))
|
||||
}
|
||||
|
||||
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
|
||||
where E: de::Error {
|
||||
Ok(Some(value as f64))
|
||||
}
|
||||
|
||||
fn visit_none<E>(self) -> Result<Self::Value, E>
|
||||
where E: de::Error {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn visit_unit<E>(self) -> Result<Self::Value, E>
|
||||
where E: de::Error {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
deserializer.deserialize_any(StringOrFloatVisitor)
|
||||
}
|
||||
|
||||
/// deserialize f64 from string or float
|
||||
pub(crate) fn deserialize_f64<'de, D>(deserializer: D) -> Result<f64, D::Error>
|
||||
where D: Deserializer<'de> {
|
||||
struct StringOrFloatVisitor;
|
||||
|
||||
impl<'de> Visitor<'de> for StringOrFloatVisitor {
|
||||
type Value = f64;
|
||||
|
||||
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
formatter.write_str("a string or a float")
|
||||
}
|
||||
|
||||
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
|
||||
where E: de::Error {
|
||||
parse_str_into_f64(value)
|
||||
}
|
||||
|
||||
fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E>
|
||||
where E: de::Error {
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
|
||||
where E: de::Error {
|
||||
Ok(value as f64)
|
||||
}
|
||||
|
||||
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
|
||||
where E: de::Error {
|
||||
Ok(value as f64)
|
||||
}
|
||||
}
|
||||
|
||||
deserializer.deserialize_any(StringOrFloatVisitor)
|
||||
}
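For context, here is a minimal standalone sketch of the same "string or number" deserialization pattern these helpers implement, wired up with `#[serde(deserialize_with = ...)]`. It uses an untagged enum instead of the Visitor written out above; `f64_from_str_or_num` and `SizeParam` are illustrative names, not part of the crate.

```rust
use serde::{Deserialize, Deserializer};

// Accept either `"size": 2.5` or `"size": "2.5"` for the same field.
fn f64_from_str_or_num<'de, D>(deserializer: D) -> Result<f64, D::Error>
where D: Deserializer<'de> {
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum NumOrStr {
        Num(f64),
        Str(String),
    }
    match NumOrStr::deserialize(deserializer)? {
        NumOrStr::Num(v) => Ok(v),
        NumOrStr::Str(s) => s.parse::<f64>().map_err(serde::de::Error::custom),
    }
}

#[derive(Deserialize)]
struct SizeParam {
    #[serde(deserialize_with = "f64_from_str_or_num")]
    size: f64,
}

fn main() {
    let a: SizeParam = serde_json::from_str(r#"{ "size": 2.5 }"#).unwrap();
    let b: SizeParam = serde_json::from_str(r#"{ "size": "2.5" }"#).unwrap();
    assert_eq!(a.size, b.size);
}
```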
|
||||
use serde::{Deserialize, Serialize};

/// Represents an associative array `(key => values)` in a very efficient manner.
#[derive(PartialEq, Serialize, Deserialize)]
@@ -382,7 +281,6 @@ pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &ColumnType) -> f64 {
        ColumnType::U64 => val as f64,
        ColumnType::I64 | ColumnType::DateTime => i64::from_u64(val) as f64,
        ColumnType::F64 => f64::from_u64(val),
        ColumnType::Bool => val as f64,
        _ => {
            panic!("unexpected type {field_type:?}. This should not happen")
        }
@@ -403,7 +301,6 @@ pub(crate) fn f64_to_fastfield_u64(val: f64, field_type: &ColumnType) -> Option<
        ColumnType::U64 => Some(val as u64),
        ColumnType::I64 | ColumnType::DateTime => Some((val as i64).to_u64()),
        ColumnType::F64 => Some(val.to_u64()),
        ColumnType::Bool => Some(val as u64),
        _ => None,
    }
}
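These conversions lean on an order-preserving mapping between signed values and `u64`. The sketch below writes that mapping out by hand for `i64`; it is the assumed behavior behind `MonotonicallyMappableToU64`, not the crate's actual implementation.

```rust
// Flipping the sign bit maps i64 onto u64 while preserving ordering.
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

fn main() {
    for v in [i64::MIN, -1, 0, 1, i64::MAX] {
        assert_eq!(u64_to_i64(i64_to_u64(v)), v);
    }
    // The mapping is monotonic, so range queries over the u64 representation
    // are equivalent to range queries over the original i64 values.
    assert!(i64_to_u64(-5) < i64_to_u64(3));
}
```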
@@ -422,7 +319,7 @@ mod tests {
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::query::{AllQuery, TermQuery};
|
||||
use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
|
||||
use crate::{Index, IndexWriter, Term};
|
||||
use crate::{Index, Term};
|
||||
|
||||
pub fn get_test_index_with_num_docs(
|
||||
merge_segments: bool,
|
||||
@@ -554,7 +451,7 @@ mod tests {
|
||||
.searchable_segment_ids()
|
||||
.expect("Searchable segments failed.");
|
||||
if segment_ids.len() > 1 {
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.merge(&segment_ids).wait()?;
|
||||
index_writer.wait_merging_threads()?;
|
||||
}
|
||||
@@ -668,7 +565,7 @@ mod tests {
|
||||
let segment_ids = index
|
||||
.searchable_segment_ids()
|
||||
.expect("Searchable segments failed.");
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.merge(&segment_ids).wait()?;
|
||||
index_writer.wait_merging_threads()?;
|
||||
}
|
||||
|
||||
@@ -16,7 +16,6 @@ use super::metric::{
|
||||
SumAggregation,
|
||||
};
|
||||
use crate::aggregation::bucket::TermMissingAgg;
|
||||
use crate::aggregation::metric::SegmentTopHitsCollector;
|
||||
|
||||
pub(crate) trait SegmentAggregationCollector: CollectorClone + Debug {
|
||||
fn add_intermediate_aggregation_result(
|
||||
@@ -161,11 +160,6 @@ pub(crate) fn build_single_agg_segment_collector(
|
||||
accessor_idx,
|
||||
)?,
|
||||
)),
|
||||
TopHits(top_hits_req) => Ok(Box::new(SegmentTopHitsCollector::from_req(
|
||||
top_hits_req,
|
||||
accessor_idx,
|
||||
req.segment_ordinal,
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -410,7 +410,6 @@ impl SegmentCollector for FacetSegmentCollector {
|
||||
|
||||
/// Intermediary result of the `FacetCollector` that stores
|
||||
/// the facet counts for all the segments.
|
||||
#[derive(Default, Clone)]
|
||||
pub struct FacetCounts {
|
||||
facet_counts: BTreeMap<Facet, u64>,
|
||||
}
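As a usage sketch of the collector whose result type is defined above: the constructor has taken either a `Field` or a field name across tantivy versions, so the exact signature should be checked against the release in use; the field name `"facet"` and the helper below are illustrative.

```rust
use tantivy::collector::FacetCollector;
use tantivy::query::AllQuery;
use tantivy::Searcher;

// Count documents under "/facet", the same flow the tests below exercise.
fn count_facets(searcher: &Searcher) -> tantivy::Result<()> {
    let mut facet_collector = FacetCollector::for_field("facet");
    facet_collector.add_facet("/facet");
    let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
    for (facet, count) in facet_counts.get("/facet") {
        println!("{facet}: {count}");
    }
    Ok(())
}
```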
|
||||
@@ -494,10 +493,10 @@ mod tests {
|
||||
use super::{FacetCollector, FacetCounts};
|
||||
use crate::collector::facet_collector::compress_mapping;
|
||||
use crate::collector::Count;
|
||||
use crate::index::Index;
|
||||
use crate::core::Index;
|
||||
use crate::query::{AllQuery, QueryParser, TermQuery};
|
||||
use crate::schema::{Facet, FacetOptions, IndexRecordOption, Schema, TantivyDocument};
|
||||
use crate::{IndexWriter, Term};
|
||||
use crate::schema::{Document, Facet, FacetOptions, IndexRecordOption, Schema};
|
||||
use crate::Term;
|
||||
|
||||
fn test_collapse_mapping_aux(
|
||||
facet_terms: &[&str],
|
||||
@@ -560,7 +559,7 @@ mod tests {
|
||||
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(facet_field=>Facet::from("/facet/a")))
|
||||
.unwrap();
|
||||
@@ -589,7 +588,7 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let num_facets: usize = 3 * 4 * 5;
|
||||
let facets: Vec<Facet> = (0..num_facets)
|
||||
.map(|mut n| {
|
||||
@@ -602,7 +601,7 @@ mod tests {
|
||||
})
|
||||
.collect();
|
||||
for i in 0..num_facets * 10 {
|
||||
let mut doc = TantivyDocument::new();
|
||||
let mut doc = Document::new();
|
||||
doc.add_facet(facet_field, facets[i % num_facets].clone());
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
@@ -733,25 +732,24 @@ mod tests {
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let uniform = Uniform::new_inclusive(1, 100_000);
|
||||
let mut docs: Vec<TantivyDocument> =
|
||||
vec![("a", 10), ("b", 100), ("c", 7), ("d", 12), ("e", 21)]
|
||||
.into_iter()
|
||||
.flat_map(|(c, count)| {
|
||||
let facet = Facet::from(&format!("/facet/{}", c));
|
||||
let doc = doc!(facet_field => facet);
|
||||
iter::repeat(doc).take(count)
|
||||
})
|
||||
.map(|mut doc| {
|
||||
doc.add_facet(
|
||||
facet_field,
|
||||
&format!("/facet/{}", thread_rng().sample(uniform)),
|
||||
);
|
||||
doc
|
||||
})
|
||||
.collect();
|
||||
let mut docs: Vec<Document> = vec![("a", 10), ("b", 100), ("c", 7), ("d", 12), ("e", 21)]
|
||||
.into_iter()
|
||||
.flat_map(|(c, count)| {
|
||||
let facet = Facet::from(&format!("/facet/{}", c));
|
||||
let doc = doc!(facet_field => facet);
|
||||
iter::repeat(doc).take(count)
|
||||
})
|
||||
.map(|mut doc| {
|
||||
doc.add_facet(
|
||||
facet_field,
|
||||
&format!("/facet/{}", thread_rng().sample(uniform)),
|
||||
);
|
||||
doc
|
||||
})
|
||||
.collect();
|
||||
docs[..].shuffle(&mut thread_rng());
|
||||
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
for doc in docs {
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
@@ -782,7 +780,7 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let docs: Vec<TantivyDocument> = vec![("b", 2), ("a", 2), ("c", 4)]
|
||||
let docs: Vec<Document> = vec![("b", 2), ("a", 2), ("c", 4)]
|
||||
.into_iter()
|
||||
.flat_map(|(c, count)| {
|
||||
let facet = Facet::from(&format!("/facet/{}", c));
|
||||
@@ -830,7 +828,7 @@ mod bench {
|
||||
use crate::collector::FacetCollector;
|
||||
use crate::query::AllQuery;
|
||||
use crate::schema::{Facet, Schema, INDEXED};
|
||||
use crate::{Index, IndexWriter};
|
||||
use crate::Index;
|
||||
|
||||
#[bench]
|
||||
fn bench_facet_collector(b: &mut Bencher) {
|
||||
@@ -849,7 +847,7 @@ mod bench {
|
||||
// 40425 docs
|
||||
docs[..].shuffle(&mut thread_rng());
|
||||
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
for doc in docs {
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
|
||||
@@ -12,7 +12,8 @@ use std::marker::PhantomData;
|
||||
use columnar::{BytesColumn, Column, DynamicColumn, HasAssociatedColumnType};
|
||||
|
||||
use crate::collector::{Collector, SegmentCollector};
|
||||
use crate::{DocId, Score, SegmentReader};
|
||||
use crate::schema::Field;
|
||||
use crate::{DocId, Score, SegmentReader, TantivyError};
|
||||
|
||||
/// The `FilterCollector` filters docs using a fast field value and a predicate.
|
||||
///
|
||||
@@ -49,13 +50,13 @@ use crate::{DocId, Score, SegmentReader};
|
||||
///
|
||||
/// let query_parser = QueryParser::for_index(&index, vec![title]);
|
||||
/// let query = query_parser.parse_query("diary")?;
|
||||
/// let no_filter_collector = FilterCollector::new("price".to_string(), |value: u64| value > 20_120u64, TopDocs::with_limit(2));
|
||||
/// let no_filter_collector = FilterCollector::new(price, |value: u64| value > 20_120u64, TopDocs::with_limit(2));
|
||||
/// let top_docs = searcher.search(&query, &no_filter_collector)?;
|
||||
///
|
||||
/// assert_eq!(top_docs.len(), 1);
|
||||
/// assert_eq!(top_docs[0].1, DocAddress::new(0, 1));
|
||||
///
|
||||
/// let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new("price".to_string(), |value| value < 5u64, TopDocs::with_limit(2));
|
||||
/// let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new(price, |value| value < 5u64, TopDocs::with_limit(2));
|
||||
/// let filtered_top_docs = searcher.search(&query, &filter_all_collector)?;
|
||||
///
|
||||
/// assert_eq!(filtered_top_docs.len(), 0);
|
||||
@@ -69,7 +70,7 @@ use crate::{DocId, Score, SegmentReader};
|
||||
pub struct FilterCollector<TCollector, TPredicate, TPredicateValue>
|
||||
where TPredicate: 'static + Clone
|
||||
{
|
||||
field: String,
|
||||
field: Field,
|
||||
collector: TCollector,
|
||||
predicate: TPredicate,
|
||||
t_predicate_value: PhantomData<TPredicateValue>,
|
||||
@@ -82,7 +83,7 @@ where
|
||||
TPredicate: Fn(TPredicateValue) -> bool + Send + Sync + Clone,
|
||||
{
|
||||
/// Create a new `FilterCollector`.
|
||||
pub fn new(field: String, predicate: TPredicate, collector: TCollector) -> Self {
|
||||
pub fn new(field: Field, predicate: TPredicate, collector: TCollector) -> Self {
|
||||
Self {
|
||||
field,
|
||||
predicate,
|
||||
@@ -109,7 +110,18 @@ where
|
||||
segment_local_id: u32,
|
||||
segment_reader: &SegmentReader,
|
||||
) -> crate::Result<Self::Child> {
|
||||
let column_opt = segment_reader.fast_fields().column_opt(&self.field)?;
|
||||
let schema = segment_reader.schema();
|
||||
let field_entry = schema.get_field_entry(self.field);
|
||||
if !field_entry.is_fast() {
|
||||
return Err(TantivyError::SchemaError(format!(
|
||||
"Field {:?} is not a fast field.",
|
||||
field_entry.name()
|
||||
)));
|
||||
}
|
||||
|
||||
let column_opt = segment_reader
|
||||
.fast_fields()
|
||||
.column_opt(field_entry.name())?;
|
||||
|
||||
let segment_collector = self
|
||||
.collector
|
||||
@@ -217,7 +229,7 @@ where
|
||||
///
|
||||
/// let query_parser = QueryParser::for_index(&index, vec![title]);
|
||||
/// let query = query_parser.parse_query("diary")?;
|
||||
/// let filter_collector = BytesFilterCollector::new("barcode".to_string(), |bytes: &[u8]| bytes.starts_with(b"01"), TopDocs::with_limit(2));
|
||||
/// let filter_collector = BytesFilterCollector::new(barcode, |bytes: &[u8]| bytes.starts_with(b"01"), TopDocs::with_limit(2));
|
||||
/// let top_docs = searcher.search(&query, &filter_collector)?;
|
||||
///
|
||||
/// assert_eq!(top_docs.len(), 1);
|
||||
@@ -228,7 +240,7 @@ where
|
||||
pub struct BytesFilterCollector<TCollector, TPredicate>
|
||||
where TPredicate: 'static + Clone
|
||||
{
|
||||
field: String,
|
||||
field: Field,
|
||||
collector: TCollector,
|
||||
predicate: TPredicate,
|
||||
}
|
||||
@@ -239,7 +251,7 @@ where
|
||||
TPredicate: Fn(&[u8]) -> bool + Send + Sync + Clone,
|
||||
{
|
||||
/// Create a new `BytesFilterCollector`.
|
||||
pub fn new(field: String, predicate: TPredicate, collector: TCollector) -> Self {
|
||||
pub fn new(field: Field, predicate: TPredicate, collector: TCollector) -> Self {
|
||||
Self {
|
||||
field,
|
||||
predicate,
|
||||
@@ -262,7 +274,10 @@ where
|
||||
segment_local_id: u32,
|
||||
segment_reader: &SegmentReader,
|
||||
) -> crate::Result<Self::Child> {
|
||||
let column_opt = segment_reader.fast_fields().bytes(&self.field)?;
|
||||
let schema = segment_reader.schema();
|
||||
let field_name = schema.get_field_name(self.field);
|
||||
|
||||
let column_opt = segment_reader.fast_fields().bytes(field_name)?;
|
||||
|
||||
let segment_collector = self
|
||||
.collector
|
||||
|
||||
@@ -97,8 +97,7 @@ pub use self::multi_collector::{FruitHandle, MultiCollector, MultiFruit};
|
||||
mod top_collector;
|
||||
|
||||
mod top_score_collector;
|
||||
pub use self::top_collector::ComparableDoc;
|
||||
pub use self::top_score_collector::{TopDocs, TopNComputer};
|
||||
pub use self::top_score_collector::TopDocs;
|
||||
|
||||
mod custom_score_top_collector;
|
||||
pub use self::custom_score_top_collector::{CustomScorer, CustomSegmentScorer};
|
||||
|
||||
@@ -2,14 +2,12 @@ use columnar::{BytesColumn, Column};
|
||||
|
||||
use super::*;
|
||||
use crate::collector::{Count, FilterCollector, TopDocs};
|
||||
use crate::index::SegmentReader;
|
||||
use crate::core::SegmentReader;
|
||||
use crate::query::{AllQuery, QueryParser};
|
||||
use crate::schema::{Schema, FAST, TEXT};
|
||||
use crate::time::format_description::well_known::Rfc3339;
|
||||
use crate::time::OffsetDateTime;
|
||||
use crate::{
|
||||
doc, DateTime, DocAddress, DocId, Index, Score, Searcher, SegmentOrdinal, TantivyDocument,
|
||||
};
|
||||
use crate::{doc, DateTime, DocAddress, DocId, Document, Index, Score, Searcher, SegmentOrdinal};
|
||||
|
||||
pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
|
||||
compute_score: true,
|
||||
@@ -42,7 +40,7 @@ pub fn test_filter_collector() -> crate::Result<()> {
|
||||
let query_parser = QueryParser::for_index(&index, vec![title]);
|
||||
let query = query_parser.parse_query("diary")?;
|
||||
let filter_some_collector = FilterCollector::new(
|
||||
"price".to_string(),
|
||||
price,
|
||||
&|value: u64| value > 20_120u64,
|
||||
TopDocs::with_limit(2),
|
||||
);
|
||||
@@ -51,11 +49,8 @@ pub fn test_filter_collector() -> crate::Result<()> {
|
||||
assert_eq!(top_docs.len(), 1);
|
||||
assert_eq!(top_docs[0].1, DocAddress::new(0, 1));
|
||||
|
||||
let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new(
|
||||
"price".to_string(),
|
||||
&|value| value < 5u64,
|
||||
TopDocs::with_limit(2),
|
||||
);
|
||||
let filter_all_collector: FilterCollector<_, _, u64> =
|
||||
FilterCollector::new(price, &|value| value < 5u64, TopDocs::with_limit(2));
|
||||
let filtered_top_docs = searcher.search(&query, &filter_all_collector).unwrap();
|
||||
|
||||
assert_eq!(filtered_top_docs.len(), 0);
|
||||
@@ -66,8 +61,7 @@ pub fn test_filter_collector() -> crate::Result<()> {
|
||||
> 0
|
||||
}
|
||||
|
||||
let filter_dates_collector =
|
||||
FilterCollector::new("date".to_string(), &date_filter, TopDocs::with_limit(5));
|
||||
let filter_dates_collector = FilterCollector::new(date, &date_filter, TopDocs::with_limit(5));
|
||||
let filtered_date_docs = searcher.search(&query, &filter_dates_collector)?;
|
||||
|
||||
assert_eq!(filtered_date_docs.len(), 2);
|
||||
@@ -286,8 +280,8 @@ fn make_test_searcher() -> crate::Result<Searcher> {
|
||||
let schema = Schema::builder().build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(TantivyDocument::default())?;
|
||||
index_writer.add_document(TantivyDocument::default())?;
|
||||
index_writer.add_document(Document::default())?;
|
||||
index_writer.add_document(Document::default())?;
|
||||
index_writer.commit()?;
|
||||
Ok(index.reader()?.searcher())
|
||||
}
|
||||
|
||||
@@ -1,58 +1,39 @@
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::BinaryHeap;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::top_score_collector::TopNComputer;
|
||||
use crate::{DocAddress, DocId, SegmentOrdinal, SegmentReader};
|
||||
|
||||
/// Contains a feature (field, score, etc.) of a document along with the document address.
|
||||
///
|
||||
/// It guarantees stable sorting: in case of a tie on the feature, the document
|
||||
/// address is used.
|
||||
/// It has a custom implementation of `PartialOrd` that reverses the order. This is because the
|
||||
/// default Rust heap is a max heap, whereas a min heap is needed.
|
||||
///
|
||||
/// The REVERSE_ORDER generic parameter controls whether the by-feature order
|
||||
/// should be reversed, which is useful for achieving for example largest-first
|
||||
/// semantics without having to wrap the feature in a `Reverse`.
|
||||
/// Additionally, it guarantees stable sorting: in case of a tie on the feature, the document
|
||||
/// address is used.
|
||||
///
|
||||
/// WARNING: equality is not what you would expect here.
|
||||
/// Two elements are equal if their feature is equal, and regardless of whether `doc`
|
||||
/// is equal. This should be perfectly fine for this usage, but let's make sure this
|
||||
/// struct is never public.
|
||||
#[derive(Clone, Default, Serialize, Deserialize)]
|
||||
pub struct ComparableDoc<T, D, const REVERSE_ORDER: bool = false> {
|
||||
/// The feature of the document. In practice, this is
|
||||
/// is any type that implements `PartialOrd`.
|
||||
pub(crate) struct ComparableDoc<T, D> {
|
||||
pub feature: T,
|
||||
/// The document address. In practice, this is any
|
||||
/// type that implements `PartialOrd`, and is guaranteed
|
||||
/// to be unique for each document.
|
||||
pub doc: D,
|
||||
}
|
||||
impl<T: std::fmt::Debug, D: std::fmt::Debug, const R: bool> std::fmt::Debug
|
||||
for ComparableDoc<T, D, R>
|
||||
{
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct(format!("ComparableDoc<_, _ {R}").as_str())
|
||||
.field("feature", &self.feature)
|
||||
.field("doc", &self.doc)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: PartialOrd, D: PartialOrd, const R: bool> PartialOrd for ComparableDoc<T, D, R> {
|
||||
impl<T: PartialOrd, D: PartialOrd> PartialOrd for ComparableDoc<T, D> {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: PartialOrd, D: PartialOrd, const R: bool> Ord for ComparableDoc<T, D, R> {
|
||||
impl<T: PartialOrd, D: PartialOrd> Ord for ComparableDoc<T, D> {
|
||||
#[inline]
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
let by_feature = self
|
||||
// Reversed to make BinaryHeap work as a min-heap
|
||||
let by_feature = other
|
||||
.feature
|
||||
.partial_cmp(&other.feature)
|
||||
.map(|ord| if R { ord.reverse() } else { ord })
|
||||
.partial_cmp(&self.feature)
|
||||
.unwrap_or(Ordering::Equal);
|
||||
|
||||
let lazy_by_doc_address = || self.doc.partial_cmp(&other.doc).unwrap_or(Ordering::Equal);
|
||||
@@ -64,13 +45,13 @@ impl<T: PartialOrd, D: PartialOrd, const R: bool> Ord for ComparableDoc<T, D, R>
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: PartialOrd, D: PartialOrd, const R: bool> PartialEq for ComparableDoc<T, D, R> {
|
||||
impl<T: PartialOrd, D: PartialOrd> PartialEq for ComparableDoc<T, D> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.cmp(other) == Ordering::Equal
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: PartialOrd, D: PartialOrd, const R: bool> Eq for ComparableDoc<T, D, R> {}
|
||||
impl<T: PartialOrd, D: PartialOrd> Eq for ComparableDoc<T, D> {}
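The reversed comparison documented above is what lets the collectors below use Rust's max-heap `BinaryHeap` as a min-heap: the heap root is always the worst of the current top-K and is cheap to evict. A minimal sketch of the same trick, with `MinByScore` as an illustrative type rather than the crate's `ComparableDoc`:

```rust
use std::cmp::Ordering;
use std::collections::BinaryHeap;

#[derive(PartialEq, Eq)]
struct MinByScore {
    score: u64,
    doc: u32,
}

impl Ord for MinByScore {
    fn cmp(&self, other: &Self) -> Ordering {
        // Reversed on the score: the smallest score compares as the "largest" heap element.
        other
            .score
            .cmp(&self.score)
            .then_with(|| self.doc.cmp(&other.doc))
    }
}

impl PartialOrd for MinByScore {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let mut heap = BinaryHeap::new();
    for (doc, score) in [(0u32, 10u64), (1, 3), (2, 7)] {
        heap.push(MinByScore { score, doc });
    }
    // The root is the lowest score, i.e. the first candidate to be replaced.
    assert_eq!(heap.peek().unwrap().score, 3);
}
```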
|
||||
|
||||
pub(crate) struct TopCollector<T> {
|
||||
pub limit: usize,
|
||||
@@ -110,13 +91,18 @@ where T: PartialOrd + Clone
|
||||
if self.limit == 0 {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
let mut top_collector: TopNComputer<_, _> = TopNComputer::new(self.limit + self.offset);
|
||||
let mut top_collector = BinaryHeap::new();
|
||||
for child_fruit in children {
|
||||
for (feature, doc) in child_fruit {
|
||||
top_collector.push(feature, doc);
|
||||
if top_collector.len() < (self.limit + self.offset) {
|
||||
top_collector.push(ComparableDoc { feature, doc });
|
||||
} else if let Some(mut head) = top_collector.peek_mut() {
|
||||
if head.feature < feature {
|
||||
*head = ComparableDoc { feature, doc };
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(top_collector
|
||||
.into_sorted_vec()
|
||||
.into_iter()
|
||||
@@ -125,7 +111,7 @@ where T: PartialOrd + Clone
|
||||
.collect())
|
||||
}
|
||||
|
||||
pub(crate) fn for_segment<F: PartialOrd + Clone>(
|
||||
pub(crate) fn for_segment<F: PartialOrd>(
|
||||
&self,
|
||||
segment_id: SegmentOrdinal,
|
||||
_: &SegmentReader,
|
||||
@@ -150,20 +136,20 @@ where T: PartialOrd + Clone
|
||||
/// The Top Collector keeps track of the K documents
|
||||
/// sorted by type `T`.
|
||||
///
|
||||
/// The implementation is based on a repeatedly truncating on the median after K * 2 documents
|
||||
/// The implementation is based on a `BinaryHeap`.
|
||||
/// The theoretical complexity for collecting the top `K` out of `n` documents
|
||||
/// is `O(n + K)`.
|
||||
/// is `O(n log K)`.
|
||||
pub(crate) struct TopSegmentCollector<T> {
|
||||
/// We reverse the order of the feature in order to
|
||||
/// have top-semantics instead of bottom semantics.
|
||||
topn_computer: TopNComputer<T, DocId>,
|
||||
limit: usize,
|
||||
heap: BinaryHeap<ComparableDoc<T, DocId>>,
|
||||
segment_ord: u32,
|
||||
}
|
||||
|
||||
impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
|
||||
impl<T: PartialOrd> TopSegmentCollector<T> {
|
||||
fn new(segment_ord: SegmentOrdinal, limit: usize) -> TopSegmentCollector<T> {
|
||||
TopSegmentCollector {
|
||||
topn_computer: TopNComputer::new(limit),
|
||||
limit,
|
||||
heap: BinaryHeap::with_capacity(limit),
|
||||
segment_ord,
|
||||
}
|
||||
}
|
||||
@@ -172,7 +158,7 @@ impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
|
||||
impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
|
||||
pub fn harvest(self) -> Vec<(T, DocAddress)> {
|
||||
let segment_ord = self.segment_ord;
|
||||
self.topn_computer
|
||||
self.heap
|
||||
.into_sorted_vec()
|
||||
.into_iter()
|
||||
.map(|comparable_doc| {
|
||||
@@ -187,13 +173,33 @@ impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Return true if more documents have been collected than the limit.
|
||||
#[inline]
|
||||
pub(crate) fn at_capacity(&self) -> bool {
|
||||
self.heap.len() >= self.limit
|
||||
}
|
||||
|
||||
/// Collects a document scored by the given feature
|
||||
///
|
||||
/// It collects documents until it has reached the max capacity. Once it reaches capacity, it
|
||||
/// will compare the lowest scoring item with the given one and keep whichever is greater.
|
||||
#[inline]
|
||||
pub fn collect(&mut self, doc: DocId, feature: T) {
|
||||
self.topn_computer.push(feature, doc);
|
||||
if self.at_capacity() {
|
||||
// It's ok to unwrap as long as a limit of 0 is forbidden.
|
||||
if let Some(limit_feature) = self.heap.peek().map(|head| head.feature.clone()) {
|
||||
if limit_feature < feature {
|
||||
if let Some(mut head) = self.heap.peek_mut() {
|
||||
head.feature = feature;
|
||||
head.doc = doc;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// we have not reached capacity yet, so we can just push the
|
||||
// element.
|
||||
self.heap.push(ComparableDoc { feature, doc });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
use std::collections::BinaryHeap;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
|
||||
use columnar::ColumnValues;
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::Collector;
|
||||
use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
|
||||
@@ -87,15 +86,12 @@ where
|
||||
/// The `TopDocs` collector keeps track of the top `K` documents
|
||||
/// sorted by their score.
|
||||
///
|
||||
/// The implementation is based on a repeatedly truncating on the median after K * 2 documents
|
||||
/// with pattern defeating QuickSort.
|
||||
/// The theoretical complexity for collecting the top `K` out of `N` documents
|
||||
/// is `O(N + K)`.
|
||||
/// The implementation is based on a `BinaryHeap`.
|
||||
/// The theoretical complexity for collecting the top `K` out of `n` documents
|
||||
/// is `O(n log K)`.
|
||||
///
|
||||
/// This collector does not guarantee a stable sorting in case of a tie on the
|
||||
/// document score, for stable sorting `PartialOrd` needs to resolve on other fields
|
||||
/// like docid in case of score equality.
|
||||
/// Only then, it is suitable for pagination.
|
||||
/// This collector guarantees a stable sorting in case of a tie on the
|
||||
/// document score. As such, it is suitable to implement pagination.
|
||||
///
|
||||
/// ```rust
|
||||
/// use tantivy::collector::TopDocs;
|
||||
@@ -311,7 +307,7 @@ impl TopDocs {
|
||||
///
|
||||
/// To comfortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
|
||||
/// the [.order_by_fast_field(...)](TopDocs::order_by_fast_field) method.
|
||||
pub fn order_by_u64_field(
|
||||
fn order_by_u64_field(
|
||||
self,
|
||||
field: impl ToString,
|
||||
order: Order,
|
||||
@@ -665,27 +661,50 @@ impl Collector for TopDocs {
|
||||
reader: &SegmentReader,
|
||||
) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
|
||||
let heap_len = self.0.limit + self.0.offset;
|
||||
let mut top_n: TopNComputer<_, _> = TopNComputer::new(heap_len);
|
||||
let mut heap: BinaryHeap<ComparableDoc<Score, DocId>> = BinaryHeap::with_capacity(heap_len);
|
||||
|
||||
if let Some(alive_bitset) = reader.alive_bitset() {
|
||||
let mut threshold = Score::MIN;
|
||||
top_n.threshold = Some(threshold);
|
||||
weight.for_each_pruning(Score::MIN, reader, &mut |doc, score| {
|
||||
weight.for_each_pruning(threshold, reader, &mut |doc, score| {
|
||||
if alive_bitset.is_deleted(doc) {
|
||||
return threshold;
|
||||
}
|
||||
top_n.push(score, doc);
|
||||
threshold = top_n.threshold.unwrap_or(Score::MIN);
|
||||
let heap_item = ComparableDoc {
|
||||
feature: score,
|
||||
doc,
|
||||
};
|
||||
if heap.len() < heap_len {
|
||||
heap.push(heap_item);
|
||||
if heap.len() == heap_len {
|
||||
threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
|
||||
}
|
||||
return threshold;
|
||||
}
|
||||
*heap.peek_mut().unwrap() = heap_item;
|
||||
threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
|
||||
threshold
|
||||
})?;
|
||||
} else {
|
||||
weight.for_each_pruning(Score::MIN, reader, &mut |doc, score| {
|
||||
top_n.push(score, doc);
|
||||
top_n.threshold.unwrap_or(Score::MIN)
|
||||
let heap_item = ComparableDoc {
|
||||
feature: score,
|
||||
doc,
|
||||
};
|
||||
if heap.len() < heap_len {
|
||||
heap.push(heap_item);
|
||||
// TODO the threshold is suboptimal for heap.len == heap_len
|
||||
if heap.len() == heap_len {
|
||||
return heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
|
||||
} else {
|
||||
return Score::MIN;
|
||||
}
|
||||
}
|
||||
*heap.peek_mut().unwrap() = heap_item;
|
||||
heap.peek().map(|el| el.feature).unwrap_or(Score::MIN)
|
||||
})?;
|
||||
}
|
||||
|
||||
let fruit = top_n
|
||||
let fruit = heap
|
||||
.into_sorted_vec()
|
||||
.into_iter()
|
||||
.map(|cid| {
|
||||
@@ -717,142 +736,9 @@ impl SegmentCollector for TopScoreSegmentCollector {
|
||||
}
|
||||
}
|
||||
|
||||
/// Fast TopN Computation
///
/// Capacity of the vec is 2 * top_n.
/// The buffer is truncated to the top_n elements when it reaches the capacity of the Vec.
/// That means capacity has special meaning and should be carried over when cloning or serializing.
///
/// For TopN == 0, it will be relatively expensive.
#[derive(Serialize, Deserialize)]
#[serde(from = "TopNComputerDeser<Score, D, REVERSE_ORDER>")]
pub struct TopNComputer<Score, D, const REVERSE_ORDER: bool = true> {
    /// The buffer reverses sort order to get top-semantics instead of bottom-semantics
    buffer: Vec<ComparableDoc<Score, D, REVERSE_ORDER>>,
    top_n: usize,
    pub(crate) threshold: Option<Score>,
}
// Intermediate struct for TopNComputer for deserialization, to keep vec capacity
|
||||
#[derive(Deserialize)]
|
||||
struct TopNComputerDeser<Score, D, const REVERSE_ORDER: bool> {
|
||||
buffer: Vec<ComparableDoc<Score, D, REVERSE_ORDER>>,
|
||||
top_n: usize,
|
||||
threshold: Option<Score>,
|
||||
}
|
||||
|
||||
// Custom clone to keep capacity
|
||||
impl<Score: Clone, D: Clone, const REVERSE_ORDER: bool> Clone
|
||||
for TopNComputer<Score, D, REVERSE_ORDER>
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
let mut buffer_clone = Vec::with_capacity(self.buffer.capacity());
|
||||
buffer_clone.extend(self.buffer.iter().cloned());
|
||||
|
||||
TopNComputer {
|
||||
buffer: buffer_clone,
|
||||
top_n: self.top_n,
|
||||
threshold: self.threshold.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Score, D, const R: bool> From<TopNComputerDeser<Score, D, R>> for TopNComputer<Score, D, R> {
|
||||
fn from(mut value: TopNComputerDeser<Score, D, R>) -> Self {
|
||||
let expected_cap = value.top_n.max(1) * 2;
|
||||
let current_cap = value.buffer.capacity();
|
||||
if current_cap < expected_cap {
|
||||
value.buffer.reserve_exact(expected_cap - current_cap);
|
||||
} else {
|
||||
value.buffer.shrink_to(expected_cap);
|
||||
}
|
||||
|
||||
TopNComputer {
|
||||
buffer: value.buffer,
|
||||
top_n: value.top_n,
|
||||
threshold: value.threshold,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Score, D, const R: bool> TopNComputer<Score, D, R>
|
||||
where
|
||||
Score: PartialOrd + Clone,
|
||||
D: Serialize + DeserializeOwned + Ord + Clone,
|
||||
{
|
||||
/// Create a new `TopNComputer`.
|
||||
/// Internally it will allocate a buffer of size `2 * top_n`.
|
||||
pub fn new(top_n: usize) -> Self {
|
||||
let vec_cap = top_n.max(1) * 2;
|
||||
TopNComputer {
|
||||
buffer: Vec::with_capacity(vec_cap),
|
||||
top_n,
|
||||
threshold: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Push a new document to the top n.
|
||||
/// If the document is below the current threshold, it will be ignored.
|
||||
#[inline]
|
||||
pub fn push(&mut self, feature: Score, doc: D) {
|
||||
if let Some(last_median) = self.threshold.clone() {
|
||||
if feature < last_median {
|
||||
return;
|
||||
}
|
||||
}
|
||||
if self.buffer.len() == self.buffer.capacity() {
|
||||
let median = self.truncate_top_n();
|
||||
self.threshold = Some(median);
|
||||
}
|
||||
|
||||
// This is faster since it avoids the buffer resizing to be inlined from vec.push()
|
||||
// (this is in the hot path)
|
||||
// TODO: Replace with `push_within_capacity` when it's stabilized
|
||||
let uninit = self.buffer.spare_capacity_mut();
|
||||
// This cannot panic, because we truncate_median will at least remove one element, since
|
||||
// the min capacity is 2.
|
||||
uninit[0].write(ComparableDoc { doc, feature });
|
||||
// This is safe because it would panic in the line above
|
||||
unsafe {
|
||||
self.buffer.set_len(self.buffer.len() + 1);
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn truncate_top_n(&mut self) -> Score {
|
||||
// Use select_nth_unstable to find the top nth score
|
||||
let (_, median_el, _) = self.buffer.select_nth_unstable(self.top_n);
|
||||
|
||||
let median_score = median_el.feature.clone();
|
||||
// Remove all elements below the top_n
|
||||
self.buffer.truncate(self.top_n);
|
||||
|
||||
median_score
|
||||
}
|
||||
|
||||
/// Returns the top n elements in sorted order.
|
||||
pub fn into_sorted_vec(mut self) -> Vec<ComparableDoc<Score, D, R>> {
|
||||
if self.buffer.len() > self.top_n {
|
||||
self.truncate_top_n();
|
||||
}
|
||||
self.buffer.sort_unstable();
|
||||
self.buffer
|
||||
}
|
||||
|
||||
/// Returns the top n elements in stored order.
|
||||
/// Useful if you do not need the elements in sorted order,
|
||||
/// for example when merging the results of multiple segments.
|
||||
pub fn into_vec(mut self) -> Vec<ComparableDoc<Score, D, R>> {
|
||||
if self.buffer.len() > self.top_n {
|
||||
self.truncate_top_n();
|
||||
}
|
||||
self.buffer
|
||||
}
|
||||
}
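A compact, hypothetical re-creation of the buffer/truncate/threshold strategy described above, on plain `u64` scores. `TopK` is an illustrative name, not the crate's `TopNComputer`; the point is the 2*K buffer, the `select_nth_unstable` truncation, and the cut-off score reused as a pruning threshold.

```rust
struct TopK {
    buffer: Vec<u64>,
    k: usize,
    threshold: Option<u64>,
}

impl TopK {
    fn new(k: usize) -> Self {
        TopK { buffer: Vec::with_capacity(k.max(1) * 2), k, threshold: None }
    }

    fn push(&mut self, score: u64) {
        if let Some(t) = self.threshold {
            if score < t {
                return; // cannot make it into the top k: reject without touching the buffer
            }
        }
        if self.buffer.len() == self.buffer.capacity() {
            // Partition so the k best scores sit in front; the element at index k is the
            // best score that just got cut and becomes the new pruning threshold.
            let cutoff = *self.buffer.select_nth_unstable_by(self.k, |a, b| b.cmp(a)).1;
            self.threshold = Some(cutoff);
            self.buffer.truncate(self.k);
        }
        self.buffer.push(score);
    }

    fn into_sorted_vec(mut self) -> Vec<u64> {
        self.buffer.sort_unstable_by(|a, b| b.cmp(a));
        self.buffer.truncate(self.k);
        self.buffer
    }
}

fn main() {
    let mut top = TopK::new(3);
    for score in [5u64, 1, 9, 7, 3, 8, 2] {
        top.push(score);
    }
    assert_eq!(top.into_sorted_vec(), vec![9, 8, 7]);
}
```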
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{TopDocs, TopNComputer};
|
||||
use crate::collector::top_collector::ComparableDoc;
|
||||
use super::TopDocs;
|
||||
use crate::collector::Collector;
|
||||
use crate::query::{AllQuery, Query, QueryParser};
|
||||
use crate::schema::{Field, Schema, FAST, STORED, TEXT};
|
||||
@@ -880,70 +766,6 @@ mod tests {
|
||||
crate::assert_nearly_equals!(result.0, expected.0);
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn test_topn_computer_serde() {
|
||||
let computer: TopNComputer<u32, u32> = TopNComputer::new(1);
|
||||
|
||||
let computer_ser = serde_json::to_string(&computer).unwrap();
|
||||
let mut computer: TopNComputer<u32, u32> = serde_json::from_str(&computer_ser).unwrap();
|
||||
|
||||
computer.push(1u32, 5u32);
|
||||
computer.push(1u32, 0u32);
|
||||
computer.push(1u32, 7u32);
|
||||
|
||||
assert_eq!(
|
||||
computer.into_sorted_vec(),
|
||||
&[ComparableDoc {
|
||||
feature: 1u32,
|
||||
doc: 0u32,
|
||||
},]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_topn_computer() {
|
||||
let mut computer: TopNComputer<u32, u32> = TopNComputer::new(0);
|
||||
|
||||
computer.push(1u32, 1u32);
|
||||
computer.push(1u32, 2u32);
|
||||
computer.push(1u32, 3u32);
|
||||
assert!(computer.into_sorted_vec().is_empty());
|
||||
}
|
||||
#[test]
|
||||
fn test_topn_computer() {
|
||||
let mut computer: TopNComputer<u32, u32> = TopNComputer::new(2);
|
||||
|
||||
computer.push(1u32, 1u32);
|
||||
computer.push(2u32, 2u32);
|
||||
computer.push(3u32, 3u32);
|
||||
computer.push(2u32, 4u32);
|
||||
computer.push(1u32, 5u32);
|
||||
assert_eq!(
|
||||
computer.into_sorted_vec(),
|
||||
&[
|
||||
ComparableDoc {
|
||||
feature: 3u32,
|
||||
doc: 3u32,
|
||||
},
|
||||
ComparableDoc {
|
||||
feature: 2u32,
|
||||
doc: 2u32,
|
||||
}
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_topn_computer_no_panic() {
|
||||
for top_n in 0..10 {
|
||||
let mut computer: TopNComputer<u32, u32> = TopNComputer::new(top_n);
|
||||
|
||||
for _ in 0..1 + top_n * 2 {
|
||||
computer.push(1u32, 1u32);
|
||||
}
|
||||
let _vals = computer.into_sorted_vec();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_collector_not_at_capacity_without_offset() -> crate::Result<()> {
|
||||
@@ -1030,25 +852,20 @@ mod tests {
|
||||
// using AllQuery to get a constant score
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
|
||||
let page_0 = searcher.search(&AllQuery, &TopDocs::with_limit(1)).unwrap();
|
||||
|
||||
let page_1 = searcher.search(&AllQuery, &TopDocs::with_limit(2)).unwrap();
|
||||
|
||||
let page_2 = searcher.search(&AllQuery, &TopDocs::with_limit(3)).unwrap();
|
||||
|
||||
// precondition for the test to be meaningful: we did get documents
|
||||
// with the same score
|
||||
assert!(page_0.iter().all(|result| result.0 == page_1[0].0));
|
||||
assert!(page_1.iter().all(|result| result.0 == page_1[0].0));
|
||||
assert!(page_2.iter().all(|result| result.0 == page_2[0].0));
|
||||
|
||||
// sanity check since we're relying on make_index()
|
||||
assert_eq!(page_0.len(), 1);
|
||||
assert_eq!(page_1.len(), 2);
|
||||
assert_eq!(page_2.len(), 3);
|
||||
|
||||
assert_eq!(page_1, &page_2[..page_1.len()]);
|
||||
assert_eq!(page_0, &page_2[..page_0.len()]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -6,23 +6,22 @@ use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::segment::Segment;
|
||||
use super::segment_reader::merge_field_meta_data;
|
||||
use super::{FieldMetadata, IndexSettings};
|
||||
use crate::core::{Executor, META_FILEPATH};
|
||||
use super::IndexSettings;
|
||||
use crate::core::single_segment_index_writer::SingleSegmentIndexWriter;
|
||||
use crate::core::{
|
||||
Executor, IndexMeta, SegmentId, SegmentMeta, SegmentMetaInventory, META_FILEPATH,
|
||||
};
|
||||
use crate::directory::error::OpenReadError;
|
||||
#[cfg(feature = "mmap")]
|
||||
use crate::directory::MmapDirectory;
|
||||
use crate::directory::{Directory, ManagedDirectory, RamDirectory, INDEX_WRITER_LOCK};
|
||||
use crate::error::{DataCorruption, TantivyError};
|
||||
use crate::index::{IndexMeta, SegmentId, SegmentMeta, SegmentMetaInventory};
|
||||
use crate::indexer::index_writer::{MAX_NUM_THREAD, MEMORY_BUDGET_NUM_BYTES_MIN};
|
||||
use crate::indexer::segment_updater::save_metas;
|
||||
use crate::indexer::{IndexWriter, SingleSegmentIndexWriter};
|
||||
use crate::reader::{IndexReader, IndexReaderBuilder};
|
||||
use crate::schema::document::Document;
|
||||
use crate::schema::{Field, FieldType, Schema};
|
||||
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
|
||||
use crate::SegmentReader;
|
||||
use crate::IndexWriter;
|
||||
|
||||
fn load_metas(
|
||||
directory: &dyn Directory,
|
||||
@@ -185,11 +184,11 @@ impl IndexBuilder {
|
||||
///
|
||||
/// It expects an originally empty directory, and will not run any GC operation.
|
||||
#[doc(hidden)]
|
||||
pub fn single_segment_index_writer<D: Document>(
|
||||
pub fn single_segment_index_writer(
|
||||
self,
|
||||
dir: impl Into<Box<dyn Directory>>,
|
||||
mem_budget: usize,
|
||||
) -> crate::Result<SingleSegmentIndexWriter<D>> {
|
||||
) -> crate::Result<SingleSegmentIndexWriter> {
|
||||
let index = self.create(dir)?;
|
||||
let index_simple_writer = SingleSegmentIndexWriter::new(index, mem_budget)?;
|
||||
Ok(index_simple_writer)
|
||||
@@ -322,15 +321,6 @@ impl Index {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
    /// Replace the search executor with a thread pool shared by an outer thread pool.
    pub fn set_shared_multithread_executor(
        &mut self,
        shared_thread_pool: Arc<Executor>,
    ) -> crate::Result<()> {
        self.executor = shared_thread_pool.clone();
        Ok(())
    }

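A hypothetical sketch of what this setter enables: building one multi-threaded executor and sharing it across several indexes. The `Executor::multi_thread` constructor and the `tantivy::Executor` re-export are assumptions here, so the exact paths should be checked against the version in use.

```rust
use std::sync::Arc;
use tantivy::{Executor, Index};

// Share one search thread pool between all the given indexes.
fn share_search_pool(indexes: &mut [Index]) -> tantivy::Result<()> {
    let shared = Arc::new(Executor::multi_thread(4, "search-")?);
    for index in indexes.iter_mut() {
        index.set_shared_multithread_executor(Arc::clone(&shared))?;
    }
    Ok(())
}
```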
/// Replace the default single thread search executor pool
|
||||
/// by a thread pool with as many threads as there are CPUs on the system.
|
||||
pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
|
||||
@@ -498,28 +488,6 @@ impl Index {
|
||||
self.inventory.all()
|
||||
}
|
||||
|
||||
/// Returns the list of fields that have been indexed in the Index.
|
||||
/// The field list includes the field defined in the schema as well as the fields
|
||||
/// that have been indexed as a part of a JSON field.
|
||||
/// The returned field name is the full field name, including the name of the JSON field.
|
||||
///
|
||||
/// The returned field names can be used in queries.
|
||||
///
|
||||
/// Notice: If your data contains JSON fields this is **very expensive**, as it requires
|
||||
/// browsing through the inverted index term dictionary and the columnar field dictionary.
|
||||
///
|
||||
/// Disclaimer: Some fields may not be listed here. For instance, if the schema contains a json
|
||||
/// field that is not indexed nor a fast field but is stored, it is possible for the field
|
||||
/// to not be listed.
|
||||
pub fn fields_metadata(&self) -> crate::Result<Vec<FieldMetadata>> {
|
||||
let segments = self.searchable_segments()?;
|
||||
let fields_metadata: Vec<Vec<FieldMetadata>> = segments
|
||||
.into_iter()
|
||||
.map(|segment| SegmentReader::open(&segment)?.fields_metadata())
|
||||
.collect::<Result<_, _>>()?;
|
||||
Ok(merge_field_meta_data(fields_metadata, &self.schema()))
|
||||
}
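A hedged usage sketch of the dev-branch API above: listing every field name that actually appears in the index, including JSON sub-paths discovered at indexing time. `FieldMetadata` is only assumed to implement `Debug` here.

```rust
// Print every concrete field discovered in the index's segments.
fn print_indexed_fields(index: &tantivy::Index) -> tantivy::Result<()> {
    for field_metadata in index.fields_metadata()? {
        println!("{field_metadata:?}");
    }
    Ok(())
}
```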
|
||||
|
||||
/// Creates a new segment_meta (Advanced user only).
|
||||
///
|
||||
/// As long as the `SegmentMeta` lives, the files associated with the
|
||||
@@ -563,11 +531,11 @@ impl Index {
|
||||
/// If the lockfile already exists, returns `Error::DirectoryLockBusy` or an `Error::IoError`.
|
||||
/// If the memory arena per thread is too small or too big, returns
|
||||
/// `TantivyError::InvalidArgument`
|
||||
pub fn writer_with_num_threads<D: Document>(
|
||||
pub fn writer_with_num_threads(
|
||||
&self,
|
||||
num_threads: usize,
|
||||
overall_memory_budget_in_bytes: usize,
|
||||
) -> crate::Result<IndexWriter<D>> {
|
||||
) -> crate::Result<IndexWriter> {
|
||||
let directory_lock = self
|
||||
.directory
|
||||
.acquire_lock(&INDEX_WRITER_LOCK)
|
||||
@@ -596,8 +564,8 @@ impl Index {
|
||||
/// That index writer only simply has a single thread and a memory budget of 15 MB.
|
||||
/// Using a single thread gives us a deterministic allocation of DocId.
|
||||
#[cfg(test)]
|
||||
pub fn writer_for_tests<D: Document>(&self) -> crate::Result<IndexWriter<D>> {
|
||||
self.writer_with_num_threads(1, MEMORY_BUDGET_NUM_BYTES_MIN)
|
||||
pub fn writer_for_tests(&self) -> crate::Result<IndexWriter> {
|
||||
self.writer_with_num_threads(1, 15_000_000)
|
||||
}
|
||||
|
||||
/// Creates a multithreaded writer
|
||||
@@ -611,10 +579,7 @@ impl Index {
|
||||
/// If the lockfile already exists, returns `Error::FileAlreadyExists`.
|
||||
/// If the memory arena per thread is too small or too big, returns
|
||||
/// `TantivyError::InvalidArgument`
|
||||
pub fn writer<D: Document>(
|
||||
&self,
|
||||
memory_budget_in_bytes: usize,
|
||||
) -> crate::Result<IndexWriter<D>> {
|
||||
pub fn writer(&self, memory_budget_in_bytes: usize) -> crate::Result<IndexWriter> {
|
||||
let mut num_threads = std::cmp::min(num_cpus::get(), MAX_NUM_THREAD);
|
||||
let memory_budget_num_bytes_per_thread = memory_budget_in_bytes / num_threads;
|
||||
if memory_budget_num_bytes_per_thread < MEMORY_BUDGET_NUM_BYTES_MIN {
|
||||
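A back-of-the-envelope sketch of the constraint enforced above: the overall budget, divided across threads, must stay at or above the 15 MB per-thread minimum. `effective_threads` is illustrative; the crate's exact fallback is cut off by this hunk.

```rust
// Reduce the thread count until each thread gets at least the minimum arena size.
fn effective_threads(budget_bytes: usize, cpus: usize, max_threads: usize, min_per_thread: usize) -> usize {
    let mut num_threads = cpus.min(max_threads).max(1);
    while num_threads > 1 && budget_bytes / num_threads < min_per_thread {
        num_threads -= 1;
    }
    num_threads
}

fn main() {
    // A 50 MB budget on an 8-core machine only sustains 3 indexing threads at 15 MB each.
    assert_eq!(effective_threads(50_000_000, 8, 8, 15_000_000), 3);
}
```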
@@ -7,7 +7,7 @@ use std::sync::Arc;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::SegmentComponent;
|
||||
use crate::index::SegmentId;
|
||||
use crate::core::SegmentId;
|
||||
use crate::schema::Schema;
|
||||
use crate::store::Compressor;
|
||||
use crate::{Inventory, Opstamp, TrackedObject};
|
||||
@@ -19,7 +19,7 @@ struct DeleteMeta {
|
||||
}
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub(crate) struct SegmentMetaInventory {
|
||||
pub struct SegmentMetaInventory {
|
||||
inventory: Inventory<InnerSegmentMeta>,
|
||||
}
|
||||
|
||||
@@ -408,7 +408,7 @@ impl fmt::Debug for IndexMeta {
|
||||
mod tests {
|
||||
|
||||
use super::IndexMeta;
|
||||
use crate::index::index_meta::UntrackedIndexMeta;
|
||||
use crate::core::index_meta::UntrackedIndexMeta;
|
||||
use crate::schema::{Schema, TEXT};
|
||||
use crate::store::Compressor;
|
||||
#[cfg(feature = "zstd-compression")]
|
||||
@@ -1,12 +1,11 @@
|
||||
use std::io;
|
||||
|
||||
use common::BinarySerializable;
|
||||
use fnv::FnvHashSet;
|
||||
|
||||
use crate::directory::FileSlice;
|
||||
use crate::positions::PositionReader;
|
||||
use crate::postings::{BlockSegmentPostings, SegmentPostings, TermInfo};
|
||||
use crate::schema::{IndexRecordOption, Term, Type, JSON_END_OF_PATH};
|
||||
use crate::schema::{IndexRecordOption, Term};
|
||||
use crate::termdict::TermDictionary;
|
||||
|
||||
/// The inverted index reader is in charge of accessing
|
||||
@@ -70,28 +69,6 @@ impl InvertedIndexReader {
|
||||
&self.termdict
|
||||
}
|
||||
|
||||
/// Return the fields and types encoded in the dictionary in lexicographic oder.
|
||||
/// Only valid on JSON fields.
|
||||
///
|
||||
/// Notice: This requires a full scan and therefore **very expensive**.
|
||||
/// TODO: Move to sstable to use the index.
|
||||
pub fn list_encoded_fields(&self) -> io::Result<Vec<(String, Type)>> {
|
||||
let mut stream = self.termdict.stream()?;
|
||||
let mut fields = Vec::new();
|
||||
let mut fields_set = FnvHashSet::default();
|
||||
while let Some((term, _term_info)) = stream.next() {
|
||||
if let Some(index) = term.iter().position(|&byte| byte == JSON_END_OF_PATH) {
|
||||
if !fields_set.contains(&term[..index + 2]) {
|
||||
fields_set.insert(term[..index + 2].to_vec());
|
||||
let typ = Type::from_code(term[index + 1]).unwrap();
|
||||
fields.push((String::from_utf8_lossy(&term[..index]).to_string(), typ));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(fields)
|
||||
}
|
||||
|
||||
/// Resets the block segment to another position of the postings
|
||||
/// file.
|
||||
///
|
||||
@@ -266,9 +243,7 @@ impl InvertedIndexReader {
|
||||
|
||||
/// Warmup a block postings given a `Term`.
|
||||
/// This method is for an advanced usage only.
|
||||
///
|
||||
/// returns a boolean, whether the term was found in the dictionary
|
||||
pub async fn warm_postings(&self, term: &Term, with_positions: bool) -> io::Result<bool> {
|
||||
pub async fn warm_postings(&self, term: &Term, with_positions: bool) -> io::Result<()> {
|
||||
let term_info_opt: Option<TermInfo> = self.get_term_info_async(term).await?;
|
||||
if let Some(term_info) = term_info_opt {
|
||||
let postings = self
|
||||
@@ -282,27 +257,23 @@ impl InvertedIndexReader {
|
||||
} else {
|
||||
postings.await?;
|
||||
}
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
Ok(())
|
||||
}
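A hedged sketch of how the boolean-returning variant of `warm_postings` (the first signature in this hunk) can be used: warm a term's postings in every segment and report whether the term exists anywhere. It assumes an async runtime and the usual conversion from `io::Error` into `TantivyError`.

```rust
// Warm one term across all segments of a searcher.
async fn warm_term(searcher: &tantivy::Searcher, term: tantivy::Term) -> tantivy::Result<bool> {
    let mut found = false;
    for segment_reader in searcher.segment_readers() {
        let inverted_index = segment_reader.inverted_index(term.field())?;
        // `warm_postings` loads the postings block for the term, if present.
        if inverted_index.warm_postings(&term, /* with_positions */ false).await? {
            found = true;
        }
    }
    Ok(found)
}
```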
|
||||
|
||||
/// Warmup a block postings given a range of `Term`s.
|
||||
/// This method is for an advanced usage only.
|
||||
///
|
||||
/// returns a boolean, whether a term matching the range was found in the dictionary
|
||||
pub async fn warm_postings_range(
|
||||
&self,
|
||||
terms: impl std::ops::RangeBounds<Term>,
|
||||
limit: Option<u64>,
|
||||
with_positions: bool,
|
||||
) -> io::Result<bool> {
|
||||
) -> io::Result<()> {
|
||||
let mut term_info = self.get_term_range_async(terms, limit).await?;
|
||||
|
||||
let Some(first_terminfo) = term_info.next() else {
|
||||
// no key matches, nothing more to load
|
||||
return Ok(false);
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let last_terminfo = term_info.last().unwrap_or_else(|| first_terminfo.clone());
|
||||
@@ -322,7 +293,7 @@ impl InvertedIndexReader {
|
||||
} else {
|
||||
postings.await?;
|
||||
}
|
||||
Ok(true)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Warmup the block postings for all terms.
|
||||
@@ -1,11 +1,11 @@
|
||||
use columnar::MonotonicallyMappableToU64;
|
||||
use common::{replace_in_place, JsonPathWriter};
|
||||
use common::replace_in_place;
|
||||
use murmurhash32::murmurhash2;
|
||||
use rustc_hash::FxHashMap;
|
||||
|
||||
use crate::fastfield::FastValue;
|
||||
use crate::postings::{IndexingContext, IndexingPosition, PostingsWriter};
|
||||
use crate::schema::document::{ReferenceValue, ReferenceValueLeaf, Value};
|
||||
use crate::schema::term::JSON_PATH_SEGMENT_SEP;
|
||||
use crate::schema::term::{JSON_PATH_SEGMENT_SEP, JSON_PATH_SEGMENT_SEP_STR};
|
||||
use crate::schema::{Field, Type, DATE_TIME_PRECISION_INDEXED};
|
||||
use crate::time::format_description::well_known::Rfc3339;
|
||||
use crate::time::{OffsetDateTime, UtcOffset};
|
||||
@@ -57,41 +57,31 @@ struct IndexingPositionsPerPath {
|
||||
}
|
||||
|
||||
impl IndexingPositionsPerPath {
|
||||
fn get_position_from_id(&mut self, id: u32) -> &mut IndexingPosition {
|
||||
self.positions_per_path.entry(id).or_default()
|
||||
fn get_position(&mut self, term: &Term) -> &mut IndexingPosition {
|
||||
self.positions_per_path
|
||||
.entry(murmurhash2(term.serialized_term()))
|
||||
.or_default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert JSON_PATH_SEGMENT_SEP to a dot.
pub fn json_path_sep_to_dot(path: &mut str) {
    // This is safe since we are replacing an ASCII character by another ASCII character.
    unsafe {
        replace_in_place(JSON_PATH_SEGMENT_SEP, b'.', path.as_bytes_mut());
    }
}

#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) fn index_json_values<'a, V: Value<'a>>(
|
||||
pub(crate) fn index_json_values<'a>(
|
||||
doc: DocId,
|
||||
json_visitors: impl Iterator<Item = crate::Result<V::ObjectIter>>,
|
||||
json_values: impl Iterator<Item = crate::Result<&'a serde_json::Map<String, serde_json::Value>>>,
|
||||
text_analyzer: &mut TextAnalyzer,
|
||||
expand_dots_enabled: bool,
|
||||
term_buffer: &mut Term,
|
||||
postings_writer: &mut dyn PostingsWriter,
|
||||
json_path_writer: &mut JsonPathWriter,
|
||||
ctx: &mut IndexingContext,
|
||||
) -> crate::Result<()> {
|
||||
json_path_writer.clear();
|
||||
json_path_writer.set_expand_dots(expand_dots_enabled);
|
||||
let mut json_term_writer = JsonTermWriter::wrap(term_buffer, expand_dots_enabled);
|
||||
let mut positions_per_path: IndexingPositionsPerPath = Default::default();
|
||||
for json_visitor_res in json_visitors {
|
||||
let json_visitor = json_visitor_res?;
|
||||
index_json_object::<V>(
|
||||
for json_value_res in json_values {
|
||||
let json_value = json_value_res?;
|
||||
index_json_object(
|
||||
doc,
|
||||
json_visitor,
|
||||
json_value,
|
||||
text_analyzer,
|
||||
term_buffer,
|
||||
json_path_writer,
|
||||
&mut json_term_writer,
|
||||
postings_writer,
|
||||
ctx,
|
||||
&mut positions_per_path,
|
||||
@@ -100,154 +90,93 @@ pub(crate) fn index_json_values<'a, V: Value<'a>>(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn index_json_object<'a, V: Value<'a>>(
|
||||
fn index_json_object(
|
||||
doc: DocId,
|
||||
json_visitor: V::ObjectIter,
|
||||
json_value: &serde_json::Map<String, serde_json::Value>,
|
||||
text_analyzer: &mut TextAnalyzer,
|
||||
term_buffer: &mut Term,
|
||||
json_path_writer: &mut JsonPathWriter,
|
||||
json_term_writer: &mut JsonTermWriter,
|
||||
postings_writer: &mut dyn PostingsWriter,
|
||||
ctx: &mut IndexingContext,
|
||||
positions_per_path: &mut IndexingPositionsPerPath,
|
||||
) {
|
||||
for (json_path_segment, json_value_visitor) in json_visitor {
|
||||
json_path_writer.push(json_path_segment);
|
||||
for (json_path_segment, json_value) in json_value {
|
||||
json_term_writer.push_path_segment(json_path_segment);
|
||||
index_json_value(
|
||||
doc,
|
||||
json_value_visitor,
|
||||
json_value,
|
||||
text_analyzer,
|
||||
term_buffer,
|
||||
json_path_writer,
|
||||
json_term_writer,
|
||||
postings_writer,
|
||||
ctx,
|
||||
positions_per_path,
|
||||
);
|
||||
json_path_writer.pop();
|
||||
json_term_writer.pop_path_segment();
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn index_json_value<'a, V: Value<'a>>(
|
||||
fn index_json_value(
|
||||
doc: DocId,
|
||||
json_value: V,
|
||||
json_value: &serde_json::Value,
|
||||
text_analyzer: &mut TextAnalyzer,
|
||||
term_buffer: &mut Term,
|
||||
json_path_writer: &mut JsonPathWriter,
|
||||
json_term_writer: &mut JsonTermWriter,
|
||||
postings_writer: &mut dyn PostingsWriter,
|
||||
ctx: &mut IndexingContext,
|
||||
positions_per_path: &mut IndexingPositionsPerPath,
|
||||
) {
|
||||
let set_path_id = |term_buffer: &mut Term, unordered_id: u32| {
|
||||
term_buffer.truncate_value_bytes(0);
|
||||
term_buffer.append_bytes(&unordered_id.to_be_bytes());
|
||||
};
|
||||
let set_type = |term_buffer: &mut Term, typ: Type| {
|
||||
term_buffer.append_bytes(&[typ.to_code()]);
|
||||
};
|
||||
|
||||
match json_value.as_value() {
|
||||
ReferenceValue::Leaf(leaf) => match leaf {
|
||||
ReferenceValueLeaf::Null => {}
|
||||
ReferenceValueLeaf::Str(val) => {
|
||||
let mut token_stream = text_analyzer.token_stream(val);
|
||||
let unordered_id = ctx
|
||||
.path_to_unordered_id
|
||||
.get_or_allocate_unordered_id(json_path_writer.as_str());
|
||||
|
||||
// TODO: make sure the chain position works out.
|
||||
set_path_id(term_buffer, unordered_id);
|
||||
set_type(term_buffer, Type::Str);
|
||||
let indexing_position = positions_per_path.get_position_from_id(unordered_id);
|
||||
match json_value {
|
||||
serde_json::Value::Null => {}
|
||||
serde_json::Value::Bool(val_bool) => {
|
||||
json_term_writer.set_fast_value(*val_bool);
|
||||
postings_writer.subscribe(doc, 0u32, json_term_writer.term(), ctx);
|
||||
}
|
||||
serde_json::Value::Number(number) => {
|
||||
if let Some(number_i64) = number.as_i64() {
|
||||
json_term_writer.set_fast_value(number_i64);
|
||||
} else if let Some(number_u64) = number.as_u64() {
|
||||
json_term_writer.set_fast_value(number_u64);
|
||||
} else if let Some(number_f64) = number.as_f64() {
|
||||
json_term_writer.set_fast_value(number_f64);
|
||||
}
|
||||
postings_writer.subscribe(doc, 0u32, json_term_writer.term(), ctx);
|
||||
}
|
||||
serde_json::Value::String(text) => match infer_type_from_str(text) {
|
||||
TextOrDateTime::Text(text) => {
|
||||
let mut token_stream = text_analyzer.token_stream(text);
|
||||
// TODO make sure the chain position works out.
|
||||
json_term_writer.close_path_and_set_type(Type::Str);
|
||||
let indexing_position = positions_per_path.get_position(json_term_writer.term());
|
||||
postings_writer.index_text(
|
||||
doc,
|
||||
&mut *token_stream,
|
||||
term_buffer,
|
||||
json_term_writer.term_buffer,
|
||||
ctx,
|
||||
indexing_position,
|
||||
);
|
||||
}
|
||||
ReferenceValueLeaf::U64(val) => {
|
||||
set_path_id(
|
||||
term_buffer,
|
||||
ctx.path_to_unordered_id
|
||||
.get_or_allocate_unordered_id(json_path_writer.as_str()),
|
||||
);
|
||||
term_buffer.append_type_and_fast_value(val);
|
||||
postings_writer.subscribe(doc, 0u32, term_buffer, ctx);
|
||||
}
|
||||
ReferenceValueLeaf::I64(val) => {
|
||||
set_path_id(
|
||||
term_buffer,
|
||||
ctx.path_to_unordered_id
|
||||
.get_or_allocate_unordered_id(json_path_writer.as_str()),
|
||||
);
|
||||
term_buffer.append_type_and_fast_value(val);
|
||||
postings_writer.subscribe(doc, 0u32, term_buffer, ctx);
|
||||
}
|
||||
ReferenceValueLeaf::F64(val) => {
|
||||
set_path_id(
|
||||
term_buffer,
|
||||
ctx.path_to_unordered_id
|
||||
.get_or_allocate_unordered_id(json_path_writer.as_str()),
|
||||
);
|
||||
term_buffer.append_type_and_fast_value(val);
|
||||
postings_writer.subscribe(doc, 0u32, term_buffer, ctx);
|
||||
}
|
||||
ReferenceValueLeaf::Bool(val) => {
|
||||
set_path_id(
|
||||
term_buffer,
|
||||
ctx.path_to_unordered_id
|
||||
.get_or_allocate_unordered_id(json_path_writer.as_str()),
|
||||
);
|
||||
term_buffer.append_type_and_fast_value(val);
|
||||
postings_writer.subscribe(doc, 0u32, term_buffer, ctx);
|
||||
}
|
||||
ReferenceValueLeaf::Date(val) => {
|
||||
set_path_id(
|
||||
term_buffer,
|
||||
ctx.path_to_unordered_id
|
||||
.get_or_allocate_unordered_id(json_path_writer.as_str()),
|
||||
);
|
||||
term_buffer.append_type_and_fast_value(val);
|
||||
postings_writer.subscribe(doc, 0u32, term_buffer, ctx);
|
||||
}
|
||||
ReferenceValueLeaf::PreTokStr(_) => {
|
||||
unimplemented!(
|
||||
"Pre-tokenized string support in dynamic fields is not yet implemented"
|
||||
)
|
||||
}
|
||||
ReferenceValueLeaf::Bytes(_) => {
|
||||
unimplemented!("Bytes support in dynamic fields is not yet implemented")
|
||||
}
|
||||
ReferenceValueLeaf::Facet(_) => {
|
||||
unimplemented!("Facet support in dynamic fields is not yet implemented")
|
||||
}
|
||||
ReferenceValueLeaf::IpAddr(_) => {
|
||||
unimplemented!("IP address support in dynamic fields is not yet implemented")
|
||||
TextOrDateTime::DateTime(dt) => {
|
||||
json_term_writer.set_fast_value(DateTime::from_utc(dt));
|
||||
postings_writer.subscribe(doc, 0u32, json_term_writer.term(), ctx);
|
||||
}
|
||||
},
|
||||
ReferenceValue::Array(elements) => {
|
||||
for val in elements {
|
||||
serde_json::Value::Array(arr) => {
|
||||
for val in arr {
|
||||
index_json_value(
|
||||
doc,
|
||||
val,
|
||||
text_analyzer,
|
||||
term_buffer,
|
||||
json_path_writer,
|
||||
json_term_writer,
|
||||
postings_writer,
|
||||
ctx,
|
||||
positions_per_path,
|
||||
);
|
||||
}
|
||||
}
|
||||
ReferenceValue::Object(object) => {
|
||||
index_json_object::<V>(
|
||||
serde_json::Value::Object(map) => {
|
||||
index_json_object(
|
||||
doc,
|
||||
object,
|
||||
map,
|
||||
text_analyzer,
|
||||
term_buffer,
|
||||
json_path_writer,
|
||||
json_term_writer,
|
||||
postings_writer,
|
||||
ctx,
|
||||
positions_per_path,
|
||||
@@ -256,6 +185,21 @@ fn index_json_value<'a, V: Value<'a>>(
|
||||
}
|
||||
}

enum TextOrDateTime<'a> {
Text(&'a str),
DateTime(OffsetDateTime),
}

fn infer_type_from_str(text: &str) -> TextOrDateTime {
match OffsetDateTime::parse(text, &Rfc3339) {
Ok(dt) => {
let dt_utc = dt.to_offset(UtcOffset::UTC);
TextOrDateTime::DateTime(dt_utc)
}
Err(_) => TextOrDateTime::Text(text),
}
}
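// Illustrative sketch (not from the upstream diff): the intended behavior of
// `infer_type_from_str` above, assuming the `time` crate's RFC 3339 parser as
// used in the function itself.
#[cfg(test)]
mod infer_type_from_str_example {
    use super::{infer_type_from_str, TextOrDateTime};

    #[test]
    fn rfc3339_strings_are_inferred_as_datetimes() {
        // A valid RFC 3339 timestamp is parsed and normalized to UTC.
        assert!(matches!(
            infer_type_from_str("2023-01-01T12:00:00Z"),
            TextOrDateTime::DateTime(_)
        ));
        // Anything that does not parse as RFC 3339 stays plain text.
        assert!(matches!(
            infer_type_from_str("hello"),
            TextOrDateTime::Text("hello")
        ));
    }
}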

// Tries to infer a JSON type from a string.
pub fn convert_to_fast_value_and_get_term(
json_term_writer: &mut JsonTermWriter,
@@ -328,7 +272,7 @@ pub struct JsonTermWriter<'a> {
/// In other words,
/// - `k8s.node` ends up as `["k8s", "node"]`.
/// - `k8s\.node` ends up as `["k8s.node"]`.
pub fn split_json_path(json_path: &str) -> Vec<String> {
fn split_json_path(json_path: &str) -> Vec<String> {
let mut escaped_state: bool = false;
let mut json_path_segments = Vec::new();
let mut buffer = String::new();
@@ -368,13 +312,17 @@ pub(crate) fn encode_column_name(
json_path: &str,
expand_dots_enabled: bool,
) -> String {
let mut path = JsonPathWriter::default();
path.push(field_name);
path.set_expand_dots(expand_dots_enabled);
for segment in split_json_path(json_path) {
path.push(&segment);
let mut column_key: String = String::with_capacity(field_name.len() + json_path.len() + 1);
column_key.push_str(field_name);
for mut segment in split_json_path(json_path) {
column_key.push_str(JSON_PATH_SEGMENT_SEP_STR);
if expand_dots_enabled {
// We need to replace `.` by JSON_PATH_SEGMENT_SEP.
unsafe { replace_in_place(b'.', JSON_PATH_SEGMENT_SEP, segment.as_bytes_mut()) };
}
column_key.push_str(&segment);
}
path.into()
column_key
}
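// Illustrative sketch (not from the upstream diff): the escaping rules
// documented above for `split_json_path`, written as a free-standing test.
#[cfg(test)]
mod split_json_path_example {
    use super::split_json_path;

    #[test]
    fn escaped_and_unescaped_dots() {
        // An unescaped dot separates path segments.
        assert_eq!(
            split_json_path("k8s.node"),
            vec!["k8s".to_string(), "node".to_string()]
        );
        // An escaped dot stays inside a single segment.
        assert_eq!(
            split_json_path(r"k8s\.node"),
            vec!["k8s.node".to_string()]
        );
    }
}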
|
||||
impl<'a> JsonTermWriter<'a> {
|
||||
@@ -414,7 +362,6 @@ impl<'a> JsonTermWriter<'a> {
|
||||
self.term_buffer.append_bytes(&[typ.to_code()]);
|
||||
}
|
||||
|
||||
// TODO: Remove this function and use JsonPathWriter instead.
|
||||
pub fn push_path_segment(&mut self, segment: &str) {
|
||||
// the path stack should never be empty.
|
||||
self.trim_to_end_of_path();
|
||||
|
||||
@@ -1,14 +1,32 @@
|
||||
mod executor;
|
||||
pub mod index;
|
||||
mod index_meta;
|
||||
mod inverted_index_reader;
|
||||
#[doc(hidden)]
|
||||
pub mod json_utils;
|
||||
pub mod searcher;
|
||||
mod segment;
|
||||
mod segment_component;
|
||||
mod segment_id;
|
||||
mod segment_reader;
|
||||
mod single_segment_index_writer;
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use once_cell::sync::Lazy;
|
||||
|
||||
pub use self::executor::Executor;
|
||||
pub use self::index::{Index, IndexBuilder};
|
||||
pub use self::index_meta::{
|
||||
IndexMeta, IndexSettings, IndexSortByField, Order, SegmentMeta, SegmentMetaInventory,
|
||||
};
|
||||
pub use self::inverted_index_reader::InvertedIndexReader;
|
||||
pub use self::searcher::{Searcher, SearcherGeneration};
|
||||
pub use self::segment::Segment;
|
||||
pub use self::segment_component::SegmentComponent;
|
||||
pub use self::segment_id::SegmentId;
|
||||
pub use self::segment_reader::SegmentReader;
|
||||
pub use self::single_segment_index_writer::SingleSegmentIndexWriter;
|
||||
|
||||
/// The meta file contains all the information about the list of segments and the schema
|
||||
/// of the index.
|
||||
|
||||
@@ -3,11 +3,9 @@ use std::sync::Arc;
|
||||
use std::{fmt, io};
|
||||
|
||||
use crate::collector::Collector;
|
||||
use crate::core::Executor;
|
||||
use crate::index::SegmentReader;
|
||||
use crate::core::{Executor, SegmentReader};
|
||||
use crate::query::{Bm25StatisticsProvider, EnableScoring, Query};
|
||||
use crate::schema::document::DocumentDeserialize;
|
||||
use crate::schema::{Schema, Term};
|
||||
use crate::schema::{Document, Schema, Term};
|
||||
use crate::space_usage::SearcherSpaceUsage;
|
||||
use crate::store::{CacheStats, StoreReader};
|
||||
use crate::{DocAddress, Index, Opstamp, SegmentId, TrackedObject};
|
||||
@@ -85,7 +83,7 @@ impl Searcher {
///
/// The searcher uses the segment ordinal to route the
/// request to the right `Segment`.
pub fn doc<D: DocumentDeserialize>(&self, doc_address: DocAddress) -> crate::Result<D> {
pub fn doc(&self, doc_address: DocAddress) -> crate::Result<Document> {
let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize];
store_reader.get(doc_address.doc_id)
}
@@ -105,10 +103,7 @@ impl Searcher {

/// Fetches a document in an asynchronous manner.
#[cfg(feature = "quickwit")]
pub async fn doc_async<D: DocumentDeserialize>(
&self,
doc_address: DocAddress,
) -> crate::Result<D> {
pub async fn doc_async(&self, doc_address: DocAddress) -> crate::Result<Document> {
let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize];
store_reader.get_async(doc_address.doc_id).await
}
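// Illustrative sketch (not from the upstream diff): fetching a stored document
// with the 0.21 `Searcher::doc` shown above. The development branch instead
// takes a generic `D: DocumentDeserialize` and returns that type.
#[allow(dead_code)]
fn fetch_first_doc(searcher: &Searcher) -> crate::Result<Document> {
    // Address of the first document in the first segment of the searcher.
    let doc_address = DocAddress::new(0u32, 0u32);
    // The segment ordinal inside `doc_address` routes the request to the
    // right segment's doc store.
    searcher.doc(doc_address)
}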
|
||||
@@ -2,9 +2,9 @@ use std::fmt;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use super::SegmentComponent;
|
||||
use crate::core::{Index, SegmentId, SegmentMeta};
|
||||
use crate::directory::error::{OpenReadError, OpenWriteError};
|
||||
use crate::directory::{Directory, FileSlice, WritePtr};
|
||||
use crate::index::{Index, SegmentId, SegmentMeta};
|
||||
use crate::schema::Schema;
|
||||
use crate::Opstamp;
|
||||
|
||||
@@ -1,17 +1,12 @@
|
||||
use std::collections::HashMap;
|
||||
use std::ops::BitOrAssign;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::{fmt, io};
|
||||
|
||||
use fnv::FnvHashMap;
|
||||
use itertools::Itertools;
|
||||
|
||||
use crate::core::{InvertedIndexReader, Segment, SegmentComponent, SegmentId};
|
||||
use crate::directory::{CompositeFile, FileSlice};
|
||||
use crate::error::DataCorruption;
|
||||
use crate::fastfield::{intersect_alive_bitsets, AliveBitSet, FacetReader, FastFieldReaders};
|
||||
use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
|
||||
use crate::index::{InvertedIndexReader, Segment, SegmentComponent, SegmentId};
|
||||
use crate::json_utils::json_path_sep_to_dot;
|
||||
use crate::schema::{Field, IndexRecordOption, Schema, Type};
|
||||
use crate::space_usage::SegmentSpaceUsage;
|
||||
use crate::store::StoreReader;
|
||||
@@ -285,103 +280,6 @@ impl SegmentReader {
|
||||
Ok(inv_idx_reader)
|
||||
}
|
||||
|
||||
/// Returns the list of fields that have been indexed in the segment.
|
||||
/// The field list includes the field defined in the schema as well as the fields
|
||||
/// that have been indexed as a part of a JSON field.
|
||||
/// The returned field name is the full field name, including the name of the JSON field.
|
||||
///
|
||||
/// The returned field names can be used in queries.
|
||||
///
|
||||
/// Notice: If your data contains JSON fields this is **very expensive**, as it requires
|
||||
/// browsing through the inverted index term dictionary and the columnar field dictionary.
|
||||
///
|
||||
/// Disclaimer: Some fields may not be listed here. For instance, if the schema contains a json
|
||||
/// field that is not indexed nor a fast field but is stored, it is possible for the field
|
||||
/// to not be listed.
|
||||
pub fn fields_metadata(&self) -> crate::Result<Vec<FieldMetadata>> {
|
||||
let mut indexed_fields: Vec<FieldMetadata> = Vec::new();
|
||||
let mut map_to_canonical = FnvHashMap::default();
|
||||
for (field, field_entry) in self.schema().fields() {
|
||||
let field_name = field_entry.name().to_string();
|
||||
let is_indexed = field_entry.is_indexed();
|
||||
|
||||
if is_indexed {
|
||||
let is_json = field_entry.field_type().value_type() == Type::Json;
|
||||
if is_json {
|
||||
let inv_index = self.inverted_index(field)?;
|
||||
let encoded_fields_in_index = inv_index.list_encoded_fields()?;
|
||||
let mut build_path = |field_name: &str, mut json_path: String| {
|
||||
// In this case we need to map the potential fast field to the field name
|
||||
// accepted by the query parser.
|
||||
let create_canonical =
|
||||
!field_entry.is_expand_dots_enabled() && json_path.contains('.');
|
||||
if create_canonical {
|
||||
// Without expand dots enabled dots need to be escaped.
|
||||
let escaped_json_path = json_path.replace('.', "\\.");
|
||||
let full_path = format!("{}.{}", field_name, escaped_json_path);
|
||||
let full_path_unescaped = format!("{}.{}", field_name, &json_path);
|
||||
map_to_canonical.insert(full_path_unescaped, full_path.to_string());
|
||||
full_path
|
||||
} else {
|
||||
// With expand dots enabled, we can use '.' instead of '\u{1}'.
|
||||
json_path_sep_to_dot(&mut json_path);
|
||||
format!("{}.{}", field_name, json_path)
|
||||
}
|
||||
};
|
||||
indexed_fields.extend(
|
||||
encoded_fields_in_index
|
||||
.into_iter()
|
||||
.map(|(name, typ)| (build_path(&field_name, name), typ))
|
||||
.map(|(field_name, typ)| FieldMetadata {
|
||||
indexed: true,
|
||||
stored: false,
|
||||
field_name,
|
||||
fast: false,
|
||||
typ,
|
||||
}),
|
||||
);
|
||||
} else {
|
||||
indexed_fields.push(FieldMetadata {
|
||||
indexed: true,
|
||||
stored: false,
|
||||
field_name: field_name.to_string(),
|
||||
fast: false,
|
||||
typ: field_entry.field_type().value_type(),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
let mut fast_fields: Vec<FieldMetadata> = self
|
||||
.fast_fields()
|
||||
.columnar()
|
||||
.iter_columns()?
|
||||
.map(|(mut field_name, handle)| {
|
||||
json_path_sep_to_dot(&mut field_name);
|
||||
// map to canonical path, to avoid similar but different entries.
|
||||
// Eventually we should just accept '.' seperated for all cases.
|
||||
let field_name = map_to_canonical
|
||||
.get(&field_name)
|
||||
.unwrap_or(&field_name)
|
||||
.to_string();
|
||||
FieldMetadata {
|
||||
indexed: false,
|
||||
stored: false,
|
||||
field_name,
|
||||
fast: true,
|
||||
typ: Type::from(handle.column_type()),
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
// Since the type is encoded differently in the fast field and in the inverted index,
|
||||
// the order of the fields is not guaranteed to be the same. Therefore, we sort the fields.
|
||||
// If we are sure that the order is the same, we can remove this sort.
|
||||
indexed_fields.sort_unstable();
|
||||
fast_fields.sort_unstable();
|
||||
let merged = merge_field_meta_data(vec![indexed_fields, fast_fields], &self.schema);
|
||||
|
||||
Ok(merged)
|
||||
}
|
||||
|
||||
/// Returns the segment id
|
||||
pub fn segment_id(&self) -> SegmentId {
|
||||
self.segment_id
|
||||
@@ -432,65 +330,6 @@ impl SegmentReader {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
|
||||
/// FieldMetadata
|
||||
pub struct FieldMetadata {
|
||||
/// The field name
|
||||
// Notice: Don't reorder the declaration of 1.field_name 2.typ, as it is used for ordering by
|
||||
// field_name then typ.
|
||||
pub field_name: String,
|
||||
/// The field type
|
||||
// Notice: Don't reorder the declaration of 1.field_name 2.typ, as it is used for ordering by
|
||||
// field_name then typ.
|
||||
pub typ: Type,
|
||||
/// Is the field indexed for search
|
||||
pub indexed: bool,
|
||||
/// Is the field stored in the doc store
|
||||
pub stored: bool,
|
||||
/// Is the field stored in the columnar storage
|
||||
pub fast: bool,
|
||||
}
|
||||
impl BitOrAssign for FieldMetadata {
|
||||
fn bitor_assign(&mut self, rhs: Self) {
|
||||
assert!(self.field_name == rhs.field_name);
|
||||
assert!(self.typ == rhs.typ);
|
||||
self.indexed |= rhs.indexed;
|
||||
self.stored |= rhs.stored;
|
||||
self.fast |= rhs.fast;
|
||||
}
|
||||
}
|
||||
|
||||
// Maybe too slow for the high cardinality case
|
||||
fn is_field_stored(field_name: &str, schema: &Schema) -> bool {
|
||||
schema
|
||||
.find_field(field_name)
|
||||
.map(|(field, _path)| schema.get_field_entry(field).is_stored())
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Helper to merge the field metadata from multiple segments.
|
||||
pub fn merge_field_meta_data(
|
||||
field_metadatas: Vec<Vec<FieldMetadata>>,
|
||||
schema: &Schema,
|
||||
) -> Vec<FieldMetadata> {
|
||||
let mut merged_field_metadata = Vec::new();
|
||||
for (_key, mut group) in &field_metadatas
|
||||
.into_iter()
|
||||
.kmerge_by(|left, right| left < right)
|
||||
// TODO: Remove allocation
|
||||
.group_by(|el| (el.field_name.to_string(), el.typ))
|
||||
{
|
||||
let mut merged: FieldMetadata = group.next().unwrap();
|
||||
for el in group {
|
||||
merged |= el;
|
||||
}
|
||||
// Currently is_field_stored is maybe too slow for the high cardinality case
|
||||
merged.stored = is_field_stored(&merged.field_name, schema);
|
||||
merged_field_metadata.push(merged);
|
||||
}
|
||||
merged_field_metadata
|
||||
}
|
||||
|
||||
fn intersect_alive_bitset(
|
||||
left_opt: Option<AliveBitSet>,
|
||||
right_opt: Option<AliveBitSet>,
|
||||
@@ -514,127 +353,9 @@ impl fmt::Debug for SegmentReader {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::index::Index;
|
||||
use crate::schema::{Schema, SchemaBuilder, Term, STORED, TEXT};
|
||||
use crate::{DocId, IndexWriter};
|
||||
|
||||
#[test]
|
||||
fn test_merge_field_meta_data_same() {
|
||||
let schema = SchemaBuilder::new().build();
|
||||
let field_metadata1 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: true,
|
||||
stored: false,
|
||||
fast: true,
|
||||
};
|
||||
let field_metadata2 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: true,
|
||||
stored: false,
|
||||
fast: true,
|
||||
};
|
||||
let res = merge_field_meta_data(
|
||||
vec![vec![field_metadata1.clone()], vec![field_metadata2]],
|
||||
&schema,
|
||||
);
|
||||
assert_eq!(res, vec![field_metadata1]);
|
||||
}
|
||||
#[test]
|
||||
fn test_merge_field_meta_data_different() {
|
||||
let schema = SchemaBuilder::new().build();
|
||||
let field_metadata1 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: false,
|
||||
stored: false,
|
||||
fast: true,
|
||||
};
|
||||
let field_metadata2 = FieldMetadata {
|
||||
field_name: "b".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: false,
|
||||
stored: false,
|
||||
fast: true,
|
||||
};
|
||||
let field_metadata3 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: true,
|
||||
stored: false,
|
||||
fast: false,
|
||||
};
|
||||
let res = merge_field_meta_data(
|
||||
vec![
|
||||
vec![field_metadata1.clone(), field_metadata2.clone()],
|
||||
vec![field_metadata3],
|
||||
],
|
||||
&schema,
|
||||
);
|
||||
let field_metadata_expected1 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: true,
|
||||
stored: false,
|
||||
fast: true,
|
||||
};
|
||||
assert_eq!(res, vec![field_metadata_expected1, field_metadata2.clone()]);
|
||||
}
|
||||
#[test]
|
||||
fn test_merge_field_meta_data_merge() {
|
||||
use pretty_assertions::assert_eq;
|
||||
let get_meta_data = |name: &str, typ: Type| FieldMetadata {
|
||||
field_name: name.to_string(),
|
||||
typ,
|
||||
indexed: false,
|
||||
stored: false,
|
||||
fast: true,
|
||||
};
|
||||
let schema = SchemaBuilder::new().build();
|
||||
let mut metas = vec![get_meta_data("d", Type::Str), get_meta_data("e", Type::U64)];
|
||||
metas.sort();
|
||||
let res = merge_field_meta_data(vec![vec![get_meta_data("e", Type::Str)], metas], &schema);
|
||||
assert_eq!(
|
||||
res,
|
||||
vec![
|
||||
get_meta_data("d", Type::Str),
|
||||
get_meta_data("e", Type::Str),
|
||||
get_meta_data("e", Type::U64),
|
||||
]
|
||||
);
|
||||
}
|
||||
#[test]
|
||||
fn test_merge_field_meta_data_bitxor() {
|
||||
let field_metadata1 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: false,
|
||||
stored: false,
|
||||
fast: true,
|
||||
};
|
||||
let field_metadata2 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: true,
|
||||
stored: false,
|
||||
fast: false,
|
||||
};
|
||||
let field_metadata_expected = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: true,
|
||||
stored: false,
|
||||
fast: true,
|
||||
};
|
||||
let mut res1 = field_metadata1.clone();
|
||||
res1 |= field_metadata2.clone();
|
||||
let mut res2 = field_metadata2.clone();
|
||||
res2 |= field_metadata1;
|
||||
assert_eq!(res1, field_metadata_expected);
|
||||
assert_eq!(res2, field_metadata_expected);
|
||||
}
|
||||
use crate::core::Index;
|
||||
use crate::schema::{Schema, Term, STORED, TEXT};
|
||||
use crate::DocId;
|
||||
|
||||
#[test]
|
||||
fn test_num_alive() -> crate::Result<()> {
|
||||
@@ -645,7 +366,7 @@ mod test {
|
||||
let name = schema.get_field("name").unwrap();
|
||||
|
||||
{
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(name => "tantivy"))?;
|
||||
index_writer.add_document(doc!(name => "horse"))?;
|
||||
index_writer.add_document(doc!(name => "jockey"))?;
|
||||
@@ -671,7 +392,7 @@ mod test {
|
||||
let name = schema.get_field("name").unwrap();
|
||||
|
||||
{
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(name => "tantivy"))?;
|
||||
index_writer.add_document(doc!(name => "horse"))?;
|
||||
index_writer.add_document(doc!(name => "jockey"))?;
|
||||
@@ -681,7 +402,7 @@ mod test {
|
||||
}
|
||||
|
||||
{
|
||||
let mut index_writer2: IndexWriter = index.writer(50_000_000)?;
|
||||
let mut index_writer2 = index.writer(50_000_000)?;
|
||||
index_writer2.delete_term(Term::from_field_text(name, "horse"));
|
||||
index_writer2.delete_term(Term::from_field_text(name, "cap"));
|
||||
|
||||
@@ -1,20 +1,16 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use crate::indexer::operation::AddOperation;
|
||||
use crate::indexer::segment_updater::save_metas;
|
||||
use crate::indexer::SegmentWriter;
|
||||
use crate::schema::document::Document;
|
||||
use crate::{Directory, Index, IndexMeta, Opstamp, Segment, TantivyDocument};
|
||||
use crate::{Directory, Document, Index, IndexMeta, Opstamp, Segment};
|
||||
|
||||
#[doc(hidden)]
|
||||
pub struct SingleSegmentIndexWriter<D: Document = TantivyDocument> {
|
||||
pub struct SingleSegmentIndexWriter {
|
||||
segment_writer: SegmentWriter,
|
||||
segment: Segment,
|
||||
opstamp: Opstamp,
|
||||
_phantom: PhantomData<D>,
|
||||
}
|
||||
|
||||
impl<D: Document> SingleSegmentIndexWriter<D> {
|
||||
impl SingleSegmentIndexWriter {
|
||||
pub fn new(index: Index, mem_budget: usize) -> crate::Result<Self> {
|
||||
let segment = index.new_segment();
|
||||
let segment_writer = SegmentWriter::for_segment(mem_budget, segment.clone())?;
|
||||
@@ -22,7 +18,6 @@ impl<D: Document> SingleSegmentIndexWriter<D> {
|
||||
segment_writer,
|
||||
segment,
|
||||
opstamp: 0,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -30,7 +25,7 @@ impl<D: Document> SingleSegmentIndexWriter<D> {
|
||||
self.segment_writer.mem_usage()
|
||||
}
|
||||
|
||||
pub fn add_document(&mut self, document: D) -> crate::Result<()> {
|
||||
pub fn add_document(&mut self, document: Document) -> crate::Result<()> {
|
||||
let opstamp = self.opstamp;
|
||||
self.opstamp += 1;
|
||||
self.segment_writer
|
||||
@@ -1,13 +1,12 @@
|
||||
use crate::collector::Count;
|
||||
use crate::directory::{RamDirectory, WatchCallback};
|
||||
use crate::indexer::{LogMergePolicy, NoMergePolicy};
|
||||
use crate::json_utils::JsonTermWriter;
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::query::TermQuery;
|
||||
use crate::schema::{Field, IndexRecordOption, Schema, Type, INDEXED, STRING, TEXT};
|
||||
use crate::schema::{Field, IndexRecordOption, Schema, INDEXED, STRING, TEXT};
|
||||
use crate::tokenizer::TokenizerManager;
|
||||
use crate::{
|
||||
Directory, DocSet, Index, IndexBuilder, IndexReader, IndexSettings, IndexWriter, Postings,
|
||||
ReloadPolicy, SegmentId, TantivyDocument, Term,
|
||||
Directory, Document, Index, IndexBuilder, IndexReader, IndexSettings, ReloadPolicy, SegmentId,
|
||||
Term,
|
||||
};
|
||||
|
||||
#[test]
|
||||
@@ -122,7 +121,7 @@ fn test_index_on_commit_reload_policy() -> crate::Result<()> {
|
||||
let index = Index::create_in_ram(schema);
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::OnCommitWithDelay)
|
||||
.reload_policy(ReloadPolicy::OnCommit)
|
||||
.try_into()
|
||||
.unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
@@ -148,7 +147,7 @@ mod mmap_specific {
|
||||
let index = Index::create_in_dir(tempdir_path, schema).unwrap();
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::OnCommitWithDelay)
|
||||
.reload_policy(ReloadPolicy::OnCommit)
|
||||
.try_into()
|
||||
.unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
@@ -160,7 +159,7 @@ mod mmap_specific {
|
||||
let schema = throw_away_schema();
|
||||
let field = schema.get_field("num_likes").unwrap();
|
||||
let mut index = Index::create_from_tempdir(schema)?;
|
||||
let mut writer: IndexWriter = index.writer_for_tests()?;
|
||||
let mut writer = index.writer_for_tests()?;
|
||||
writer.commit()?;
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
@@ -190,7 +189,7 @@ mod mmap_specific {
|
||||
let read_index = Index::open_in_dir(&tempdir_path).unwrap();
|
||||
let reader = read_index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::OnCommitWithDelay)
|
||||
.reload_policy(ReloadPolicy::OnCommit)
|
||||
.try_into()
|
||||
.unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
@@ -209,7 +208,7 @@ fn test_index_on_commit_reload_policy_aux(
|
||||
.watch(WatchCallback::new(move || {
|
||||
let _ = sender.send(());
|
||||
}));
|
||||
let mut writer: IndexWriter = index.writer_for_tests()?;
|
||||
let mut writer = index.writer_for_tests()?;
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
writer.add_document(doc!(field=>1u64))?;
|
||||
writer.commit().unwrap();
|
||||
@@ -243,7 +242,7 @@ fn garbage_collect_works_as_intended() -> crate::Result<()> {
|
||||
let field = schema.get_field("num_likes").unwrap();
|
||||
let index = Index::create(directory.clone(), schema, IndexSettings::default())?;
|
||||
|
||||
let mut writer: IndexWriter = index.writer_with_num_threads(1, 32_000_000).unwrap();
|
||||
let mut writer = index.writer_with_num_threads(1, 32_000_000).unwrap();
|
||||
for _seg in 0..8 {
|
||||
for i in 0u64..1_000u64 {
|
||||
writer.add_document(doc!(field => i))?;
|
||||
@@ -307,7 +306,7 @@ fn test_merging_segment_update_docfreq() {
|
||||
let id_field = schema_builder.add_text_field("id", STRING);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut writer = index.writer_for_tests().unwrap();
|
||||
writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
for _ in 0..5 {
|
||||
writer.add_document(doc!(text_field=>"hello")).unwrap();
|
||||
@@ -318,13 +317,13 @@ fn test_merging_segment_update_docfreq() {
|
||||
writer
|
||||
.add_document(doc!(text_field=>"hello", id_field=>"TO_BE_DELETED"))
|
||||
.unwrap();
|
||||
writer.add_document(TantivyDocument::default()).unwrap();
|
||||
writer.add_document(Document::default()).unwrap();
|
||||
writer.commit().unwrap();
|
||||
for _ in 0..7 {
|
||||
writer.add_document(doc!(text_field=>"hello")).unwrap();
|
||||
}
|
||||
writer.add_document(TantivyDocument::default()).unwrap();
|
||||
writer.add_document(TantivyDocument::default()).unwrap();
|
||||
writer.add_document(Document::default()).unwrap();
|
||||
writer.add_document(Document::default()).unwrap();
|
||||
writer.delete_term(Term::from_field_text(id_field, "TO_BE_DELETED"));
|
||||
writer.commit().unwrap();
|
||||
|
||||
@@ -345,132 +344,3 @@ fn test_merging_segment_update_docfreq() {
|
||||
let term_info = inv_index.get_term_info(&term).unwrap().unwrap();
|
||||
assert_eq!(term_info.doc_freq, 12);
|
||||
}
|
||||
|
||||
// motivated by https://github.com/quickwit-oss/quickwit/issues/4130
|
||||
#[test]
|
||||
fn test_positions_merge_bug_non_text_json_vint() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_json_field("dynamic", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let mut writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut merge_policy = LogMergePolicy::default();
|
||||
merge_policy.set_min_num_segments(2);
|
||||
writer.set_merge_policy(Box::new(merge_policy));
|
||||
// Here a string would work.
|
||||
let doc_json = r#"{"tenant_id":75}"#;
|
||||
let vals = serde_json::from_str(doc_json).unwrap();
|
||||
let mut doc = TantivyDocument::default();
|
||||
doc.add_object(field, vals);
|
||||
writer.add_document(doc.clone()).unwrap();
|
||||
writer.commit().unwrap();
|
||||
writer.add_document(doc.clone()).unwrap();
|
||||
writer.commit().unwrap();
|
||||
writer.wait_merging_threads().unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
assert_eq!(reader.searcher().segment_readers().len(), 1);
|
||||
}
|
||||
|
||||
// Same as above but with bitpacked blocks
|
||||
#[test]
|
||||
fn test_positions_merge_bug_non_text_json_bitpacked_block() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_json_field("dynamic", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let mut writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut merge_policy = LogMergePolicy::default();
|
||||
merge_policy.set_min_num_segments(2);
|
||||
writer.set_merge_policy(Box::new(merge_policy));
|
||||
// Here a string would work.
|
||||
let doc_json = r#"{"tenant_id":75}"#;
|
||||
let vals = serde_json::from_str(doc_json).unwrap();
|
||||
let mut doc = TantivyDocument::default();
|
||||
doc.add_object(field, vals);
|
||||
for _ in 0..128 {
|
||||
writer.add_document(doc.clone()).unwrap();
|
||||
}
|
||||
writer.commit().unwrap();
|
||||
writer.add_document(doc.clone()).unwrap();
|
||||
writer.commit().unwrap();
|
||||
writer.wait_merging_threads().unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
assert_eq!(reader.searcher().segment_readers().len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_non_text_json_term_freq() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_json_field("dynamic", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let mut writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
// Here a string would work.
|
||||
let doc_json = r#"{"tenant_id":75}"#;
|
||||
let vals = serde_json::from_str(doc_json).unwrap();
|
||||
let mut doc = TantivyDocument::default();
|
||||
doc.add_object(field, vals);
|
||||
writer.add_document(doc.clone()).unwrap();
|
||||
writer.commit().unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
assert_eq!(reader.searcher().segment_readers().len(), 1);
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0u32);
|
||||
let inv_idx = segment_reader.inverted_index(field).unwrap();
|
||||
let mut term = Term::with_type_and_field(Type::Json, field);
|
||||
let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
|
||||
json_term_writer.push_path_segment("tenant_id");
|
||||
json_term_writer.close_path_and_set_type(Type::U64);
|
||||
json_term_writer.set_fast_value(75u64);
|
||||
let postings = inv_idx
|
||||
.read_postings(
|
||||
json_term_writer.term(),
|
||||
IndexRecordOption::WithFreqsAndPositions,
|
||||
)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(postings.doc(), 0);
|
||||
assert_eq!(postings.term_freq(), 1u32);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_non_text_json_term_freq_bitpacked() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_json_field("dynamic", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let mut writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
// Here a string would work.
|
||||
let doc_json = r#"{"tenant_id":75}"#;
|
||||
let vals = serde_json::from_str(doc_json).unwrap();
|
||||
let mut doc = TantivyDocument::default();
|
||||
doc.add_object(field, vals);
|
||||
let num_docs = 132;
|
||||
for _ in 0..num_docs {
|
||||
writer.add_document(doc.clone()).unwrap();
|
||||
}
|
||||
writer.commit().unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
assert_eq!(reader.searcher().segment_readers().len(), 1);
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0u32);
|
||||
let inv_idx = segment_reader.inverted_index(field).unwrap();
|
||||
let mut term = Term::with_type_and_field(Type::Json, field);
|
||||
let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
|
||||
json_term_writer.push_path_segment("tenant_id");
|
||||
json_term_writer.close_path_and_set_type(Type::U64);
|
||||
json_term_writer.set_fast_value(75u64);
|
||||
let mut postings = inv_idx
|
||||
.read_postings(
|
||||
json_term_writer.term(),
|
||||
IndexRecordOption::WithFreqsAndPositions,
|
||||
)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(postings.doc(), 0);
|
||||
assert_eq!(postings.term_freq(), 1u32);
|
||||
for i in 1..num_docs {
|
||||
assert_eq!(postings.advance(), i);
|
||||
assert_eq!(postings.term_freq(), 1u32);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -222,8 +222,8 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
|
||||
/// registered (and whose [`WatchHandle`] is still alive) are triggered.
|
||||
///
|
||||
/// Internally, tantivy only uses this API to detect new commits to implement the
|
||||
/// `OnCommitWithDelay` `ReloadPolicy`. Not implementing watch in a `Directory` only prevents
|
||||
/// the `OnCommitWithDelay` `ReloadPolicy` to work properly.
|
||||
/// `OnCommit` `ReloadPolicy`. Not implementing watch in a `Directory` only prevents the
|
||||
/// `OnCommit` `ReloadPolicy` to work properly.
|
||||
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle>;
|
||||
}
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::directory::error::Incompatibility;
|
||||
use crate::directory::{AntiCallToken, FileSlice, TerminatingWrite};
|
||||
use crate::{Version, INDEX_FORMAT_OLDEST_SUPPORTED_VERSION, INDEX_FORMAT_VERSION};
|
||||
use crate::{Version, INDEX_FORMAT_VERSION};
|
||||
|
||||
const FOOTER_MAX_LEN: u32 = 50_000;
|
||||
|
||||
@@ -102,11 +102,10 @@ impl Footer {
|
||||
/// Confirms that the index will be read correctly by this version of tantivy
|
||||
/// Has to be called after `extract_footer` to make sure it's not accessing uninitialised memory
|
||||
pub fn is_compatible(&self) -> Result<(), Incompatibility> {
|
||||
const SUPPORTED_INDEX_FORMAT_VERSION_RANGE: std::ops::RangeInclusive<u32> =
|
||||
INDEX_FORMAT_OLDEST_SUPPORTED_VERSION..=INDEX_FORMAT_VERSION;
|
||||
|
||||
let library_version = crate::version();
|
||||
if !SUPPORTED_INDEX_FORMAT_VERSION_RANGE.contains(&self.version.index_format_version) {
|
||||
if self.version.index_format_version < 4
|
||||
|| self.version.index_format_version > INDEX_FORMAT_VERSION
|
||||
{
|
||||
return Err(Incompatibility::IndexMismatch {
|
||||
library_version: library_version.clone(),
|
||||
index_version: self.version.clone(),
|
||||
|
||||
@@ -1,15 +1,13 @@
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
use std::fs::{self, File, OpenOptions};
|
||||
use std::io::{self, BufWriter, Read, Write};
|
||||
use std::io::{self, BufWriter, Read, Seek, Write};
|
||||
use std::ops::Deref;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, RwLock, Weak};
|
||||
|
||||
use common::StableDeref;
|
||||
use fs4::FileExt;
|
||||
#[cfg(all(feature = "mmap", unix))]
|
||||
pub use memmap2::Advice;
|
||||
use memmap2::Mmap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tempfile::TempDir;
|
||||
@@ -23,6 +21,8 @@ use crate::directory::{
|
||||
AntiCallToken, Directory, DirectoryLock, FileHandle, Lock, OwnedBytes, TerminatingWrite,
|
||||
WatchCallback, WatchHandle, WritePtr,
|
||||
};
|
||||
#[cfg(unix)]
|
||||
use crate::Advice;
|
||||
|
||||
pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
|
||||
pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
|
||||
@@ -328,6 +328,12 @@ impl Write for SafeFileWriter {
|
||||
}
|
||||
}
|
||||
|
||||
impl Seek for SafeFileWriter {
|
||||
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
|
||||
self.0.seek(pos)
|
||||
}
|
||||
}
|
||||
|
||||
impl TerminatingWrite for SafeFileWriter {
|
||||
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
|
||||
self.0.flush()?;
|
||||
@@ -479,7 +485,6 @@ impl Directory for MmapDirectory {
|
||||
let file: File = OpenOptions::new()
|
||||
.write(true)
|
||||
.create(true) //< if the file does not exist yet, create it.
|
||||
.truncate(false)
|
||||
.open(full_path)
|
||||
.map_err(LockError::wrap_io_error)?;
|
||||
if lock.is_blocking {
|
||||
@@ -534,7 +539,7 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::indexer::LogMergePolicy;
|
||||
use crate::schema::{Schema, SchemaBuilder, TEXT};
|
||||
use crate::{Index, IndexSettings, IndexWriter, ReloadPolicy};
|
||||
use crate::{Index, IndexSettings, ReloadPolicy};
|
||||
|
||||
#[test]
|
||||
fn test_open_non_existent_path() {
|
||||
@@ -646,7 +651,7 @@ mod tests {
|
||||
let index =
|
||||
Index::create(mmap_directory.clone(), schema, IndexSettings::default()).unwrap();
|
||||
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut log_merge_policy = LogMergePolicy::default();
|
||||
log_merge_policy.set_min_num_segments(3);
|
||||
index_writer.set_merge_policy(Box::new(log_merge_policy));
|
||||
@@ -674,7 +679,7 @@ mod tests {
|
||||
let num_segments = reader.searcher().segment_readers().len();
|
||||
assert!(num_segments <= 4);
|
||||
let num_components_except_deletes_and_tempstore =
|
||||
crate::index::SegmentComponent::iterator().len() - 2;
|
||||
crate::core::SegmentComponent::iterator().len() - 2;
|
||||
let max_num_mmapped = num_components_except_deletes_and_tempstore * num_segments;
|
||||
assert_eventually(|| {
|
||||
let num_mmapped = mmap_directory.get_cache_info().mmapped.len();
|
||||
|
||||
@@ -42,9 +42,6 @@ pub struct GarbageCollectionResult {
|
||||
pub failed_to_delete_files: Vec<PathBuf>,
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "mmap", unix))]
|
||||
pub use memmap2::Advice;
|
||||
|
||||
pub use self::managed_directory::ManagedDirectory;
|
||||
#[cfg(feature = "mmap")]
|
||||
pub use self::mmap_directory::MmapDirectory;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use std::collections::HashMap;
|
||||
use std::io::{self, BufWriter, Cursor, Write};
|
||||
use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::{fmt, result};
|
||||
@@ -48,6 +48,12 @@ impl Drop for VecWriter {
|
||||
}
|
||||
}
|
||||
|
||||
impl Seek for VecWriter {
|
||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||
self.data.seek(pos)
|
||||
}
|
||||
}
|
||||
|
||||
impl Write for VecWriter {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.is_flushed = false;
|
||||
@@ -85,7 +91,7 @@ impl InnerDirectory {
|
||||
self.fs
|
||||
.get(path)
|
||||
.ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
|
||||
.cloned()
|
||||
.map(Clone::clone)
|
||||
}
|
||||
|
||||
fn delete(&mut self, path: &Path) -> result::Result<(), DeleteError> {
|
||||
|
||||
@@ -17,7 +17,7 @@ pub trait DocSet: Send {
///
/// The DocId of the next element is returned.
/// In other words we should always have :
/// ```compile_fail
/// ```ignore
/// let doc = docset.advance();
/// assert_eq!(doc, docset.doc());
/// ```

10
src/error.rs
@@ -11,7 +11,6 @@ use crate::directory::error::{
Incompatibility, LockError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::fastfield::FastFieldNotAvailableError;
use crate::schema::document::DeserializeError;
use crate::{query, schema};

/// Represents a `DataCorruption` error.
@@ -107,9 +106,6 @@ pub enum TantivyError {
/// e.g. a datastructure is incorrectly inititalized.
#[error("Internal error: '{0}'")]
InternalError(String),
#[error("Deserialize error: {0}")]
/// An error occurred while attempting to deserialize a document.
DeserializeError(DeserializeError),
}

impl From<io::Error> for TantivyError {
|
||||
@@ -180,9 +176,3 @@ impl From<rayon::ThreadPoolBuildError> for TantivyError {
|
||||
TantivyError::SystemError(error.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<DeserializeError> for TantivyError {
|
||||
fn from(error: DeserializeError) -> TantivyError {
|
||||
TantivyError::DeserializeError(error)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -62,9 +62,8 @@ impl FacetReader {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::schema::document::Value;
|
||||
use crate::schema::{Facet, FacetOptions, SchemaBuilder, STORED};
|
||||
use crate::{DocAddress, Index, IndexWriter, TantivyDocument};
|
||||
use crate::schema::{Facet, FacetOptions, SchemaBuilder, Value, STORED};
|
||||
use crate::{DocAddress, Document, Index};
|
||||
|
||||
#[test]
|
||||
fn test_facet_only_indexed() {
|
||||
@@ -72,7 +71,7 @@ mod tests {
|
||||
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(facet_field=>Facet::from_text("/a/b").unwrap()))
|
||||
.unwrap();
|
||||
@@ -86,10 +85,8 @@ mod tests {
|
||||
let mut facet = Facet::default();
|
||||
facet_reader.facet_from_ord(0, &mut facet).unwrap();
|
||||
assert_eq!(facet.to_path_string(), "/a/b");
|
||||
let doc = searcher
|
||||
.doc::<TantivyDocument>(DocAddress::new(0u32, 0u32))
|
||||
.unwrap();
|
||||
let value = doc.get_first(facet_field).and_then(|v| v.as_facet());
|
||||
let doc = searcher.doc(DocAddress::new(0u32, 0u32)).unwrap();
|
||||
let value = doc.get_first(facet_field).and_then(Value::as_facet);
|
||||
assert_eq!(value, None);
|
||||
}
|
||||
|
||||
@@ -99,7 +96,7 @@ mod tests {
|
||||
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(facet_field=>Facet::from_text("/parent/child1").unwrap()))
|
||||
.unwrap();
|
||||
@@ -145,8 +142,8 @@ mod tests {
|
||||
let mut facet_ords = Vec::new();
|
||||
facet_ords.extend(facet_reader.facet_ords(0u32));
|
||||
assert_eq!(&facet_ords, &[0u64]);
|
||||
let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0u32, 0u32))?;
|
||||
let value: Option<&Facet> = doc.get_first(facet_field).and_then(|v| v.as_facet());
|
||||
let doc = searcher.doc(DocAddress::new(0u32, 0u32))?;
|
||||
let value: Option<&Facet> = doc.get_first(facet_field).and_then(Value::as_facet);
|
||||
assert_eq!(value, Facet::from_text("/a/b").ok().as_ref());
|
||||
Ok(())
|
||||
}
|
||||
@@ -159,7 +156,7 @@ mod tests {
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(facet_field=>Facet::from_text("/a/b").unwrap()))?;
|
||||
index_writer.add_document(TantivyDocument::default())?;
|
||||
index_writer.add_document(Document::default())?;
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let facet_reader = searcher.segment_reader(0u32).facet_reader("facet").unwrap();
|
||||
@@ -179,8 +176,8 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(TantivyDocument::default())?;
|
||||
index_writer.add_document(TantivyDocument::default())?;
|
||||
index_writer.add_document(Document::default())?;
|
||||
index_writer.add_document(Document::default())?;
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let facet_reader = searcher.segment_reader(0u32).facet_reader("facet").unwrap();
|
||||
|
||||
@@ -90,12 +90,12 @@ mod tests {
|
||||
use crate::directory::{Directory, RamDirectory, WritePtr};
|
||||
use crate::merge_policy::NoMergePolicy;
|
||||
use crate::schema::{
|
||||
Facet, FacetOptions, Field, JsonObjectOptions, Schema, SchemaBuilder, TantivyDocument,
|
||||
Document, Facet, FacetOptions, Field, JsonObjectOptions, Schema, SchemaBuilder,
|
||||
TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
|
||||
};
|
||||
use crate::time::OffsetDateTime;
|
||||
use crate::tokenizer::{LowerCaser, RawTokenizer, TextAnalyzer, TokenizerManager};
|
||||
use crate::{DateOptions, DateTimePrecision, Index, IndexWriter, SegmentId, SegmentReader};
|
||||
use crate::{DateOptions, DateTimePrecision, Index, SegmentId, SegmentReader};
|
||||
|
||||
pub static SCHEMA: Lazy<Schema> = Lazy::new(|| {
|
||||
let mut schema_builder = Schema::builder();
|
||||
@@ -131,7 +131,7 @@ mod tests {
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
|
||||
assert_eq!(file.len(), 80);
|
||||
assert_eq!(file.len(), 93);
|
||||
let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap();
|
||||
let column = fast_field_readers
|
||||
.u64("field")
|
||||
@@ -181,7 +181,7 @@ mod tests {
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 108);
|
||||
assert_eq!(file.len(), 121);
|
||||
let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap();
|
||||
let col = fast_field_readers
|
||||
.u64("field")
|
||||
@@ -214,7 +214,7 @@ mod tests {
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 81);
|
||||
assert_eq!(file.len(), 94);
|
||||
let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap();
|
||||
let fast_field_reader = fast_field_readers
|
||||
.u64("field")
|
||||
@@ -246,7 +246,7 @@ mod tests {
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 4476);
|
||||
assert_eq!(file.len(), 4489);
|
||||
{
|
||||
let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap();
|
||||
let col = fast_field_readers
|
||||
@@ -271,7 +271,7 @@ mod tests {
|
||||
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
|
||||
for i in -100i64..10_000i64 {
|
||||
let mut doc = TantivyDocument::default();
|
||||
let mut doc = Document::default();
|
||||
doc.add_i64(i64_field, i);
|
||||
fast_field_writers.add_document(&doc).unwrap();
|
||||
}
|
||||
@@ -279,7 +279,7 @@ mod tests {
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 252);
|
||||
assert_eq!(file.len(), 265);
|
||||
|
||||
{
|
||||
let fast_field_readers = FastFieldReaders::open(file, schema).unwrap();
|
||||
@@ -312,7 +312,7 @@ mod tests {
|
||||
{
|
||||
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
|
||||
let doc = TantivyDocument::default();
|
||||
let doc = Document::default();
|
||||
fast_field_writers.add_document(&doc).unwrap();
|
||||
fast_field_writers.serialize(&mut write, None).unwrap();
|
||||
write.terminate().unwrap();
|
||||
@@ -345,7 +345,7 @@ mod tests {
|
||||
{
|
||||
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
|
||||
let doc = TantivyDocument::default();
|
||||
let doc = Document::default();
|
||||
fast_field_writers.add_document(&doc).unwrap();
|
||||
fast_field_writers.serialize(&mut write, None).unwrap();
|
||||
write.terminate().unwrap();
|
||||
@@ -416,7 +416,7 @@ mod tests {
|
||||
let date_field = schema_builder.add_date_field("date", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
index_writer
|
||||
.add_document(doc!(date_field => DateTime::from_utc(OffsetDateTime::now_utc())))
|
||||
@@ -452,7 +452,7 @@ mod tests {
|
||||
|
||||
{
|
||||
// first segment
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
index_writer
|
||||
.add_document(doc!(
|
||||
@@ -506,7 +506,7 @@ mod tests {
|
||||
|
||||
{
|
||||
// second segment
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
|
||||
index_writer
|
||||
.add_document(doc!(
|
||||
@@ -537,7 +537,7 @@ mod tests {
|
||||
// Merging the segments
|
||||
{
|
||||
let segment_ids = index.searchable_segment_ids().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer.merge(&segment_ids).wait().unwrap();
|
||||
index_writer.wait_merging_threads().unwrap();
|
||||
}
|
||||
@@ -662,7 +662,7 @@ mod tests {
|
||||
// Merging the segments
|
||||
{
|
||||
let segment_ids = index.searchable_segment_ids()?;
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.merge(&segment_ids).wait()?;
|
||||
index_writer.wait_merging_threads()?;
|
||||
}
|
||||
@@ -773,7 +773,7 @@ mod tests {
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 84);
|
||||
assert_eq!(file.len(), 102);
|
||||
let fast_field_readers = FastFieldReaders::open(file, schema).unwrap();
|
||||
let bool_col = fast_field_readers.bool("field_bool").unwrap();
|
||||
assert_eq!(bool_col.first(0), Some(true));
|
||||
@@ -805,7 +805,7 @@ mod tests {
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 96);
|
||||
assert_eq!(file.len(), 114);
|
||||
let readers = FastFieldReaders::open(file, schema).unwrap();
|
||||
let bool_col = readers.bool("field_bool").unwrap();
|
||||
for i in 0..25 {
|
||||
@@ -824,13 +824,13 @@ mod tests {
|
||||
{
|
||||
let mut write: WritePtr = directory.open_write(path).unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
|
||||
let doc = TantivyDocument::default();
|
||||
let doc = Document::default();
|
||||
fast_field_writers.add_document(&doc).unwrap();
|
||||
fast_field_writers.serialize(&mut write, None).unwrap();
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 86);
|
||||
assert_eq!(file.len(), 104);
|
||||
let fastfield_readers = FastFieldReaders::open(file, schema).unwrap();
|
||||
let col = fastfield_readers.bool("field_bool").unwrap();
|
||||
assert_eq!(col.first(0), None);
|
||||
@@ -846,7 +846,7 @@ mod tests {
|
||||
assert_eq!(col.get_val(0), true);
|
||||
}
|
||||
|
||||
fn get_index(docs: &[crate::TantivyDocument], schema: &Schema) -> crate::Result<RamDirectory> {
|
||||
fn get_index(docs: &[crate::Document], schema: &Schema) -> crate::Result<RamDirectory> {
|
||||
let directory: RamDirectory = RamDirectory::create();
|
||||
{
|
||||
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
@@ -888,7 +888,7 @@ mod tests {
|
||||
let field = schema_builder.add_date_field("field", date_options);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let docs: Vec<TantivyDocument> = times.iter().map(|time| doc!(field=>*time)).collect();
|
||||
let docs: Vec<Document> = times.iter().map(|time| doc!(field=>*time)).collect();
|
||||
|
||||
let directory = get_index(&docs[..], &schema).unwrap();
|
||||
let path = Path::new("test");
|
||||
@@ -962,15 +962,11 @@ mod tests {
|
||||
let ip_field = schema_builder.add_u64_field("ip", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let ip_addr = Ipv6Addr::new(1, 2, 3, 4, 5, 1, 2, 3);
|
||||
index_writer
|
||||
.add_document(TantivyDocument::default())
|
||||
.unwrap();
|
||||
index_writer.add_document(Document::default()).unwrap();
|
||||
index_writer.add_document(doc!(ip_field=>ip_addr)).unwrap();
|
||||
index_writer
|
||||
.add_document(TantivyDocument::default())
|
||||
.unwrap();
|
||||
index_writer.add_document(Document::default()).unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let fastfields = searcher.segment_reader(0u32).fast_fields();
|
||||
@@ -1090,7 +1086,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", json_option);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"attr.age": 32})))
|
||||
.unwrap();
|
||||
@@ -1116,7 +1112,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", json_option);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"age": 32})))
|
||||
.unwrap();
|
||||
@@ -1143,7 +1139,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", json_option);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"attr.age": 32})))
|
||||
.unwrap();
|
||||
@@ -1166,7 +1162,7 @@ mod tests {
|
||||
let field_with_dot = schema_builder.add_i64_field("field.with.dot", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(field_with_dot => 32i64))
|
||||
.unwrap();
|
||||
@@ -1188,7 +1184,7 @@ mod tests {
|
||||
let shadowing_json_field = schema_builder.add_json_field("jsonfield.attr", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json_field=> json!({"attr": {"age": 32}}), shadowing_json_field=>json!({"age": 33})))
|
||||
.unwrap();
|
||||
@@ -1219,7 +1215,7 @@ mod tests {
|
||||
|
||||
let mut index = Index::create_in_ram(schema);
|
||||
index.set_fast_field_tokenizers(ff_tokenizer_manager);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(text_field => "Test1 test2"))
|
||||
.unwrap();
|
||||
@@ -1248,7 +1244,7 @@ mod tests {
|
||||
let log_field = schema_builder.add_text_field("log_level", text_fieldtype);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(log_field => "info"))
|
||||
.unwrap();
|
||||
@@ -1281,25 +1277,18 @@ mod tests {
|
||||
let shadowing_json_field = schema_builder.add_json_field("jsonfield.attr", json_option);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json_field=> json!({"attr.age": 32}), shadowing_json_field=>json!({"age": 33})))
|
||||
.unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let fast_field_reader = searcher.segment_reader(0u32).fast_fields();
|
||||
// Supported for now, maybe dropped in the future.
|
||||
let column = fast_field_reader
|
||||
.column_opt::<i64>("jsonfield.attr.age")
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let vals: Vec<i64> = column.values_for_doc(0u32).collect();
|
||||
assert_eq!(&vals, &[33]);
|
||||
let column = fast_field_reader
|
||||
.column_opt::<i64>("jsonfield\\.attr.age")
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let vals: Vec<i64> = column.values_for_doc(0u32).collect();
|
||||
assert_eq!(&vals, &[33]);
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.