Mirror of https://github.com/quickwit-oss/tantivy.git — synced 2026-01-04 08:12:54 +00:00

Compare commits: trinity/yo … columnread (1 commit, 85ebb3c420)

.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
cpp/* linguist-vendored
|
||||||
.gitignore (vendored) — 1 changed line

@@ -9,6 +9,7 @@ target/release
 Cargo.lock
 benchmark
 .DS_Store
+cpp/simdcomp/bitpackingbenchmark
 *.bk
 .idea
 trace.dat

CHANGELOG.md — 30 changed lines

@@ -1,32 +1,10 @@
 Tantivy 0.19
 ================================

-- Limit fast fields to u32 (`get_val(u32)`) [#1644](https://github.com/quickwit-oss/tantivy/pull/1644) (@PSeitz)
-- Major bugfix: Fix missing fieldnorms for u64, i64, f64, bool, bytes and date [#1620](https://github.com/quickwit-oss/tantivy/pull/1620) (@PSeitz)
 - Updated [Date Field Type](https://github.com/quickwit-oss/tantivy/pull/1396)
   The `DateTime` type has been updated to hold timestamps with microseconds precision.
-  `DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used to hint on fast values compression. Otherwise, seconds precision is used everywhere else (i.e. terms, indexing). (@evanxg852000)
+  `DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used to hint on fast values compression. Otherwise, seconds precision is used everywhere else (i.e. terms, indexing).
-- Add IP address field type [#1553](https://github.com/quickwit-oss/tantivy/pull/1553) (@PSeitz)
+- Remove Searcher pool and make `Searcher` cloneable.
-- Add boolean field type [#1382](https://github.com/quickwit-oss/tantivy/pull/1382) (@boraarslan)
-- Remove Searcher pool and make `Searcher` cloneable. (@PSeitz)
-- Validate settings on create [#1570](https://github.com/quickwit-oss/tantivy/pull/1570) (@PSeitz)
-- Fix interpolation overflow in linear interpolation fastfield codec [#1480](https://github.com/quickwit-oss/tantivy/pull/1480) (@PSeitz @fulmicoton)
-- Detect and apply gcd on fastfield codecs [#1418](https://github.com/quickwit-oss/tantivy/pull/1418) (@PSeitz)
-- Doc store
-  - use separate thread to compress block store [#1389](https://github.com/quickwit-oss/tantivy/pull/1389) [#1510](https://github.com/quickwit-oss/tantivy/pull/1510) (@PSeitz @fulmicoton)
-  - Expose doc store cache size [#1403](https://github.com/quickwit-oss/tantivy/pull/1403) (@PSeitz)
-  - Enable compression levels for doc store [#1378](https://github.com/quickwit-oss/tantivy/pull/1378) (@PSeitz)
-  - Make block size configurable [#1374](https://github.com/quickwit-oss/tantivy/pull/1374) (@kryesh)
-- Make `tantivy::TantivyError` cloneable [#1402](https://github.com/quickwit-oss/tantivy/pull/1402) (@PSeitz)
-- Add support for phrase slop in query language [#1393](https://github.com/quickwit-oss/tantivy/pull/1393) (@saroh)
-- Aggregation
-  - Add support for keyed parameter in range and histogram aggregations [#1424](https://github.com/quickwit-oss/tantivy/pull/1424) (@k-yomo)
-  - Add aggregation bucket limit [#1363](https://github.com/quickwit-oss/tantivy/pull/1363) (@PSeitz)
-- Faster indexing
-  - [#1610](https://github.com/quickwit-oss/tantivy/pull/1610) (@PSeitz)
-  - [#1594](https://github.com/quickwit-oss/tantivy/pull/1594) (@PSeitz)
-  - [#1582](https://github.com/quickwit-oss/tantivy/pull/1582) (@PSeitz)
-  - [#1611](https://github.com/quickwit-oss/tantivy/pull/1611) (@PSeitz)

 Tantivy 0.18
 ================================

@@ -44,10 +22,6 @@ Tantivy 0.18
 - Add terms aggregation (@PSeitz)
 - Add support for zstd compression (@kryesh)

-Tantivy 0.18.1
-================================
-- Hotfix: positions computation. #1629 (@fmassot, @fulmicoton, @PSeitz)

 Tantivy 0.17
 ================================

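A note on the `DateOptions`/`DatePrecision` entry in the 0.19 section above: a minimal sketch of what configuring the precision looks like. The field name is illustrative, and the exact method names (`DateOptions::default`, `set_precision`) are an assumption based on the changelog entry, not quoted from this diff.

```rust
use tantivy::schema::{DateOptions, DatePrecision, SchemaBuilder};

fn main() {
    let mut schema_builder = SchemaBuilder::new();
    // The configured precision only hints how fast values are compressed;
    // terms and indexing keep seconds precision regardless (per the entry above).
    let date_opts = DateOptions::default().set_precision(DatePrecision::Microseconds);
    // "created_at" is an illustrative field name.
    schema_builder.add_date_field("created_at", date_opts);
    let _schema = schema_builder.build();
}
```
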
Cargo.toml — 17 changed lines

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.19.0-dev"
+version = "0.18.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]

@@ -11,21 +11,19 @@ repository = "https://github.com/quickwit-oss/tantivy"
 readme = "README.md"
 keywords = ["search", "information", "retrieval"]
 edition = "2021"
-rust-version = "1.62"

 [dependencies]
-oneshot = "0.1.5"
+oneshot = "0.1.3"
 base64 = "0.13.0"
 byteorder = "1.4.3"
 crc32fast = "1.3.2"
 once_cell = "1.10.0"
 regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
-aho-corasick = "0.7"
-tantivy-fst = "0.4.0"
+tantivy-fst = "0.3.0"
 memmap2 = { version = "0.5.3", optional = true }
 lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
 brotli = { version = "3.3.4", optional = true }
-zstd = { version = "0.11", optional = true, default-features = false }
+zstd = { version = "0.11", optional = true }
 snap = { version = "1.0.5", optional = true }
 tempfile = { version = "3.3.0", optional = true }
 log = "0.4.16"

@@ -46,7 +44,7 @@ rust-stemmers = "1.2.0"
 downcast-rs = "1.2.0"
 bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
 census = "0.4.0"
-rustc-hash = "1.1.0"
+fnv = "1.0.7"
 thiserror = "1.0.30"
 htmlescape = "0.3.1"
 fail = "0.5.0"

@@ -58,10 +56,9 @@ lru = "0.7.5"
 fastdivide = "0.4.0"
 itertools = "0.10.3"
 measure_time = "0.8.2"
-ciborium = { version = "0.2", optional = true}
+serde_cbor = { version = "0.11.2", optional = true }
 async-trait = "0.1.53"
 arc-swap = "1.5.0"
-yoke = { version = "0.6.2", features = ["derive"] }

 [target.'cfg(windows)'.dependencies]
 winapi = "0.3.9"

@@ -103,7 +100,7 @@ zstd-compression = ["zstd"]
 failpoints = ["fail/failpoints"]
 unstable = [] # useful for benches.

-quickwit = ["ciborium"]
+quickwit = ["serde_cbor"]

 [workspace]
 members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes"]

@@ -58,7 +58,7 @@ Distributed search is out of the scope of Tantivy, but if you are looking for th

 # Getting started

-Tantivy works on stable Rust and supports Linux, macOS, and Windows.
+Tantivy works on stable Rust (>= 1.27) and supports Linux, macOS, and Windows.

 - [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
 - [tantivy-cli and its tutorial](https://github.com/quickwit-oss/tantivy-cli) - `tantivy-cli` is an actual command-line interface that makes it easy for you to create a search engine,

@@ -81,13 +81,9 @@ There are many ways to support this project.

 We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.

-## Minimum supported Rust version
-
-Tantivy currently requires at least Rust 1.62 or later to compile.
-
 ## Clone and build locally

-Tantivy compiles on stable Rust.
+Tantivy compiles on stable Rust but requires `Rust >= 1.27`.
 To check out and run tests, you can simply run:

 ```bash

benches/hdfs_with_array.json — 100000 changed lines
(File diff suppressed because it is too large.)

@@ -1,159 +1,116 @@
 use criterion::{criterion_group, criterion_main, Criterion};
-use itertools::Itertools;
 use pprof::criterion::{Output, PProfProfiler};
-use serde_json::{self, Value as JsonValue};
-use tantivy::directory::RamDirectory;
-use tantivy::schema::{
-    FieldValue, TextFieldIndexing, TextOptions, Value, INDEXED, STORED, STRING, TEXT,
-};
-use tantivy::{Document, Index, IndexBuilder};
+use tantivy::schema::{INDEXED, STORED, STRING, TEXT};
+use tantivy::Index;

 const HDFS_LOGS: &str = include_str!("hdfs.json");
-const NUM_REPEATS: usize = 20;
+const NUM_REPEATS: usize = 2;

 pub fn hdfs_index_benchmark(c: &mut Criterion) {
-    let mut schema_builder = tantivy::schema::SchemaBuilder::new();
-    let text_indexing_options = TextFieldIndexing::default()
-        .set_tokenizer("default")
-        .set_fieldnorms(false)
-        .set_index_option(tantivy::schema::IndexRecordOption::WithFreqsAndPositions);
-    let mut text_options = TextOptions::default().set_indexing_options(text_indexing_options);
-    let text_field = schema_builder.add_text_field("body", text_options);
-    let schema = schema_builder.build();
-
-    // prepare doc
-    let mut documents_no_array = Vec::new();
-    let mut documents_with_array = Vec::new();
-    for doc_json in HDFS_LOGS.trim().split("\n") {
-        let json_obj: serde_json::Map<String, JsonValue> = serde_json::from_str(doc_json).unwrap();
-        let text = json_obj.get("body").unwrap().as_str().unwrap();
-        let mut doc_no_array = Document::new();
-        doc_no_array.add_text(text_field, text);
-        documents_no_array.push(doc_no_array);
-        let mut doc_with_array = Document::new();
-        doc_with_array.add_borrowed_values(text.to_owned(), |text| {
-            text.split(' ')
-                .map(|text| FieldValue::new(text_field, text.into()))
-                .collect()
-        });
-        documents_with_array.push(doc_with_array);
-    }
+    let schema = {
+        let mut schema_builder = tantivy::schema::SchemaBuilder::new();
+        schema_builder.add_u64_field("timestamp", INDEXED);
+        schema_builder.add_text_field("body", TEXT);
+        schema_builder.add_text_field("severity", STRING);
+        schema_builder.build()
+    };
+    let schema_with_store = {
+        let mut schema_builder = tantivy::schema::SchemaBuilder::new();
+        schema_builder.add_u64_field("timestamp", INDEXED | STORED);
+        schema_builder.add_text_field("body", TEXT | STORED);
+        schema_builder.add_text_field("severity", STRING | STORED);
+        schema_builder.build()
+    };
+    let dynamic_schema = {
+        let mut schema_builder = tantivy::schema::SchemaBuilder::new();
+        schema_builder.add_json_field("json", TEXT);
+        schema_builder.build()
+    };

     let mut group = c.benchmark_group("index-hdfs");
     group.sample_size(20);
     group.bench_function("index-hdfs-no-commit", |b| {
         b.iter(|| {
-            let ram_directory = RamDirectory::create();
-            let mut index_writer = IndexBuilder::new()
-                .schema(schema.clone())
-                .single_segment_index_writer(ram_directory, 100_000_000)
-                .unwrap();
+            let index = Index::create_in_ram(schema.clone());
+            let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
             for _ in 0..NUM_REPEATS {
-                let documents_cloned = documents_no_array.clone();
-                for doc in documents_cloned {
+                for doc_json in HDFS_LOGS.trim().split("\n") {
+                    let doc = schema.parse_document(doc_json).unwrap();
                     index_writer.add_document(doc).unwrap();
                 }
             }
         })
     });
-    group.bench_function("index-hdfs-with-array-no-commit", |b| {
+    group.bench_function("index-hdfs-with-commit", |b| {
         b.iter(|| {
-            let ram_directory = RamDirectory::create();
-            let mut index_writer = IndexBuilder::new()
-                .schema(schema.clone())
-                .single_segment_index_writer(ram_directory, 100_000_000)
-                .unwrap();
+            let index = Index::create_in_ram(schema.clone());
+            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
             for _ in 0..NUM_REPEATS {
-                let documents_with_array_cloned = documents_with_array.clone();
-                for doc in documents_with_array_cloned {
+                for doc_json in HDFS_LOGS.trim().split("\n") {
+                    let doc = schema.parse_document(doc_json).unwrap();
+                    index_writer.add_document(doc).unwrap();
+                }
+            }
+            index_writer.commit().unwrap();
+        })
+    });
+    group.bench_function("index-hdfs-no-commit-with-docstore", |b| {
+        b.iter(|| {
+            let index = Index::create_in_ram(schema_with_store.clone());
+            let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
+            for _ in 0..NUM_REPEATS {
+                for doc_json in HDFS_LOGS.trim().split("\n") {
+                    let doc = schema.parse_document(doc_json).unwrap();
                     index_writer.add_document(doc).unwrap();
                 }
             }
         })
     });
-    // group.bench_function("index-hdfs-with-commit", |b| {
-    //     b.iter(|| {
-    //         let ram_directory = RamDirectory::create();
-    //         let mut index_writer = IndexBuilder::new()
-    //             .schema(schema.clone())
-    //             .single_segment_index_writer(ram_directory, 100_000_000)
-    //             .unwrap();
-    //         for _ in 0..NUM_REPEATS {
-    //             for doc_json in HDFS_LOGS.trim().split("\n") {
-    //                 let doc = schema.parse_document(doc_json).unwrap();
-    //                 index_writer.add_document(doc).unwrap();
-    //             }
-    //         }
-    //         index_writer.commit().unwrap();
-    //     })
-    // });
-    // group.bench_function("index-hdfs-no-commit-with-docstore", |b| {
-    //     b.iter(|| {
-    //         let ram_directory = RamDirectory::create();
-    //         let mut index_writer = IndexBuilder::new()
-    //             .schema(schema.clone())
-    //             .single_segment_index_writer(ram_directory, 100_000_000)
-    //             .unwrap();
-    //         for _ in 0..NUM_REPEATS {
-    //             for doc_json in HDFS_LOGS.trim().split("\n") {
-    //                 let doc = schema.parse_document(doc_json).unwrap();
-    //                 index_writer.add_document(doc).unwrap();
-    //             }
-    //         }
-    //     })
-    // });
-    // group.bench_function("index-hdfs-with-commit-with-docstore", |b| {
-    //     b.iter(|| {
-    //         let ram_directory = RamDirectory::create();
-    //         let mut index_writer = IndexBuilder::new()
-    //             .schema(schema.clone())
-    //             .single_segment_index_writer(ram_directory, 100_000_000)
-    //             .unwrap();
-    //         for _ in 0..NUM_REPEATS {
-    //             for doc_json in HDFS_LOGS.trim().split("\n") {
-    //                 let doc = schema.parse_document(doc_json).unwrap();
-    //                 index_writer.add_document(doc).unwrap();
-    //             }
-    //         }
-    //         index_writer.commit().unwrap();
-    //     })
-    // });
-    // group.bench_function("index-hdfs-no-commit-json-without-docstore", |b| {
-    //     b.iter(|| {
-    //         let ram_directory = RamDirectory::create();
-    //         let mut index_writer = IndexBuilder::new()
-    //             .schema(schema.clone())
-    //             .single_segment_index_writer(ram_directory, 100_000_000)
-    //             .unwrap();
-    //         for _ in 0..NUM_REPEATS {
-    //             for doc_json in HDFS_LOGS.trim().split("\n") {
-    //                 let json_val: serde_json::Map<String, serde_json::Value> =
-    //                     serde_json::from_str(doc_json).unwrap();
-    //                 let doc = tantivy::doc!(json_field=>json_val);
-    //                 index_writer.add_document(doc).unwrap();
-    //             }
-    //         }
-    //         index_writer.commit().unwrap();
-    //     })
-    // });
-    // group.bench_function("index-hdfs-with-commit-json-without-docstore", |b| {
-    //     b.iter(|| {
-    //         let ram_directory = RamDirectory::create();
-    //         let mut index_writer = IndexBuilder::new()
-    //             .schema(schema.clone())
-    //             .single_segment_index_writer(ram_directory, 100_000_000)
-    //             .unwrap();
-    //         for _ in 0..NUM_REPEATS {
-    //             for doc_json in HDFS_LOGS.trim().split("\n") {
-    //                 let json_val: serde_json::Map<String, serde_json::Value> =
-    //                     serde_json::from_str(doc_json).unwrap();
-    //                 let doc = tantivy::doc!(json_field=>json_val);
-    //                 index_writer.add_document(doc).unwrap();
-    //             }
-    //         }
-    //         index_writer.commit().unwrap();
-    //     })
-    //});
+    group.bench_function("index-hdfs-with-commit-with-docstore", |b| {
+        b.iter(|| {
+            let index = Index::create_in_ram(schema_with_store.clone());
+            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
+            for _ in 0..NUM_REPEATS {
+                for doc_json in HDFS_LOGS.trim().split("\n") {
+                    let doc = schema.parse_document(doc_json).unwrap();
+                    index_writer.add_document(doc).unwrap();
+                }
+            }
+            index_writer.commit().unwrap();
+        })
+    });
+    group.bench_function("index-hdfs-no-commit-json-without-docstore", |b| {
+        b.iter(|| {
+            let index = Index::create_in_ram(dynamic_schema.clone());
+            let json_field = dynamic_schema.get_field("json").unwrap();
+            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
+            for _ in 0..NUM_REPEATS {
+                for doc_json in HDFS_LOGS.trim().split("\n") {
+                    let json_val: serde_json::Map<String, serde_json::Value> =
+                        serde_json::from_str(doc_json).unwrap();
+                    let doc = tantivy::doc!(json_field=>json_val);
+                    index_writer.add_document(doc).unwrap();
+                }
+            }
+            index_writer.commit().unwrap();
+        })
+    });
+    group.bench_function("index-hdfs-with-commit-json-without-docstore", |b| {
+        b.iter(|| {
+            let index = Index::create_in_ram(dynamic_schema.clone());
+            let json_field = dynamic_schema.get_field("json").unwrap();
+            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
+            for _ in 0..NUM_REPEATS {
+                for doc_json in HDFS_LOGS.trim().split("\n") {
+                    let json_val: serde_json::Map<String, serde_json::Value> =
+                        serde_json::from_str(doc_json).unwrap();
+                    let doc = tantivy::doc!(json_field=>json_val);
+                    index_writer.add_document(doc).unwrap();
+                }
+            }
+            index_writer.commit().unwrap();
+        })
+    });
 }

 criterion_group! {

@@ -87,15 +87,15 @@ impl BitUnpacker {
     }

     #[inline]
-    pub fn get(&self, idx: u32, data: &[u8]) -> u64 {
+    pub fn get(&self, idx: u64, data: &[u8]) -> u64 {
         if self.num_bits == 0 {
             return 0u64;
         }
-        let addr_in_bits = idx * self.num_bits as u32;
+        let addr_in_bits = idx * self.num_bits;
         let addr = addr_in_bits >> 3;
         let bit_shift = addr_in_bits & 7;
         debug_assert!(
-            addr + 8 <= data.len() as u32,
+            addr + 8 <= data.len() as u64,
             "The fast field field should have been padded with 7 bytes."
         );
         let bytes: [u8; 8] = (&data[(addr as usize)..(addr as usize) + 8])

@@ -130,7 +130,7 @@ mod test {
     fn test_bitpacker_util(len: usize, num_bits: u8) {
         let (bitunpacker, vals, data) = create_fastfield_bitpacker(len, num_bits);
         for (i, val) in vals.iter().enumerate() {
-            assert_eq!(bitunpacker.get(i as u32, &data), *val);
+            assert_eq!(bitunpacker.get(i as u64, &data), *val);
         }
     }

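The `BitUnpacker::get` hunk above is pure address arithmetic: multiply the index by the value width, split the bit offset into a byte address and a bit shift, then mask. A standalone sketch of the same logic (not the crate's code; it assumes values at most 57 bits wide and at least 7 bytes of padding after the data):

```rust
// Standalone sketch of the addressing in BitUnpacker::get for a column of
// `num_bits`-wide values packed back to back, little-endian.
fn unpack(idx: u64, num_bits: u64, data: &[u8]) -> u64 {
    let addr_in_bits = idx * num_bits; // absolute bit offset of the value
    let addr = (addr_in_bits >> 3) as usize; // byte containing the first bit
    let bit_shift = addr_in_bits & 7; // bit offset within that byte
    let bytes: [u8; 8] = data[addr..addr + 8].try_into().unwrap();
    (u64::from_le_bytes(bytes) >> bit_shift) & ((1u64 << num_bits) - 1)
}

fn main() {
    // Three 5-bit values (9, 17, 30) packed into one u64, padded to 10 bytes.
    let mut data = vec![0u8; 10];
    let packed: u64 = 9 | (17 << 5) | (30 << 10);
    data[..8].copy_from_slice(&packed.to_le_bytes());
    assert_eq!(unpack(0, 5, &data), 9);
    assert_eq!(unpack(1, 5, &data), 17);
    assert_eq!(unpack(2, 5, &data), 30);
}
```
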
@@ -130,7 +130,7 @@ impl BlockedBitpacker {
         let pos_in_block = idx % BLOCK_SIZE as usize;
         if let Some(metadata) = self.offset_and_bits.get(metadata_pos) {
             let unpacked = BitUnpacker::new(metadata.num_bits()).get(
-                pos_in_block as u32,
+                pos_in_block as u64,
                 &self.compressed_blocks[metadata.offset() as usize..],
             );
             unpacked + metadata.base_value()

@@ -259,7 +259,11 @@ impl BitSet {
         // we do not check saturated els.
         let higher = el / 64u32;
         let lower = el % 64u32;
-        self.len += u64::from(self.tinysets[higher as usize].insert_mut(lower));
+        self.len += if self.tinysets[higher as usize].insert_mut(lower) {
+            1
+        } else {
+            0
+        };
     }

     /// Inserts an element in the `BitSet`

@@ -268,7 +272,11 @@ impl BitSet {
         // we do not check saturated els.
         let higher = el / 64u32;
         let lower = el % 64u32;
-        self.len -= u64::from(self.tinysets[higher as usize].remove_mut(lower));
+        self.len -= if self.tinysets[higher as usize].remove_mut(lower) {
+            1
+        } else {
+            0
+        };
     }

     /// Returns true iff the elements is in the `BitSet`.

@@ -277,7 +285,7 @@ impl BitSet {
         self.tinyset(el / 64u32).contains(el % 64)
     }

-    /// Returns the first non-empty `TinySet` associated with a bucket lower
+    /// Returns the first non-empty `TinySet` associated to a bucket lower
     /// or greater than bucket.
     ///
     /// Reminder: the tiny set with the bucket `bucket`, represents the

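Context for the `u64::from(...)` calls being rewritten above: `From<bool>` maps `false`/`true` to `0`/`1`, so the branch's `if`/`else` form is behavior-identical. A standalone check:

```rust
fn main() {
    // u64::from(bool) is the branchless spelling of the if/else in the diff.
    assert_eq!(u64::from(true), 1);
    assert_eq!(u64::from(false), 0);

    // Updating a cardinality counter from an "inserted?" flag, both ways:
    let mut len = 0u64;
    let newly_inserted = true; // stand-in for tinysets[..].insert_mut(lower)
    len += u64::from(newly_inserted);
    len += if newly_inserted { 1 } else { 0 };
    assert_eq!(len, 2);
}
```
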
@@ -1,4 +1,3 @@
-use std::borrow::Cow;
 use std::io::{Read, Write};
 use std::{fmt, io};

@@ -108,19 +107,6 @@ impl FixedSize for u64 {
     const SIZE_IN_BYTES: usize = 8;
 }

-impl BinarySerializable for u128 {
-    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
-        writer.write_u128::<Endianness>(*self)
-    }
-    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
-        reader.read_u128::<Endianness>()
-    }
-}
-
-impl FixedSize for u128 {
-    const SIZE_IN_BYTES: usize = 16;
-}
-
 impl BinarySerializable for f32 {
     fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
         writer.write_f32::<Endianness>(*self)

@@ -175,7 +161,8 @@ impl FixedSize for u8 {

 impl BinarySerializable for bool {
     fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
-        writer.write_u8(u8::from(*self))
+        let val = if *self { 1 } else { 0 };
+        writer.write_u8(val)
     }
     fn deserialize<R: Read>(reader: &mut R) -> io::Result<bool> {
         let val = reader.read_u8()?;

@@ -211,23 +198,6 @@ impl BinarySerializable for String {
     }
 }

-impl<'a> BinarySerializable for Cow<'a, str> {
-    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
-        let data: &[u8] = self.as_bytes();
-        VInt(data.len() as u64).serialize(writer)?;
-        writer.write_all(data)
-    }
-
-    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
-        let string_length = VInt::deserialize(reader)?.val() as usize;
-        let mut result = String::with_capacity(string_length);
-        reader
-            .take(string_length as u64)
-            .read_to_string(&mut result)?;
-        Ok(Cow::Owned(result))
-    }
-}
-
 #[cfg(test)]
 pub mod test {

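For readers following the `BinarySerializable` hunks above, a self-contained sketch of the trait's shape and the `bool` round trip (the trait is pared down here to its two methods; everything else is illustrative):

```rust
use std::io::{self, Read, Write};

// Pared-down version of the trait shown in the hunks above.
trait BinarySerializable: Sized {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>;
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self>;
}

impl BinarySerializable for bool {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        // u8::from(*self) and `if *self { 1 } else { 0 }` are equivalent.
        writer.write_all(&[u8::from(*self)])
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<bool> {
        let mut buf = [0u8; 1];
        reader.read_exact(&mut buf)?;
        Ok(buf[0] != 0)
    }
}

fn main() -> io::Result<()> {
    let mut out: Vec<u8> = Vec::new();
    true.serialize(&mut out)?;
    assert_eq!(out, [1]);
    let round_tripped = bool::deserialize(&mut out.as_slice())?;
    assert!(round_tripped);
    Ok(())
}
```
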
@@ -50,7 +50,7 @@ to get tantivy to fit your use case:

 *Example 1* You could for instance use hadoop to build a very large search index in a timely manner, copy all of the resulting segment files in the same directory and edit the `meta.json` to get a functional index.[^2]

-*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated with segment `D-7`.
+*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated to segment `D-7`.

 ## Merging

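Following up the "disable your merge policy" example above, a minimal sketch of doing that through tantivy's public API (the schema and memory budget are illustrative; `NoMergePolicy` is the stock policy that never merges segments):

```rust
use tantivy::merge_policy::NoMergePolicy;
use tantivy::schema::{SchemaBuilder, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = SchemaBuilder::new();
    schema_builder.add_text_field("body", TEXT); // illustrative field
    let index = Index::create_in_ram(schema_builder.build());
    let index_writer = index.writer(50_000_000)?;
    // With NoMergePolicy, segments are never merged behind your back, so a
    // "one segment per day" layout survives, and old days can be dropped by
    // editing meta.json and deleting that segment's files.
    index_writer.set_merge_policy(Box::new(NoMergePolicy));
    Ok(())
}
```
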
@@ -105,7 +105,7 @@ impl SegmentCollector for StatsSegmentCollector {
     type Fruit = Option<Stats>;

     fn collect(&mut self, doc: u32, _score: Score) {
-        let value = self.fast_field_reader.get_val(doc) as f64;
+        let value = self.fast_field_reader.get_val(doc as u64) as f64;
         self.stats.count += 1;
         self.stats.sum += value;
         self.stats.squared_sum += value * value;

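The collector above only accumulates `count`, `sum`, and `squared_sum`; mean and variance then fall out of the identity Var(X) = E[X²] − E[X]². A standalone sketch of that finishing step (the `Stats` shape mirrors the example; the numbers are made up):

```rust
#[derive(Default)]
struct Stats {
    count: usize,
    sum: f64,
    squared_sum: f64,
}

impl Stats {
    fn mean(&self) -> f64 {
        self.sum / self.count as f64
    }
    fn variance(&self) -> f64 {
        // E[X^2] - E[X]^2, computed from the two running sums.
        self.squared_sum / self.count as f64 - self.mean() * self.mean()
    }
}

fn main() {
    let mut stats = Stats::default();
    for value in [2.0f64, 4.0, 6.0] {
        stats.count += 1;
        stats.sum += value;
        stats.squared_sum += value * value;
    }
    assert_eq!(stats.mean(), 4.0);
    // (4 + 16 + 36)/3 - 16 = 8/3
    assert!((stats.variance() - 8.0 / 3.0).abs() < 1e-12);
}
```
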
@@ -113,7 +113,7 @@ fn main() -> tantivy::Result<()> {
     // on its id.
     //
     // Note that `tantivy` does nothing to enforce the idea that
-    // there is only one document associated with this id.
+    // there is only one document associated to this id.
     //
     // Also you might have noticed that we apply the delete before
     // having committed. This does not matter really...

@@ -44,7 +44,7 @@ fn main() -> tantivy::Result<()> {
     // A segment contains different data structure.
     // Inverted index stands for the combination of
     // - the term dictionary
-    // - the inverted lists associated with each terms and their positions
+    // - the inverted lists associated to each terms and their positions
     let inverted_index = segment_reader.inverted_index(title)?;

     // A `Term` is a text token associated with a field.

@@ -105,7 +105,7 @@ fn main() -> tantivy::Result<()> {
     // A segment contains different data structure.
     // Inverted index stands for the combination of
     // - the term dictionary
-    // - the inverted lists associated with each terms and their positions
+    // - the inverted lists associated to each terms and their positions
     let inverted_index = segment_reader.inverted_index(title)?;

     // This segment posting object is like a cursor over the documents matching the term.

@@ -51,7 +51,7 @@ impl Warmer for DynamicPriceColumn {
         let product_id_reader = segment.fast_fields().u64(self.field)?;
         let product_ids: Vec<ProductId> = segment
             .doc_ids_alive()
-            .map(|doc| product_id_reader.get_val(doc))
+            .map(|doc| product_id_reader.get_val(doc as u64))
             .collect();
         let mut prices_it = self.price_fetcher.fetch_prices(&product_ids).into_iter();
         let mut price_vals: Vec<Price> = Vec::new();

@@ -65,7 +65,7 @@ mod tests {
         b.iter(|| {
             let mut a = 0u64;
             for _ in 0..n {
-                a = column.get_val(a as u32);
+                a = column.get_val(a as u64);
             }
             a
         });

@@ -100,10 +100,9 @@ mod tests {

     fn get_u128_column_from_data(data: &[u128]) -> Arc<dyn Column<u128>> {
         let mut out = vec![];
-        let iter_gen = || data.iter().cloned();
-        serialize_u128(iter_gen, data.len() as u32, &mut out).unwrap();
+        serialize_u128(VecColumn::from(&data), &mut out).unwrap();
         let out = OwnedBytes::new(out);
-        open_u128::<u128>(out).unwrap()
+        open_u128(out).unwrap()
     }

     #[bench]

@@ -111,15 +110,7 @@ mod tests {
         let (major_item, _minor_item, data) = get_data_50percent_item();
         let column = get_u128_column_from_data(&data);

-        b.iter(|| {
-            let mut positions = Vec::new();
-            column.get_positions_for_value_range(
-                major_item..=major_item,
-                0..data.len() as u32,
-                &mut positions,
-            );
-            positions
-        });
+        b.iter(|| column.get_between_vals(major_item..=major_item));
     }

     #[bench]

@@ -127,15 +118,7 @@ mod tests {
         let (_major_item, minor_item, data) = get_data_50percent_item();
         let column = get_u128_column_from_data(&data);

-        b.iter(|| {
-            let mut positions = Vec::new();
-            column.get_positions_for_value_range(
-                minor_item..=minor_item,
-                0..data.len() as u32,
-                &mut positions,
-            );
-            positions
-        });
+        b.iter(|| column.get_between_vals(minor_item..=minor_item));
     }

     #[bench]

@@ -143,15 +126,7 @@ mod tests {
         let (_major_item, _minor_item, data) = get_data_50percent_item();
         let column = get_u128_column_from_data(&data);

-        b.iter(|| {
-            let mut positions = Vec::new();
-            column.get_positions_for_value_range(
-                0..=u128::MAX,
-                0..data.len() as u32,
-                &mut positions,
-            );
-            positions
-        });
+        b.iter(|| column.get_between_vals(0..=u128::MAX));
     }

     #[bench]

@@ -161,7 +136,7 @@ mod tests {
         b.iter(|| {
             let mut a = 0u128;
             for i in 0u64..column.num_vals() as u64 {
-                a += column.get_val(i as u32);
+                a += column.get_val(i);
             }
             a
         });

@@ -175,7 +150,7 @@ mod tests {
             let n = column.num_vals();
             let mut a = 0u128;
             for i in (0..n / 5).map(|val| val * 5) {
-                a += column.get_val(i);
+                a += column.get_val(i as u64);
             }
             a
         });

@@ -200,9 +175,9 @@ mod tests {
         let n = permutation.len();
         let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
         b.iter(|| {
-            let mut a = 0;
+            let mut a = 0u64;
             for i in (0..n / 7).map(|val| val * 7) {
-                a += column.get_val(i as u32);
+                a += column.get_val(i as u64);
             }
             a
         });

@@ -215,7 +190,7 @@ mod tests {
         let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
         b.iter(|| {
             let mut a = 0u64;
-            for i in 0u32..n as u32 {
+            for i in 0u64..n as u64 {
                 a += column.get_val(i);
             }
             a

@@ -229,8 +204,8 @@ mod tests {
         let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
         b.iter(|| {
             let mut a = 0u64;
-            for i in 0..n {
-                a += column.get_val(i as u32);
+            for i in 0..n as u64 {
+                a += column.get_val(i);
             }
             a
         });

@@ -17,7 +17,7 @@ pub struct BitpackedReader {

 impl Column for BitpackedReader {
     #[inline]
-    fn get_val(&self, doc: u32) -> u64 {
+    fn get_val(&self, doc: u64) -> u64 {
         self.bit_unpacker.get(doc, &self.data)
     }
     #[inline]

@@ -30,7 +30,7 @@ impl Column for BitpackedReader {
         self.normalized_header.max_value
     }
     #[inline]
-    fn num_vals(&self) -> u32 {
+    fn num_vals(&self) -> u64 {
         self.normalized_header.num_vals
     }
 }

@@ -75,7 +75,7 @@ impl FastFieldCodec for BitpackedCodec {
         Ok(())
     }

-    fn estimate(column: &dyn Column) -> Option<f32> {
+    fn estimate(column: &impl Column) -> Option<f32> {
        let num_bits = compute_num_bits(column.max_value());
        let num_bits_uncompressed = 64;
        Some(num_bits as f32 / num_bits_uncompressed as f32)

@@ -36,7 +36,7 @@ impl BinarySerializable for Block {
     }
 }

-fn compute_num_blocks(num_vals: u32) -> usize {
+fn compute_num_blocks(num_vals: u64) -> usize {
     (num_vals as usize + CHUNK_SIZE - 1) / CHUNK_SIZE
 }

@@ -71,14 +71,14 @@ impl FastFieldCodec for BlockwiseLinearCodec {
     }

     // Estimate first_chunk and extrapolate
-    fn estimate(column: &dyn crate::Column) -> Option<f32> {
-        if column.num_vals() < 10 * CHUNK_SIZE as u32 {
+    fn estimate(column: &impl crate::Column) -> Option<f32> {
+        if column.num_vals() < 10 * CHUNK_SIZE as u64 {
             return None;
         }
         let mut first_chunk: Vec<u64> = column.iter().take(CHUNK_SIZE as usize).collect();
         let line = Line::train(&VecColumn::from(&first_chunk));
         for (i, buffer_val) in first_chunk.iter_mut().enumerate() {
-            let interpolated_val = line.eval(i as u32);
+            let interpolated_val = line.eval(i as u64);
             *buffer_val = buffer_val.wrapping_sub(interpolated_val);
         }
         let estimated_bit_width = first_chunk

@@ -95,12 +95,12 @@ impl FastFieldCodec for BlockwiseLinearCodec {
         };
         let num_bits = estimated_bit_width as u64 * column.num_vals() as u64
             // function metadata per block
-            + metadata_per_block as u64 * (column.num_vals() as u64 / CHUNK_SIZE as u64);
+            + metadata_per_block as u64 * (column.num_vals() / CHUNK_SIZE as u64);
         let num_bits_uncompressed = 64 * column.num_vals();
         Some(num_bits as f32 / num_bits_uncompressed as f32)
     }

-    fn serialize(column: &dyn Column, wrt: &mut impl io::Write) -> io::Result<()> {
+    fn serialize(column: &dyn crate::Column, wrt: &mut impl io::Write) -> io::Result<()> {
         // The BitpackedReader assumes a normalized vector.
         assert_eq!(column.min_value(), 0);
         let mut buffer = Vec::with_capacity(CHUNK_SIZE);

@@ -121,7 +121,7 @@ impl FastFieldCodec for BlockwiseLinearCodec {
         assert!(!buffer.is_empty());

         for (i, buffer_val) in buffer.iter_mut().enumerate() {
-            let interpolated_val = line.eval(i as u32);
+            let interpolated_val = line.eval(i as u64);
             *buffer_val = buffer_val.wrapping_sub(interpolated_val);
         }
         let bit_width = buffer.iter().copied().map(compute_num_bits).max().unwrap();

@@ -161,9 +161,9 @@ pub struct BlockwiseLinearReader {

 impl Column for BlockwiseLinearReader {
     #[inline(always)]
-    fn get_val(&self, idx: u32) -> u64 {
-        let block_id = (idx / CHUNK_SIZE as u32) as usize;
-        let idx_within_block = idx % (CHUNK_SIZE as u32);
+    fn get_val(&self, idx: u64) -> u64 {
+        let block_id = (idx / CHUNK_SIZE as u64) as usize;
+        let idx_within_block = idx % (CHUNK_SIZE as u64);
         let block = &self.blocks[block_id];
         let interpoled_val: u64 = block.line.eval(idx_within_block);
         let block_bytes = &self.data[block.data_start_offset..];

@@ -180,7 +180,7 @@ impl Column for BlockwiseLinearReader {
         self.normalized_header.max_value
     }

-    fn num_vals(&self) -> u32 {
+    fn num_vals(&self) -> u64 {
         self.normalized_header.num_vals
     }
 }

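To make the "Estimate first_chunk and extrapolate" comment above concrete: the codec fits a line through a chunk, keeps only the residuals `val - line(i)`, and the widest residual fixes the chunk's bit width. A standalone sketch with a naive endpoint fit standing in for `Line::train` (the real fit also guarantees non-negative residuals, which these hand-picked values satisfy):

```rust
fn main() {
    // Values that are almost, but not exactly, linear in their index.
    let chunk: Vec<u64> = vec![100, 112, 121, 133, 140];

    // Stand-in for Line::train: slope from the endpoints, intercept at idx 0.
    let slope = (chunk[chunk.len() - 1] - chunk[0]) / (chunk.len() as u64 - 1);
    let line = |i: u64| chunk[0] + slope * i;

    // Residuals are what actually gets bit-packed (wrapping_sub mirrors the
    // codec's arithmetic).
    let residuals: Vec<u64> = chunk
        .iter()
        .enumerate()
        .map(|(i, &val)| val.wrapping_sub(line(i as u64)))
        .collect();
    assert_eq!(residuals, [0, 2, 1, 3, 0]);

    // The widest residual decides the per-chunk bit width.
    let bit_width = residuals.iter().map(|&r| 64 - r.leading_zeros()).max().unwrap();
    assert_eq!(bit_width, 2); // 2 bits per value instead of 64
}
```
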
@@ -1,20 +1,24 @@
 use std::marker::PhantomData;
-use std::ops::{Range, RangeInclusive};
+use std::ops::RangeInclusive;

 use tantivy_bitpacker::minmax;

-use crate::monotonic_mapping::StrictlyMonotonicFn;
-
-/// `Column` provides columnar access on a field.
 pub trait Column<T: PartialOrd = u64>: Send + Sync {
-    /// Return the value associated with the given idx.
+    /// Return a `ColumnReader`.
+    fn reader(&self) -> Box<dyn ColumnReader<T> + '_> {
+        Box::new(ColumnReaderAdapter { column: self })
+    }
+
+    /// Return the value associated to the given idx.
     ///
     /// This accessor should return as fast as possible.
     ///
     /// # Panics
     ///
     /// May panic if `idx` is greater than the column length.
-    fn get_val(&self, idx: u32) -> T;
+    ///
+    /// TODO remove to force people to use `.reader()`.
+    fn get_val(&self, idx: u64) -> T;

     /// Fills an output buffer with the fast field values
     /// associated with the `DocId` going from

@@ -27,28 +31,21 @@ pub trait Column<T: PartialOrd = u64>: Send + Sync {
     #[inline]
     fn get_range(&self, start: u64, output: &mut [T]) {
         for (out, idx) in output.iter_mut().zip(start..) {
-            *out = self.get_val(idx as u32);
+            *out = self.get_val(idx);
         }
     }

-    /// Get the positions of values which are in the provided value range.
-    ///
-    /// Note that position == docid for single value fast fields
+    /// Return the positions of values which are in the provided range.
     #[inline]
-    fn get_positions_for_value_range(
-        &self,
-        value_range: RangeInclusive<T>,
-        doc_id_range: Range<u32>,
-        positions: &mut Vec<u32>,
-    ) {
-        let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals());
-
-        for idx in doc_id_range.start..doc_id_range.end {
+    fn get_between_vals(&self, range: RangeInclusive<T>) -> Vec<u64> {
+        let mut vals = Vec::new();
+        for idx in 0..self.num_vals() {
             let val = self.get_val(idx);
-            if value_range.contains(&val) {
-                positions.push(idx);
+            if range.contains(&val) {
+                vals.push(idx);
             }
         }
+        vals
     }

     /// Returns the minimum value for this fast field.

@@ -67,16 +64,43 @@ pub trait Column<T: PartialOrd = u64>: Send + Sync {
     /// `.max_value()`.
     fn max_value(&self) -> T;

-    /// The number of values in the column.
-    fn num_vals(&self) -> u32;
+    fn num_vals(&self) -> u64;

     /// Returns a iterator over the data
-    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = T> + 'a> {
+    ///
+    /// TODO get rid of `.iter()` and extend ColumnReader instead.
+    fn iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
         Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
     }
 }

-/// VecColumn provides `Column` over a slice.
+/// `ColumnReader` makes it possible to read forward through a column.
+///
+/// TODO add methods to make it possible to scan the column and replace `.iter()`
+pub trait ColumnReader<T = u64> {
+    fn seek(&mut self, idx: u64) -> T;
+}
+
+pub(crate) struct ColumnReaderAdapter<'a, C: ?Sized> {
+    column: &'a C,
+}
+
+impl<'a, C: ?Sized> From<&'a C> for ColumnReaderAdapter<'a, C> {
+    fn from(column: &'a C) -> Self {
+        ColumnReaderAdapter { column }
+    }
+}
+
+impl<'a, T, C: ?Sized> ColumnReader<T> for ColumnReaderAdapter<'a, C>
+where
+    C: Column<T>,
+    T: PartialOrd<T>,
+{
+    fn seek(&mut self, idx: u64) -> T {
+        self.column.get_val(idx)
+    }
+}
+
 pub struct VecColumn<'a, T = u64> {
     values: &'a [T],
     min_value: T,

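A usage-level illustration of the `ColumnReader` indirection added above — a sketch that assumes `Column`, `ColumnReader`, and `VecColumn` are re-exported at the crate root; the helper and values are made up:

```rust
use fastfield_codecs::{Column, ColumnReader, VecColumn};

// Hypothetical helper: sum every other value through a reader cursor.
fn sum_every_other(column: &impl Column<u64>) -> u64 {
    // `reader()` hands back a stateful cursor; `seek(idx)` decodes the value
    // at `idx`, letting forward scans reuse decoding state instead of
    // re-resolving the position on every `get_val` call.
    let mut reader = column.reader();
    (0..column.num_vals())
        .step_by(2)
        .map(|idx| reader.seek(idx))
        .sum()
}

fn main() {
    let vals: Vec<u64> = vec![3, 1, 4, 1, 5, 9];
    let column = VecColumn::from(&vals);
    assert_eq!(sum_every_other(&column), 3 + 4 + 5);
}
```
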
@@ -84,7 +108,7 @@ pub struct VecColumn<'a, T = u64> {
 }

 impl<'a, C: Column<T>, T: Copy + PartialOrd> Column<T> for &'a C {
-    fn get_val(&self, idx: u32) -> T {
+    fn get_val(&self, idx: u64) -> T {
         (*self).get_val(idx)
     }

@@ -96,11 +120,15 @@ impl<'a, C: Column<T>, T: Copy + PartialOrd> Column<T> for &'a C {
         (*self).max_value()
     }

-    fn num_vals(&self) -> u32 {
+    fn num_vals(&self) -> u64 {
         (*self).num_vals()
     }

-    fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = T> + 'b> {
+    fn reader(&self) -> Box<dyn ColumnReader<T> + '_> {
+        (*self).reader()
+    }
+
+    fn iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
         (*self).iter()
     }

@@ -110,7 +138,7 @@ impl<'a, C: Column<T>, T: Copy + PartialOrd> Column<T> for &'a C {
 }

 impl<'a, T: Copy + PartialOrd + Send + Sync> Column<T> for VecColumn<'a, T> {
-    fn get_val(&self, position: u32) -> T {
+    fn get_val(&self, position: u64) -> T {
         self.values[position as usize]
     }

@@ -126,8 +154,8 @@ impl<'a, T: Copy + PartialOrd + Send + Sync> Column<T> for VecColumn<'a, T> {
         self.max_value
     }

-    fn num_vals(&self) -> u32 {
-        self.values.len() as u32
+    fn num_vals(&self) -> u64 {
+        self.values.len() as u64
     }

     fn get_range(&self, start: u64, output: &mut [T]) {

@@ -155,30 +183,16 @@ struct MonotonicMappingColumn<C, T, Input> {
     _phantom: PhantomData<Input>,
 }

-/// Creates a view of a column transformed by a strictly monotonic mapping. See
-/// [`StrictlyMonotonicFn`].
-///
-/// E.g. apply a gcd monotonic_mapping([100, 200, 300]) == [1, 2, 3]
-/// monotonic_mapping.mapping() is expected to be injective, and we should always have
-/// monotonic_mapping.inverse(monotonic_mapping.mapping(el)) == el
-///
-/// The inverse of the mapping is required for:
-/// `fn get_positions_for_value_range(&self, range: RangeInclusive<T>) -> Vec<u64> `
-/// The user provides the original value range and we need to monotonic map them in the same way the
-/// serialization does before calling the underlying column.
-///
-/// Note that when opening a codec, the monotonic_mapping should be the inverse of the mapping
-/// during serialization. And therefore the monotonic_mapping_inv when opening is the same as
-/// monotonic_mapping during serialization.
-pub fn monotonic_map_column<C, T, Input, Output>(
+/// Creates a view of a column transformed by a monotonic mapping.
+pub fn monotonic_map_column<C, T, Input: PartialOrd, Output: PartialOrd>(
     from_column: C,
     monotonic_mapping: T,
 ) -> impl Column<Output>
 where
     C: Column<Input>,
-    T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
-    Input: PartialOrd + Send + Sync + Clone,
-    Output: PartialOrd + Send + Sync + Clone,
+    T: Fn(Input) -> Output + Send + Sync,
+    Input: Send + Sync,
+    Output: Send + Sync,
 {
     MonotonicMappingColumn {
         from_column,

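The doc comment deleted in the hunk above carries the invariant that matters for range queries: the mapping must be strictly monotonic and injective, with `inverse(mapping(el)) == el`, so a user-supplied value range can be translated before it reaches the underlying column. A standalone sketch of that invariant with a gcd-style mapping (plain closures as stand-ins; a gcd of 10 is assumed and inputs are multiples of it):

```rust
fn main() {
    let gcd = 10u64;
    // Mapping applied when opening the column: divide out the gcd…
    let mapping = |el: u64| el / gcd;
    // …and its inverse, used to translate user-provided value ranges.
    let inverse = |el: u64| el * gcd;

    for el in [100u64, 200, 300] {
        // Strictly monotonic and injective on multiples of the gcd:
        // inverse(mapping(el)) == el.
        assert_eq!(inverse(mapping(el)), el);
    }
    // A user range 100..=300 translates to 10..=30 on the stored values.
    assert_eq!((mapping(100), mapping(300)), (10, 30));
}
```
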
@@ -187,59 +201,68 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<C, T, Input, Output> Column<Output> for MonotonicMappingColumn<C, T, Input>
|
impl<C, T, Input: PartialOrd, Output: PartialOrd> Column<Output>
|
||||||
|
for MonotonicMappingColumn<C, T, Input>
|
||||||
where
|
where
|
||||||
C: Column<Input>,
|
C: Column<Input>,
|
||||||
T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
|
T: Fn(Input) -> Output + Send + Sync,
|
||||||
Input: PartialOrd + Send + Sync + Clone,
|
Input: Send + Sync,
|
||||||
Output: PartialOrd + Send + Sync + Clone,
|
Output: Send + Sync,
|
||||||
{
|
{
|
||||||
#[inline]
|
#[inline]
|
||||||
fn get_val(&self, idx: u32) -> Output {
|
fn get_val(&self, idx: u64) -> Output {
|
||||||
let from_val = self.from_column.get_val(idx);
|
let from_val = self.from_column.get_val(idx);
|
||||||
self.monotonic_mapping.mapping(from_val)
|
(self.monotonic_mapping)(from_val)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn min_value(&self) -> Output {
|
fn min_value(&self) -> Output {
|
||||||
let from_min_value = self.from_column.min_value();
|
let from_min_value = self.from_column.min_value();
|
||||||
self.monotonic_mapping.mapping(from_min_value)
|
(self.monotonic_mapping)(from_min_value)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn max_value(&self) -> Output {
|
fn max_value(&self) -> Output {
|
||||||
let from_max_value = self.from_column.max_value();
|
let from_max_value = self.from_column.max_value();
|
||||||
self.monotonic_mapping.mapping(from_max_value)
|
(self.monotonic_mapping)(from_max_value)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn num_vals(&self) -> u32 {
|
fn num_vals(&self) -> u64 {
|
||||||
self.from_column.num_vals()
|
self.from_column.num_vals()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn iter(&self) -> Box<dyn Iterator<Item = Output> + '_> {
|
fn iter(&self) -> Box<dyn Iterator<Item = Output> + '_> {
|
||||||
Box::new(
|
Box::new(self.from_column.iter().map(&self.monotonic_mapping))
|
||||||
self.from_column
|
|
||||||
.iter()
|
|
||||||
.map(|el| self.monotonic_mapping.mapping(el)),
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_positions_for_value_range(
|
fn reader(&self) -> Box<dyn ColumnReader<Output> + '_> {
|
||||||
&self,
|
Box::new(MonotonicMappingColumnReader {
|
||||||
range: RangeInclusive<Output>,
|
col_reader: ColumnReaderAdapter::from(&self.from_column),
|
||||||
doc_id_range: Range<u32>,
|
monotonic_mapping: &self.monotonic_mapping,
|
||||||
positions: &mut Vec<u32>,
|
intermdiary_type: PhantomData,
|
||||||
) {
|
})
|
||||||
self.from_column.get_positions_for_value_range(
|
|
||||||
self.monotonic_mapping.inverse(range.start().clone())
|
|
||||||
..=self.monotonic_mapping.inverse(range.end().clone()),
|
|
||||||
doc_id_range,
|
|
||||||
positions,
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// We voluntarily do not implement get_range as it yields a regression,
|
// We voluntarily do not implement get_range as it yields a regression,
|
||||||
// and we do not have any specialized implementation anyway.
|
// and we do not have any specialized implementation anyway.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct MonotonicMappingColumnReader<'a, ColR, Transform, U> {
|
||||||
|
col_reader: ColR,
|
||||||
|
monotonic_mapping: &'a Transform,
|
||||||
|
intermdiary_type: PhantomData<U>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a, U, V, ColR, Transform> ColumnReader<V>
|
||||||
|
for MonotonicMappingColumnReader<'a, ColR, Transform, U>
|
||||||
|
where
|
||||||
|
ColR: ColumnReader<U> + 'a,
|
||||||
|
Transform: Fn(U) -> V,
|
||||||
|
{
|
||||||
|
fn seek(&mut self, idx: u64) -> V {
|
||||||
|
let intermediary_value = self.col_reader.seek(idx);
|
||||||
|
(*self.monotonic_mapping)(intermediary_value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
 pub struct IterColumn<T>(T);

 impl<T> From<T> for IterColumn<T>
@@ -255,7 +278,7 @@ where
     T: Iterator + Clone + ExactSizeIterator + Send + Sync,
     T::Item: PartialOrd,
 {
-    fn get_val(&self, idx: u32) -> T::Item {
+    fn get_val(&self, idx: u64) -> T::Item {
         self.0.clone().nth(idx as usize).unwrap()
     }

@@ -267,8 +290,8 @@ where
         self.0.clone().last().unwrap()
     }

-    fn num_vals(&self) -> u32 {
-        self.0.len() as u32
+    fn num_vals(&self) -> u64 {
+        self.0.len() as u64
     }

     fn iter(&self) -> Box<dyn Iterator<Item = T::Item> + '_> {
@@ -279,22 +302,19 @@ where
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::monotonic_mapping::{
-        StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternalBaseval,
-        StrictlyMonotonicMappingToInternalGCDBaseval,
-    };
+    use crate::MonotonicallyMappableToU64;

     #[test]
     fn test_monotonic_mapping() {
-        let vals = &[3u64, 5u64][..];
+        let vals = &[1u64, 3u64][..];
         let col = VecColumn::from(vals);
-        let mapped = monotonic_map_column(col, StrictlyMonotonicMappingToInternalBaseval::new(2));
-        assert_eq!(mapped.min_value(), 1u64);
-        assert_eq!(mapped.max_value(), 3u64);
+        let mapped = monotonic_map_column(col, |el| el + 4);
+        assert_eq!(mapped.min_value(), 5u64);
+        assert_eq!(mapped.max_value(), 7u64);
         assert_eq!(mapped.num_vals(), 2);
         assert_eq!(mapped.num_vals(), 2);
-        assert_eq!(mapped.get_val(0), 1);
-        assert_eq!(mapped.get_val(1), 3);
+        assert_eq!(mapped.get_val(0), 5);
+        assert_eq!(mapped.get_val(1), 7);
     }

     #[test]
@@ -306,15 +326,10 @@ mod tests {

     #[test]
     fn test_monotonic_mapping_iter() {
-        let vals: Vec<u64> = (10..110u64).map(|el| el * 10).collect();
+        let vals: Vec<u64> = (-1..99).map(i64::to_u64).collect();
         let col = VecColumn::from(&vals);
-        let mapped = monotonic_map_column(
-            col,
-            StrictlyMonotonicMappingInverter::from(
-                StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 100),
-            ),
-        );
-        let val_i64s: Vec<u64> = mapped.iter().collect();
+        let mapped = monotonic_map_column(col, |el| i64::from_u64(el) * 10i64);
+        let val_i64s: Vec<i64> = mapped.iter().collect();
         for i in 0..100 {
             assert_eq!(val_i64s[i as usize], mapped.get_val(i));
         }
@@ -322,26 +337,20 @@ mod tests {

     #[test]
     fn test_monotonic_mapping_get_range() {
-        let vals: Vec<u64> = (0..100u64).map(|el| el * 10).collect();
+        let vals: Vec<u64> = (-1..99).map(i64::to_u64).collect();
         let col = VecColumn::from(&vals);
-        let mapped = monotonic_map_column(
-            col,
-            StrictlyMonotonicMappingInverter::from(
-                StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 0),
-            ),
-        );
-
-        assert_eq!(mapped.min_value(), 0u64);
-        assert_eq!(mapped.max_value(), 9900u64);
+        let mapped = monotonic_map_column(col, |el| i64::from_u64(el) * 10i64);
+        assert_eq!(mapped.min_value(), -10i64);
+        assert_eq!(mapped.max_value(), 980i64);
         assert_eq!(mapped.num_vals(), 100);
-        let val_u64s: Vec<u64> = mapped.iter().collect();
-        assert_eq!(val_u64s.len(), 100);
+        let val_i64s: Vec<i64> = mapped.iter().collect();
+        assert_eq!(val_i64s.len(), 100);
         for i in 0..100 {
-            assert_eq!(val_u64s[i as usize], mapped.get_val(i));
-            assert_eq!(val_u64s[i as usize], vals[i as usize] * 10);
+            assert_eq!(val_i64s[i as usize], mapped.get_val(i));
+            assert_eq!(val_i64s[i as usize], i64::from_u64(vals[i as usize]) * 10);
         }
-        let mut buf = [0u64; 20];
+        let mut buf = [0i64; 20];
         mapped.get_range(7, &mut buf[..]);
-        assert_eq!(&val_u64s[7..][..20], &buf);
+        assert_eq!(&val_i64s[7..][..20], &buf);
     }
 }

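The rewritten tests lean on `i64::to_u64`/`i64::from_u64` from `MonotonicallyMappableToU64` to get an order-preserving round trip between i64 and u64, which is what makes `(-1..99).map(i64::to_u64)` a monotonically increasing u64 sequence. The usual trick for such a mapping is to flip the sign bit; a hedged sketch of the idea, not necessarily the exact implementation in the crate's `common` helpers:

// Order-preserving i64 <-> u64 mapping via sign-bit flip (sketch).
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1 << 63)
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1 << 63)) as i64
}

fn main() {
    let vals = [-3i64, -1, 0, 1, 7];
    let mapped: Vec<u64> = vals.iter().map(|&v| i64_to_u64(v)).collect();
    // Order is preserved across the sign boundary...
    assert!(mapped.windows(2).all(|w| w[0] < w[1]));
    // ...and the mapping round-trips exactly.
    assert!(vals.iter().all(|&v| u64_to_i64(i64_to_u64(v)) == v));
}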
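From here the compare moves into the compact-space codec used for u128 (IP address) fast fields. Two themes recur in the remaining hunks: document indexes and value counts widen from u32 to u64 throughout, and range queries switch from filling a caller-supplied buffer to returning a freshly allocated Vec.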
@@ -57,7 +57,7 @@ fn num_bits(val: u128) -> u8 {
 /// metadata.
 pub fn get_compact_space(
     values_deduped_sorted: &BTreeSet<u128>,
-    total_num_values: u32,
+    total_num_values: u64,
     cost_per_blank: usize,
 ) -> CompactSpace {
     let mut compact_space_builder = CompactSpaceBuilder::new();

@@ -14,7 +14,7 @@ use std::{
     cmp::Ordering,
     collections::BTreeSet,
     io::{self, Write},
-    ops::{Range, RangeInclusive},
+    ops::RangeInclusive,
 };

 use common::{BinarySerializable, CountingWriter, VInt, VIntU128};
@@ -165,16 +165,16 @@ pub struct IPCodecParams {
     bit_unpacker: BitUnpacker,
     min_value: u128,
     max_value: u128,
-    num_vals: u32,
+    num_vals: u64,
     num_bits: u8,
 }

 impl CompactSpaceCompressor {
     /// Taking the vals as Vec may cost a lot of memory. It is used to sort the vals.
-    pub fn train_from(iter: impl Iterator<Item = u128>, num_vals: u32) -> Self {
+    pub fn train_from(column: &impl Column<u128>) -> Self {
         let mut values_sorted = BTreeSet::new();
-        values_sorted.extend(iter);
-        let total_num_values = num_vals;
+        values_sorted.extend(column.iter());
+        let total_num_values = column.num_vals();

         let compact_space =
             get_compact_space(&values_sorted, total_num_values, COST_PER_BLANK_IN_BITS);
@@ -200,7 +200,7 @@ impl CompactSpaceCompressor {
             bit_unpacker: BitUnpacker::new(num_bits),
             min_value,
             max_value,
-            num_vals: total_num_values,
+            num_vals: total_num_values as u64,
             num_bits,
         },
     }

@@ -267,7 +267,7 @@ impl BinarySerializable for IPCodecParams {
         let _header_flags = u64::deserialize(reader)?;
         let min_value = VIntU128::deserialize(reader)?.0;
         let max_value = VIntU128::deserialize(reader)?.0;
-        let num_vals = VIntU128::deserialize(reader)?.0 as u32;
+        let num_vals = VIntU128::deserialize(reader)?.0 as u64;
         let num_bits = u8::deserialize(reader)?;
         let compact_space = CompactSpace::deserialize(reader)?;

@@ -284,7 +284,7 @@ impl BinarySerializable for IPCodecParams {

 impl Column<u128> for CompactSpaceDecompressor {
     #[inline]
-    fn get_val(&self, doc: u32) -> u128 {
+    fn get_val(&self, doc: u64) -> u128 {
         self.get(doc)
     }

@@ -296,7 +296,7 @@ impl Column<u128> for CompactSpaceDecompressor {
         self.max_value()
     }

-    fn num_vals(&self) -> u32 {
+    fn num_vals(&self) -> u64 {
         self.params.num_vals
     }

@@ -304,15 +304,8 @@ impl Column<u128> for CompactSpaceDecompressor {
     fn iter(&self) -> Box<dyn Iterator<Item = u128> + '_> {
         Box::new(self.iter())
     }

-    #[inline]
-    fn get_positions_for_value_range(
-        &self,
-        value_range: RangeInclusive<u128>,
-        doc_id_range: Range<u32>,
-        positions: &mut Vec<u32>,
-    ) {
-        self.get_positions_for_value_range(value_range, doc_id_range, positions)
-    }
+    fn get_between_vals(&self, range: RangeInclusive<u128>) -> Vec<u64> {
+        self.get_between_vals(range)
+    }
 }

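On the new side, the range-query entry point becomes `get_between_vals(range) -> Vec<u64>`, which allocates and returns the matching positions, whereas the old side filled a caller-supplied `&mut Vec<u32>` restricted to a docid window. A sketch of the new calling convention on a plain in-memory column (`find_between` is a hypothetical stand-in, not the crate's code):

use std::ops::RangeInclusive;

// Returns the positions of all values falling inside `range`,
// mirroring the shape of the new `get_between_vals`.
fn find_between(vals: &[u128], range: RangeInclusive<u128>) -> Vec<u64> {
    vals.iter()
        .enumerate()
        .filter(|&(_, v)| range.contains(v))
        .map(|(pos, _)| pos as u64)
        .collect()
}

fn main() {
    let vals = [1u128, 99, 5, 99];
    assert_eq!(find_between(&vals, 5..=99), vec![1, 2, 3]);
}

The trade-off is allocation per query versus buffer reuse; the old signature let a caller clear and refill one Vec across many queries and also restrict the scan to a docid subrange.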
@@ -347,19 +340,12 @@ impl CompactSpaceDecompressor {
     /// Comparing on compact space: Real dataset 1.08 GElements/s
     ///
     /// Comparing on original space: Real dataset .06 GElements/s (not completely optimized)
-    #[inline]
-    pub fn get_positions_for_value_range(
-        &self,
-        value_range: RangeInclusive<u128>,
-        doc_id_range: Range<u32>,
-        positions: &mut Vec<u32>,
-    ) {
-        if value_range.start() > value_range.end() {
-            return;
+    pub fn get_between_vals(&self, range: RangeInclusive<u128>) -> Vec<u64> {
+        if range.start() > range.end() {
+            return Vec::new();
         }
-        let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals());
-        let from_value = *value_range.start();
-        let to_value = *value_range.end();
+        let from_value = *range.start();
+        let to_value = *range.end();
         assert!(to_value >= from_value);
         let compact_from = self.u128_to_compact(from_value);
         let compact_to = self.u128_to_compact(to_value);
@@ -367,7 +353,7 @@ impl CompactSpaceDecompressor {
         // Quick return, if both ranges fall into the same non-mapped space, the range can't cover
         // any values, so we can early exit
         match (compact_to, compact_from) {
-            (Err(pos1), Err(pos2)) if pos1 == pos2 => return,
+            (Err(pos1), Err(pos2)) if pos1 == pos2 => return Vec::new(),
             _ => {}
         }

@@ -389,28 +375,27 @@ impl CompactSpaceDecompressor {
         });

         let range = compact_from..=compact_to;
-        let scan_num_docs = doc_id_range.end - doc_id_range.start;
+        let mut positions = Vec::new();

         let step_size = 4;
-        let cutoff = doc_id_range.start + scan_num_docs - scan_num_docs % step_size;
+        let cutoff = self.params.num_vals - self.params.num_vals % step_size;

         let mut push_if_in_range = |idx, val| {
             if range.contains(&val) {
                 positions.push(idx);
             }
         };
-        let get_val = |idx| self.params.bit_unpacker.get(idx, &self.data);
+        let get_val = |idx| self.params.bit_unpacker.get(idx as u64, &self.data);
         // unrolled loop
-        for idx in (doc_id_range.start..cutoff).step_by(step_size as usize) {
+        for idx in (0..cutoff).step_by(step_size as usize) {
             let idx1 = idx;
             let idx2 = idx + 1;
             let idx3 = idx + 2;
             let idx4 = idx + 3;
-            let val1 = get_val(idx1 as u32);
-            let val2 = get_val(idx2 as u32);
-            let val3 = get_val(idx3 as u32);
-            let val4 = get_val(idx4 as u32);
+            let val1 = get_val(idx1);
+            let val2 = get_val(idx2);
+            let val3 = get_val(idx3);
+            let val4 = get_val(idx4);
             push_if_in_range(idx1, val1);
             push_if_in_range(idx2, val2);
             push_if_in_range(idx3, val3);
@@ -418,15 +403,17 @@ impl CompactSpaceDecompressor {
         }

         // handle rest
-        for idx in cutoff..doc_id_range.end {
-            push_if_in_range(idx, get_val(idx as u32));
+        for idx in cutoff..self.params.num_vals {
+            push_if_in_range(idx, get_val(idx));
         }

+        positions
     }

     #[inline]
     fn iter_compact(&self) -> impl Iterator<Item = u64> + '_ {
         (0..self.params.num_vals)
-            .map(move |idx| self.params.bit_unpacker.get(idx, &self.data) as u64)
+            .map(move |idx| self.params.bit_unpacker.get(idx as u64, &self.data) as u64)
     }

     #[inline]
@@ -438,7 +425,7 @@ impl CompactSpaceDecompressor {
     }

     #[inline]
-    pub fn get(&self, idx: u32) -> u128 {
+    pub fn get(&self, idx: u64) -> u128 {
         let compact = self.params.bit_unpacker.get(idx, &self.data);
         self.compact_to_u128(compact)
     }

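Both sides keep the manually 4x-unrolled scan: the loop body loads four bit-packed values before testing any of them, which gives the CPU independent work to overlap instead of a load-then-branch dependency per element (the doc comment above quotes roughly 1.08 GElements/s when comparing in compact space). A standalone sketch of the same unrolling pattern over a plain slice rather than a bit-packed column:

use std::ops::RangeInclusive;

// 4x-unrolled scan: bulk loop over groups of four, scalar tail for the rest.
fn scan_positions(vals: &[u64], range: RangeInclusive<u64>) -> Vec<u64> {
    let num_vals = vals.len() as u64;
    let step_size = 4;
    let cutoff = num_vals - num_vals % step_size;
    let mut positions = Vec::new();
    let mut push_if_in_range = |idx: u64, val: u64| {
        if range.contains(&val) {
            positions.push(idx);
        }
    };
    // Unrolled body: load four values, then test four values.
    for idx in (0..cutoff).step_by(step_size as usize) {
        let (v1, v2, v3, v4) = (
            vals[idx as usize],
            vals[idx as usize + 1],
            vals[idx as usize + 2],
            vals[idx as usize + 3],
        );
        push_if_in_range(idx, v1);
        push_if_in_range(idx + 1, v2);
        push_if_in_range(idx + 2, v3);
        push_if_in_range(idx + 3, v4);
    }
    // Scalar tail for the last num_vals % 4 elements.
    for idx in cutoff..num_vals {
        push_if_in_range(idx, vals[idx as usize]);
    }
    positions
}

fn main() {
    let vals = [5u64, 1, 7, 9, 3, 7];
    assert_eq!(scan_positions(&vals, 5..=7), vec![0, 2, 5]);
}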
@@ -456,7 +443,7 @@ impl CompactSpaceDecompressor {
 mod tests {

     use super::*;
-    use crate::{open_u128, serialize_u128};
+    use crate::{open_u128, serialize_u128, VecColumn};

     #[test]
     fn compact_space_test() {
@@ -465,7 +452,7 @@ mod tests {
     ]
     .into_iter()
     .collect();
-        let compact_space = get_compact_space(ips, ips.len() as u32, 11);
+        let compact_space = get_compact_space(ips, ips.len() as u64, 11);
         let amplitude = compact_space.amplitude_compact_space();
         assert_eq!(amplitude, 17);
         assert_eq!(1, compact_space.u128_to_compact(2).unwrap());
@@ -496,7 +483,7 @@ mod tests {
     #[test]
     fn compact_space_amplitude_test() {
         let ips = &[100000u128, 1000000].into_iter().collect();
-        let compact_space = get_compact_space(ips, ips.len() as u32, 1);
+        let compact_space = get_compact_space(ips, ips.len() as u64, 1);
         let amplitude = compact_space.amplitude_compact_space();
         assert_eq!(amplitude, 2);
     }
@@ -504,21 +491,16 @@ mod tests {
     fn test_all(data: OwnedBytes, expected: &[u128]) {
         let decompressor = CompactSpaceDecompressor::open(data).unwrap();
         for (idx, expected_val) in expected.iter().cloned().enumerate() {
-            let val = decompressor.get(idx as u32);
+            let val = decompressor.get(idx as u64);
             assert_eq!(val, expected_val);

             let test_range = |range: RangeInclusive<u128>| {
                 let expected_positions = expected
                     .iter()
                     .positions(|val| range.contains(val))
-                    .map(|pos| pos as u32)
+                    .map(|pos| pos as u64)
                     .collect::<Vec<_>>();
-                let mut positions = Vec::new();
-                decompressor.get_positions_for_value_range(
-                    range,
-                    0..decompressor.num_vals(),
-                    &mut positions,
-                );
+                let positions = decompressor.get_between_vals(range);
                 assert_eq!(positions, expected_positions);
             };

@@ -531,12 +513,7 @@ mod tests {

     fn test_aux_vals(u128_vals: &[u128]) -> OwnedBytes {
         let mut out = Vec::new();
-        serialize_u128(
-            || u128_vals.iter().cloned(),
-            u128_vals.len() as u32,
-            &mut out,
-        )
-        .unwrap();
+        serialize_u128(VecColumn::from(u128_vals), &mut out).unwrap();

         let data = OwnedBytes::new(out);
         test_all(data.clone(), u128_vals);
@@ -558,107 +535,24 @@ mod tests {
     ];
     let data = test_aux_vals(vals);
     let decomp = CompactSpaceDecompressor::open(data).unwrap();
-    let complete_range = 0..vals.len() as u32;
-    for (pos, val) in vals.iter().enumerate() {
-        let val = *val as u128;
-        let pos = pos as u32;
-        let mut positions = Vec::new();
-        decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);
-        assert_eq!(positions, vec![pos]);
-    }
-
-    // handle docid range out of bounds
-    let positions = get_positions_for_value_range_helper(&decomp, 0..=1, 1..u32::MAX);
-    assert_eq!(positions, vec![]);
-
-    let positions =
-        get_positions_for_value_range_helper(&decomp, 0..=1, complete_range.clone());
+    let positions = decomp.get_between_vals(0..=1);
     assert_eq!(positions, vec![0]);
-    let positions =
-        get_positions_for_value_range_helper(&decomp, 0..=2, complete_range.clone());
+    let positions = decomp.get_between_vals(0..=2);
     assert_eq!(positions, vec![0]);
-    let positions =
-        get_positions_for_value_range_helper(&decomp, 0..=3, complete_range.clone());
+    let positions = decomp.get_between_vals(0..=3);
     assert_eq!(positions, vec![0, 2]);
-    assert_eq!(
-        get_positions_for_value_range_helper(&decomp, 99999u128..=99999u128, complete_range.clone()),
-        vec![3]
-    );
-    assert_eq!(
-        get_positions_for_value_range_helper(&decomp, 99999u128..=100000u128, complete_range.clone()),
-        vec![3, 4]
-    );
-    assert_eq!(
-        get_positions_for_value_range_helper(&decomp, 99998u128..=100000u128, complete_range.clone()),
-        vec![3, 4]
-    );
-    assert_eq!(
-        get_positions_for_value_range_helper(&decomp, 99998u128..=99999u128, complete_range.clone()),
-        vec![3]
-    );
-    assert_eq!(
-        get_positions_for_value_range_helper(&decomp, 99998u128..=99998u128, complete_range.clone()),
-        vec![]
-    );
-    assert_eq!(
-        get_positions_for_value_range_helper(&decomp, 333u128..=333u128, complete_range.clone()),
-        vec![8]
-    );
-    assert_eq!(
-        get_positions_for_value_range_helper(&decomp, 332u128..=333u128, complete_range.clone()),
-        vec![8]
-    );
-    assert_eq!(
-        get_positions_for_value_range_helper(&decomp, 332u128..=334u128, complete_range.clone()),
-        vec![8]
-    );
-    assert_eq!(
-        get_positions_for_value_range_helper(&decomp, 333u128..=334u128, complete_range.clone()),
-        vec![8]
-    );
+    assert_eq!(decomp.get_between_vals(99999u128..=99999u128), vec![3]);
+    assert_eq!(decomp.get_between_vals(99999u128..=100000u128), vec![3, 4]);
+    assert_eq!(decomp.get_between_vals(99998u128..=100000u128), vec![3, 4]);
+    assert_eq!(decomp.get_between_vals(99998u128..=99999u128), vec![3]);
+    assert_eq!(decomp.get_between_vals(99998u128..=99998u128), vec![]);
+    assert_eq!(decomp.get_between_vals(333u128..=333u128), vec![8]);
+    assert_eq!(decomp.get_between_vals(332u128..=333u128), vec![8]);
+    assert_eq!(decomp.get_between_vals(332u128..=334u128), vec![8]);
+    assert_eq!(decomp.get_between_vals(333u128..=334u128), vec![8]);

     assert_eq!(
-        get_positions_for_value_range_helper(
-            &decomp,
-            4_000_211_221u128..=5_000_000_000u128,
-            complete_range.clone()
-        ),
+        decomp.get_between_vals(4_000_211_221u128..=5_000_000_000u128),
         vec![6, 7]
     );
 }
@@ -683,29 +577,12 @@ mod tests {
     ];
     let data = test_aux_vals(vals);
     let decomp = CompactSpaceDecompressor::open(data).unwrap();
-    let complete_range = 0..vals.len() as u32;
-    assert_eq!(
-        get_positions_for_value_range_helper(&decomp, 0..=5, complete_range.clone()),
-        vec![]
-    );
-    assert_eq!(
-        get_positions_for_value_range_helper(&decomp, 0..=100, complete_range.clone()),
-        vec![0]
-    );
-    assert_eq!(
-        get_positions_for_value_range_helper(&decomp, 0..=105, complete_range.clone()),
-        vec![0]
-    );
-}
-
-fn get_positions_for_value_range_helper<C: Column<T> + ?Sized, T: PartialOrd>(
-    column: &C,
-    value_range: RangeInclusive<T>,
-    doc_id_range: Range<u32>,
-) -> Vec<u32> {
-    let mut positions = Vec::new();
-    column.get_positions_for_value_range(value_range, doc_id_range, &mut positions);
-    positions
-}
+    let positions = decomp.get_between_vals(0..=5);
+    assert_eq!(positions, vec![]);
+    let positions = decomp.get_between_vals(0..=100);
+    assert_eq!(positions, vec![0]);
+    let positions = decomp.get_between_vals(0..=105);
+    assert_eq!(positions, vec![0]);
 }

 #[test]
@@ -726,33 +603,13 @@ mod tests {
     5_000_000_000,
     ];
     let mut out = Vec::new();
-    serialize_u128(|| vals.iter().cloned(), vals.len() as u32, &mut out).unwrap();
-    let decomp = open_u128::<u128>(OwnedBytes::new(out)).unwrap();
-    let complete_range = 0..vals.len() as u32;
-
-    assert_eq!(
-        get_positions_for_value_range_helper(&*decomp, 199..=200, complete_range.clone()),
-        vec![0]
-    );
-    assert_eq!(
-        get_positions_for_value_range_helper(&*decomp, 199..=201, complete_range.clone()),
-        vec![0, 1]
-    );
-    assert_eq!(
-        get_positions_for_value_range_helper(&*decomp, 200..=200, complete_range.clone()),
-        vec![0]
-    );
-    assert_eq!(
-        get_positions_for_value_range_helper(&*decomp, 1_000_000..=1_000_000, complete_range.clone()),
-        vec![11]
-    );
+    serialize_u128(VecColumn::from(vals), &mut out).unwrap();
+    let decomp = open_u128(OwnedBytes::new(out)).unwrap();
+    assert_eq!(decomp.get_between_vals(199..=200), vec![0]);
+    assert_eq!(decomp.get_between_vals(199..=201), vec![0, 1]);
+    assert_eq!(decomp.get_between_vals(200..=200), vec![0]);
+    assert_eq!(decomp.get_between_vals(1_000_000..=1_000_000), vec![11]);
 }

 #[test]

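Next the compare reaches the crate root. The removal of `#![warn(missing_docs)]` in the first hunk is what lets the later hunks drop so many doc comments without tripping a lint; `ColumnReader` joins the public exports, the `monotonic_mapping_u128` module disappears, and `to_code`/`from_code` are promoted from `pub(crate)` to `pub`.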
@@ -1,12 +1,5 @@
-#![warn(missing_docs)]
 #![cfg_attr(all(feature = "unstable", test), feature(test))]

-//! # `fastfield_codecs`
-//!
-//! - Columnar storage of data for tantivy [`Column`].
-//! - Encode data in different codecs.
-//! - Monotonically map values to u64/u128
-
 #[cfg(test)]
 #[macro_use]
 extern crate more_asserts;
@@ -20,10 +13,6 @@ use std::sync::Arc;

 use common::BinarySerializable;
 use compact_space::CompactSpaceDecompressor;
-use monotonic_mapping::{
-    StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
-    StrictlyMonotonicMappingToInternalBaseval, StrictlyMonotonicMappingToInternalGCDBaseval,
-};
 use ownedbytes::OwnedBytes;
 use serialize::Header;

@@ -33,7 +22,6 @@ mod compact_space;
 mod line;
 mod linear;
 mod monotonic_mapping;
-mod monotonic_mapping_u128;

 mod column;
 mod gcd;
@@ -41,26 +29,18 @@ mod serialize;

 use self::bitpacked::BitpackedCodec;
 use self::blockwise_linear::BlockwiseLinearCodec;
-pub use self::column::{monotonic_map_column, Column, VecColumn};
+pub use self::column::{monotonic_map_column, Column, ColumnReader, VecColumn};
 use self::linear::LinearCodec;
-pub use self::monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
-pub use self::monotonic_mapping_u128::MonotonicallyMappableToU128;
+pub use self::monotonic_mapping::MonotonicallyMappableToU64;
 pub use self::serialize::{
     estimate, serialize, serialize_and_load, serialize_u128, NormalizedHeader,
 };

 #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
 #[repr(u8)]
-/// Available codecs to use to encode the u64 (via [`MonotonicallyMappableToU64`]) converted data.
 pub enum FastFieldCodecType {
-    /// Bitpack all values in the value range. The number of bits is defined by the amplitude
-    /// `column.max_value() - column.min_value()`
     Bitpacked = 1,
-    /// Linear interpolation puts a line between the first and last value and then bitpacks the
-    /// values by the offset from the line. The number of bits is defined by the max deviation from
-    /// the line.
     Linear = 2,
-    /// Same as [`FastFieldCodecType::Linear`], but encodes in blocks of 512 elements.
     BlockwiseLinear = 3,
 }

@@ -78,11 +58,11 @@ impl BinarySerializable for FastFieldCodecType {
 }

 impl FastFieldCodecType {
-    pub(crate) fn to_code(self) -> u8 {
+    pub fn to_code(self) -> u8 {
         self as u8
     }

-    pub(crate) fn from_code(code: u8) -> Option<Self> {
+    pub fn from_code(code: u8) -> Option<Self> {
         match code {
             1 => Some(Self::Bitpacked),
             2 => Some(Self::Linear),

@@ -93,13 +73,8 @@ impl FastFieldCodecType {
 }

 /// Returns the correct codec reader wrapped in the `Arc` for the data.
-pub fn open_u128<Item: MonotonicallyMappableToU128>(
-    bytes: OwnedBytes,
-) -> io::Result<Arc<dyn Column<Item>>> {
-    let reader = CompactSpaceDecompressor::open(bytes)?;
-    let inverted: StrictlyMonotonicMappingInverter<StrictlyMonotonicMappingToInternal<Item>> =
-        StrictlyMonotonicMappingToInternal::<Item>::new().into();
-    Ok(Arc::new(monotonic_map_column(reader, inverted)))
+pub fn open_u128(bytes: OwnedBytes) -> io::Result<Arc<dyn Column<u128>>> {
+    Ok(Arc::new(CompactSpaceDecompressor::open(bytes)?))
 }

 /// Returns the correct codec reader wrapped in the `Arc` for the data.
@@ -124,15 +99,11 @@ fn open_specific_codec<C: FastFieldCodec, Item: MonotonicallyMappableToU64>(
     let reader = C::open_from_bytes(bytes, normalized_header)?;
     let min_value = header.min_value;
     if let Some(gcd) = header.gcd {
-        let mapping = StrictlyMonotonicMappingInverter::from(
-            StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd.get(), min_value),
-        );
-        Ok(Arc::new(monotonic_map_column(reader, mapping)))
+        let monotonic_mapping = move |val: u64| Item::from_u64(min_value + val * gcd.get());
+        Ok(Arc::new(monotonic_map_column(reader, monotonic_mapping)))
     } else {
-        let mapping = StrictlyMonotonicMappingInverter::from(
-            StrictlyMonotonicMappingToInternalBaseval::new(min_value),
-        );
-        Ok(Arc::new(monotonic_map_column(reader, mapping)))
+        let monotonic_mapping = move |val: u64| Item::from_u64(min_value + val);
+        Ok(Arc::new(monotonic_map_column(reader, monotonic_mapping)))
     }
 }

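The decode direction of the GCD-plus-base-value normalization is now a one-line closure: stored values are `(original - min_value) / gcd`, so reading back is `min_value + stored * gcd`. A small standalone round-trip sketch of that normalization (not the crate's code):

// Round-trip sketch of the GCD + base-value normalization.
fn normalize(vals: &[u64], min_value: u64, gcd: u64) -> Vec<u64> {
    vals.iter().map(|&v| (v - min_value) / gcd).collect()
}

fn main() {
    let vals = [100u64, 130, 190];
    let (min_value, gcd) = (100u64, 30u64); // min of the values, gcd of the deltas
    let stored = normalize(&vals, min_value, gcd);
    assert_eq!(stored, vec![0, 1, 3]); // much smaller values to bitpack
    // Decode side, the same shape as the new open_specific_codec closure:
    let restore = move |val: u64| min_value + val * gcd;
    assert!(stored.iter().zip(&vals).all(|(&s, &v)| restore(s) == v));
}

Shrinking the stored values this way is what lets the bitpacked codec spend fewer bits per element; the closure only has to be monotonic in one direction now, which is exactly what the simplified `monotonic_map_column` requires.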
@@ -152,7 +123,7 @@ trait FastFieldCodec: 'static {
     ///
     /// The column iterator should be preferred over using column `get_val` method for
     /// performance reasons.
-    fn serialize(column: &dyn Column, write: &mut impl Write) -> io::Result<()>;
+    fn serialize(column: &dyn Column<u64>, write: &mut impl Write) -> io::Result<()>;

     /// Returns an estimate of the compression ratio.
     /// If the codec is not applicable, returns `None`.
@@ -161,10 +132,9 @@ trait FastFieldCodec: 'static {
     ///
     /// It could make sense to also return a value representing
     /// computational complexity.
-    fn estimate(column: &dyn Column) -> Option<f32>;
+    fn estimate(column: &impl Column) -> Option<f32>;
 }

-/// The list of all available codecs for u64 convertible data.
 pub const ALL_CODEC_TYPES: [FastFieldCodecType; 3] = [
     FastFieldCodecType::Bitpacked,
     FastFieldCodecType::BlockwiseLinear,
@@ -173,7 +143,6 @@ pub const ALL_CODEC_TYPES: [FastFieldCodecType; 3] = [

 #[cfg(test)]
 mod tests {

     use proptest::prelude::*;
     use proptest::strategy::Strategy;
     use proptest::{prop_oneof, proptest};
@@ -199,32 +168,15 @@ mod tests {
     let actual_compression = out.len() as f32 / (data.len() as f32 * 8.0);

     let reader = crate::open::<u64>(OwnedBytes::new(out)).unwrap();
-    assert_eq!(reader.num_vals(), data.len() as u32);
+    assert_eq!(reader.num_vals(), data.len() as u64);
     for (doc, orig_val) in data.iter().copied().enumerate() {
-        let val = reader.get_val(doc as u32);
+        let val = reader.get_val(doc as u64);
         assert_eq!(
             val, orig_val,
             "val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data \
             `{data:?}`",
         );
     }

-    if !data.is_empty() {
-        let test_rand_idx = rand::thread_rng().gen_range(0..=data.len() - 1);
-        let expected_positions: Vec<u32> = data
-            .iter()
-            .enumerate()
-            .filter(|(_, el)| **el == data[test_rand_idx])
-            .map(|(pos, _)| pos as u32)
-            .collect();
-        let mut positions = Vec::new();
-        reader.get_positions_for_value_range(
-            data[test_rand_idx]..=data[test_rand_idx],
-            0..data.len() as u32,
-            &mut positions,
-        );
-        assert_eq!(expected_positions, positions);
-    }
     Some((estimation, actual_compression))
 }

@@ -360,7 +312,7 @@ mod tests {

     #[test]
     fn estimation_test_bad_interpolation_case_monotonically_increasing() {
-        let mut data: Vec<u64> = (201..=20000_u64).collect();
+        let mut data: Vec<u64> = (200..=20000_u64).collect();
         data.push(1_000_000);
         let data: VecColumn = data.as_slice().into();

@@ -434,7 +386,7 @@ mod bench {
     b.iter(|| {
         let mut sum = 0u64;
         for pos in value_iter() {
-            let val = col.get_val(pos as u32);
+            let val = col.get_val(pos as u64);
             sum = sum.wrapping_add(val);
         }
         sum
@@ -446,7 +398,7 @@ mod bench {
     b.iter(|| {
         let mut sum = 0u64;
         for pos in value_iter() {
-            let val = col.get_val(pos as u32);
+            let val = col.get_val(pos as u64);
             sum = sum.wrapping_add(val);
         }
         sum

@@ -1,5 +1,5 @@
 use std::io;
-use std::num::NonZeroU32;
+use std::num::NonZeroU64;

 use common::{BinarySerializable, VInt};

@@ -29,7 +29,7 @@ pub struct Line {
 /// compute_slope(y0, y1)
 /// = compute_slope(y0 + X % 2^64, y1 + X % 2^64)
 /// `
-fn compute_slope(y0: u64, y1: u64, num_vals: NonZeroU32) -> u64 {
+fn compute_slope(y0: u64, y1: u64, num_vals: NonZeroU64) -> u64 {
     let dy = y1.wrapping_sub(y0);
     let sign = dy <= (1 << 63);
     let abs_dy = if sign {
@@ -43,7 +43,7 @@ fn compute_slope(y0: u64, y1: u64, num_vals: NonZeroU32) -> u64 {
         return 0u64;
     }

-    let abs_slope = (abs_dy << 32) / num_vals.get() as u64;
+    let abs_slope = (abs_dy << 32) / num_vals.get();
     if sign {
         abs_slope
     } else {
@@ -62,43 +62,30 @@ fn compute_slope(y0: u64, y1: u64, num_vals: NonZeroU64) -> u64 {

 impl Line {
     #[inline(always)]
-    pub fn eval(&self, x: u32) -> u64 {
-        let linear_part = ((x as u64).wrapping_mul(self.slope) >> 32) as i32 as u64;
+    pub fn eval(&self, x: u64) -> u64 {
+        let linear_part = (x.wrapping_mul(self.slope) >> 32) as i32 as u64;
         self.intercept.wrapping_add(linear_part)
     }

     // Same as train, but the intercept is only estimated from provided sample positions
-    pub fn estimate(sample_positions_and_values: &[(u64, u64)]) -> Self {
-        let first_val = sample_positions_and_values[0].1;
-        let last_val = sample_positions_and_values[sample_positions_and_values.len() - 1].1;
-        let num_vals = sample_positions_and_values[sample_positions_and_values.len() - 1].0 + 1;
-        Self::train_from(
-            first_val,
-            last_val,
-            num_vals as u32,
-            sample_positions_and_values.iter().cloned(),
-        )
+    pub fn estimate(ys: &dyn Column, sample_positions: &[u64]) -> Self {
+        Self::train_from(ys, sample_positions.iter().cloned())
     }

     // Intercept is only computed from provided positions
-    fn train_from(
-        first_val: u64,
-        last_val: u64,
-        num_vals: u32,
-        positions_and_values: impl Iterator<Item = (u64, u64)>,
-    ) -> Self {
-        // TODO replace with let else
-        let idx_last_val = if let Some(idx_last_val) = NonZeroU32::new(num_vals - 1) {
-            idx_last_val
+    fn train_from(ys: &dyn Column, positions: impl Iterator<Item = u64>) -> Self {
+        let last_idx = if let Some(last_idx) = NonZeroU64::new(ys.num_vals() - 1) {
+            last_idx
        } else {
             return Line::default();
         };

-        let y0 = first_val;
-        let y1 = last_val;
+        let mut ys_reader = ys.reader();
+        let y0 = ys_reader.seek(0);
+        let y1 = ys_reader.seek(last_idx.get());

         // We first independently pick our slope.
-        let slope = compute_slope(y0, y1, idx_last_val);
+        let slope = compute_slope(y0, y1, last_idx);

         // We picked our slope. Note that it does not have to be perfect.
         // Now we need to compute the best intercept.
@@ -128,8 +115,12 @@ impl Line {
             intercept: 0,
         };
         let heuristic_shift = y0.wrapping_sub(MID_POINT);
-        line.intercept = positions_and_values
-            .map(|(pos, y)| y.wrapping_sub(line.eval(pos as u32)))
+        let mut ys_reader = ys.reader();
+        line.intercept = positions
+            .map(|pos| {
+                let y = ys_reader.seek(pos);
+                y.wrapping_sub(line.eval(pos))
+            })
             .min_by_key(|&val| val.wrapping_sub(heuristic_shift))
             .unwrap_or(0u64); //< Never happens.
         line
@@ -146,14 +137,7 @@ impl Line {
     /// This function is only invariable by translation if all of the
     /// `ys` are packaged into half of the space. (See heuristic below)
     pub fn train(ys: &dyn Column) -> Self {
-        let first_val = ys.iter().next().unwrap();
-        let last_val = ys.iter().nth(ys.num_vals() as usize - 1).unwrap();
-        Self::train_from(
-            first_val,
-            last_val,
-            ys.num_vals(),
-            ys.iter().enumerate().map(|(pos, val)| (pos as u64, val)),
-        )
+        Self::train_from(ys, 0..ys.num_vals())
     }
 }

@@ -199,7 +183,7 @@ mod tests {
     let line = Line::train(&VecColumn::from(&ys));
     ys.iter()
         .enumerate()
-        .map(|(x, y)| y.wrapping_sub(line.eval(x as u32)))
+        .map(|(x, y)| y.wrapping_sub(line.eval(x as u64)))
         .max()
 }

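`Line` works in 32.32-style fixed point: the slope is stored as `(|dy| << 32) / n`, and `eval` multiplies by `x` and shifts back down by 32, so the division happens once at train time and evaluation stays integer-only. A standalone illustration of the arithmetic (this mirrors the shape of the code above, not its exact wrapping and sign handling):

// Fixed-point slope: slope = floor(dy / n * 2^32), eval(x) = (x * slope) >> 32.
fn main() {
    let (y0, y1) = (0u64, 1_000u64);
    let n = 3u64; // index distance between first and last value
    let slope = ((y1 - y0) << 32) / n;
    assert_eq!((1 * slope) >> 32, 333); // ~ 1000/3, truncated
    assert_eq!((3 * slope) >> 32, 999); // reaches y1 up to truncation error
}

A slightly-off slope is harmless here: the codec bit-packs each value's offset from the approximate line, so truncation only costs a wider offset, never correctness.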
@@ -19,7 +19,7 @@ pub struct LinearReader {

 impl Column for LinearReader {
     #[inline]
-    fn get_val(&self, doc: u32) -> u64 {
+    fn get_val(&self, doc: u64) -> u64 {
         let interpoled_val: u64 = self.linear_params.line.eval(doc);
         let bitpacked_diff = self.linear_params.bit_unpacker.get(doc, &self.data);
         interpoled_val.wrapping_add(bitpacked_diff)
@@ -37,7 +37,7 @@ impl Column for LinearReader {
     }

     #[inline]
-    fn num_vals(&self) -> u32 {
+    fn num_vals(&self) -> u64 {
         self.header.num_vals
     }
 }
@@ -93,7 +93,7 @@ impl FastFieldCodec for LinearCodec {
         .iter()
         .enumerate()
         .map(|(pos, actual_value)| {
-            let calculated_value = line.eval(pos as u32);
+            let calculated_value = line.eval(pos as u64);
             actual_value.wrapping_sub(calculated_value)
         })
         .max()
@@ -108,7 +108,7 @@ impl FastFieldCodec for LinearCodec {

     let mut bit_packer = BitPacker::new();
     for (pos, actual_value) in column.iter().enumerate() {
-        let calculated_value = line.eval(pos as u32);
+        let calculated_value = line.eval(pos as u64);
         let offset = actual_value.wrapping_sub(calculated_value);
         bit_packer.write(offset, num_bits, write)?;
     }
@@ -121,26 +121,25 @@ impl FastFieldCodec for LinearCodec {
     /// where the local maxima for the deviation of the calculated value are and
     /// the offset to shift all values to >=0 is also unknown.
     #[allow(clippy::question_mark)]
-    fn estimate(column: &dyn Column) -> Option<f32> {
+    fn estimate(column: &impl Column) -> Option<f32> {
         if column.num_vals() < 3 {
             return None; // disable compressor for this case
         }

-        let limit_num_vals = column.num_vals().min(100_000);
-
-        let num_samples = 100;
-        let step_size = (limit_num_vals / num_samples).max(1); // 20 samples
-        let mut sample_positions_and_values: Vec<_> = Vec::new();
-        for (pos, val) in column.iter().enumerate().step_by(step_size as usize) {
-            sample_positions_and_values.push((pos as u64, val));
-        }
-
-        let line = Line::estimate(&sample_positions_and_values);
-
-        let estimated_bit_width = sample_positions_and_values
+        // let's sample at 0%, 5%, 10% .. 95%, 100%
+        let num_vals = column.num_vals() as f32 / 100.0;
+        let sample_positions = (0..20)
+            .map(|pos| (num_vals * pos as f32 * 5.0) as u64)
+            .collect::<Vec<_>>();
+
+        let line = Line::estimate(column, &sample_positions);
+
+        let mut column_reader = column.reader();
+        let estimated_bit_width = sample_positions
             .into_iter()
-            .map(|(pos, actual_value)| {
-                let interpolated_val = line.eval(pos as u32);
+            .map(|pos| {
+                let actual_value = column_reader.seek(pos);
+                let interpolated_val = line.eval(pos as u64);
                 actual_value.wrapping_sub(interpolated_val)
             })
             .map(|diff| ((diff as f32 * 1.5) * 2.0) as u64)
@@ -148,7 +147,6 @@ impl FastFieldCodec for LinearCodec {
             .max()
             .unwrap_or(0);

-        // Extrapolate to whole column
         let num_bits = (estimated_bit_width as u64 * column.num_vals() as u64) + 64;
         let num_bits_uncompressed = 64 * column.num_vals();
         Some(num_bits as f32 / num_bits_uncompressed as f32)

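The new estimator samples at fixed percentiles instead of a fixed step over at most the first 100_000 values, so huge columns are probed uniformly across their whole length. A small sketch of the position computation; note that the committed comment says "0%, 5%, 10% .. 95%, 100%" but `0..20` actually stops at 95%:

// Percentile sampling positions, as computed in the new estimate().
fn sample_positions(num_vals: u64) -> Vec<u64> {
    let one_percent = num_vals as f32 / 100.0;
    (0..20).map(|i| (one_percent * i as f32 * 5.0) as u64).collect()
}

fn main() {
    let positions = sample_positions(1_000);
    assert_eq!(positions.len(), 20);
    assert_eq!(positions.first(), Some(&0));
    assert_eq!(positions.last(), Some(&950)); // 95%, not 100%
}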
@@ -90,7 +90,7 @@ fn bench_ip() {
     {
         let mut data = vec![];
         for dataset in dataset.chunks(500_000) {
-            serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
+            serialize_u128(VecColumn::from(dataset), &mut data).unwrap();
         }
         let compression = data.len() as f64 / (dataset.len() * 16) as f64;
         println!("Compression 50_000 chunks {:.4}", compression);
@@ -101,10 +101,7 @@ fn bench_ip() {
     }

     let mut data = vec![];
-    {
-        print_time!("creation");
-        serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
-    }
+    serialize_u128(VecColumn::from(&dataset), &mut data).unwrap();

     let compression = data.len() as f64 / (dataset.len() * 16) as f64;
     println!("Compression {:.2}", compression);
@@ -113,17 +110,11 @@ fn bench_ip() {
     (data.len() * 8) as f32 / dataset.len() as f32
     );

-    let decompressor = open_u128::<u128>(OwnedBytes::new(data)).unwrap();
+    let decompressor = open_u128(OwnedBytes::new(data)).unwrap();
     // Sample some ranges
-    let mut doc_values = Vec::new();
     for value in dataset.iter().take(1110).skip(1100).cloned() {
-        doc_values.clear();
         print_time!("get range");
-        decompressor.get_positions_for_value_range(
-            value..=value,
-            0..decompressor.num_vals(),
-            &mut doc_values,
-        );
+        let doc_values = decompressor.get_between_vals(value..=value);
         println!("{:?}", doc_values.len());
     }
 }

@@ -1,11 +1,3 @@
-use std::marker::PhantomData;
-
-use fastdivide::DividerU64;
-
-use crate::MonotonicallyMappableToU128;
-
-/// Monotonic maps a value to u64 value space.
-/// Monotonic mapping enables `PartialOrd` on u64 space without conversion to original space.
 pub trait MonotonicallyMappableToU64: 'static + PartialOrd + Copy + Send + Sync {
     /// Converts a value to u64.
     ///
@@ -19,145 +11,6 @@ pub trait MonotonicallyMappableToU64: 'static + PartialOrd + Copy + Send + Sync
     fn from_u64(val: u64) -> Self;
 }

-/// Values need to be strictly monotonic mapped to a `Internal` value (u64 or u128) that can be
-/// used in fast field codecs.
-///
-/// The monotonic mapping is required so that `PartialOrd` can be used on `Internal` without
-/// converting to `External`.
-///
-/// All strictly monotonic functions are invertible because they are guaranteed to have a one-to-one
-/// mapping from their range to their domain. The `inverse` method is required when opening a codec,
-/// so a value can be converted back to its original domain (e.g. ip address or f64) from its
-/// internal representation.
-pub trait StrictlyMonotonicFn<External, Internal> {
-    /// Strictly monotonically maps the value from External to Internal.
-    fn mapping(&self, inp: External) -> Internal;
-    /// Inverse of `mapping`. Maps the value from Internal to External.
-    fn inverse(&self, out: Internal) -> External;
-}
-
-/// Inverts a strictly monotonic mapping from `StrictlyMonotonicFn<A, B>` to
-/// `StrictlyMonotonicFn<B, A>`.
-///
-/// # Warning
-///
-/// This type comes with a footgun. A type being strictly monotonic does not impose that the inverse
-/// mapping is strictly monotonic over the entire space External. e.g. a -> a * 2. Use at your own
-/// risks.
-pub(crate) struct StrictlyMonotonicMappingInverter<T> {
-    orig_mapping: T,
-}
-impl<T> From<T> for StrictlyMonotonicMappingInverter<T> {
-    fn from(orig_mapping: T) -> Self {
-        Self { orig_mapping }
-    }
-}
-
-impl<From, To, T> StrictlyMonotonicFn<To, From> for StrictlyMonotonicMappingInverter<T>
-where T: StrictlyMonotonicFn<From, To>
-{
-    fn mapping(&self, val: To) -> From {
-        self.orig_mapping.inverse(val)
-    }
-
-    fn inverse(&self, val: From) -> To {
-        self.orig_mapping.mapping(val)
-    }
-}
-
-/// Applies the strictly monotonic mapping from `T` without any additional changes.
-pub(crate) struct StrictlyMonotonicMappingToInternal<T> {
-    _phantom: PhantomData<T>,
-}
-
-impl<T> StrictlyMonotonicMappingToInternal<T> {
-    pub(crate) fn new() -> StrictlyMonotonicMappingToInternal<T> {
-        Self {
-            _phantom: PhantomData,
-        }
-    }
-}
-
-impl<External: MonotonicallyMappableToU128, T: MonotonicallyMappableToU128>
-    StrictlyMonotonicFn<External, u128> for StrictlyMonotonicMappingToInternal<T>
-where T: MonotonicallyMappableToU128
-{
-    fn mapping(&self, inp: External) -> u128 {
-        External::to_u128(inp)
-    }
-
-    fn inverse(&self, out: u128) -> External {
-        External::from_u128(out)
-    }
-}
-
-impl<External: MonotonicallyMappableToU64, T: MonotonicallyMappableToU64>
-    StrictlyMonotonicFn<External, u64> for StrictlyMonotonicMappingToInternal<T>
-where T: MonotonicallyMappableToU64
-{
-    fn mapping(&self, inp: External) -> u64 {
-        External::to_u64(inp)
-    }
-
-    fn inverse(&self, out: u64) -> External {
-        External::from_u64(out)
-    }
-}
-
-/// Mapping dividing by gcd and a base value.
-///
-/// The function is assumed to be only called on values divided by passed
-/// gcd value. (It is necessary for the function to be monotonic.)
-pub(crate) struct StrictlyMonotonicMappingToInternalGCDBaseval {
-    gcd_divider: DividerU64,
-    gcd: u64,
-    min_value: u64,
-}
-impl StrictlyMonotonicMappingToInternalGCDBaseval {
-    pub(crate) fn new(gcd: u64, min_value: u64) -> Self {
-        let gcd_divider = DividerU64::divide_by(gcd);
-        Self {
-            gcd_divider,
-            gcd,
-            min_value,
-        }
-    }
-}
-impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
-    for StrictlyMonotonicMappingToInternalGCDBaseval
-{
-    fn mapping(&self, inp: External) -> u64 {
-        self.gcd_divider
-            .divide(External::to_u64(inp) - self.min_value)
-    }
-
-    fn inverse(&self, out: u64) -> External {
-        External::from_u64(self.min_value + out * self.gcd)
-    }
-}
-
-/// Strictly monotonic mapping with a base value.
-pub(crate) struct StrictlyMonotonicMappingToInternalBaseval {
-    min_value: u64,
-}
-impl StrictlyMonotonicMappingToInternalBaseval {
-    pub(crate) fn new(min_value: u64) -> Self {
-        Self { min_value }
-    }
-}
-
-impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
-    for StrictlyMonotonicMappingToInternalBaseval
-{
-    fn mapping(&self, val: External) -> u64 {
-        External::to_u64(val) - self.min_value
-    }
-
-    fn inverse(&self, val: u64) -> External {
-        External::from_u64(self.min_value + val)
-    }
-}
-
 impl MonotonicallyMappableToU64 for u64 {
     fn to_u64(self) -> u64 {
         self
@@ -183,7 +36,11 @@ impl MonotonicallyMappableToU64 for i64 {
 impl MonotonicallyMappableToU64 for bool {
     #[inline(always)]
     fn to_u64(self) -> u64 {
-        u64::from(self)
+        if self {
+            1
+        } else {
+            0
+        }
     }

     #[inline(always)]
@@ -201,33 +58,3 @@ impl MonotonicallyMappableToU64 for f64 {
         common::u64_to_f64(val)
     }
 }
-
-#[cfg(test)]
-mod tests {
-
-    use super::*;
-
-    #[test]
-    fn strictly_monotonic_test() {
-        // identity mapping
-        test_round_trip(&StrictlyMonotonicMappingToInternal::<u64>::new(), 100u64);
-        // round trip to i64
-        test_round_trip(&StrictlyMonotonicMappingToInternal::<i64>::new(), 100u64);
-        // identity mapping
-        test_round_trip(&StrictlyMonotonicMappingToInternal::<u128>::new(), 100u128);
-
-        // base value to i64 round trip
-        let mapping = StrictlyMonotonicMappingToInternalBaseval::new(100);
-        test_round_trip::<_, _, u64>(&mapping, 100i64);
-        // base value and gcd to u64 round trip
-        let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 100);
-        test_round_trip::<_, _, u64>(&mapping, 100u64);
-    }
-
-    fn test_round_trip<T: StrictlyMonotonicFn<K, L>, K: std::fmt::Debug + Eq + Copy, L>(
-        mapping: &T,
-        test_val: K,
-    ) {
-        assert_eq!(mapping.inverse(mapping.mapping(test_val)), test_val);
-    }
-}

|
|||||||
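The removed `StrictlyMonotonicFn` machinery above all hinges on one invariant: each mapping is strictly monotonic and invertible, so order comparisons and range bounds can be translated into the internal `u64` space and back without decoding every value. A self-contained sketch of that round-trip property, using a simplified trait of the same shape (not tantivy's actual definitions):

```rust
/// A strictly monotonic, invertible mapping between two value spaces.
trait StrictlyMonotonicFn<External, Internal> {
    fn mapping(&self, inp: External) -> Internal;
    fn inverse(&self, out: Internal) -> External;
}

/// Shifts values down by a base value, mirroring the removed
/// `StrictlyMonotonicMappingToInternalBaseval` (simplified to u64 only).
struct BasevalMapping {
    min_value: u64,
}

impl StrictlyMonotonicFn<u64, u64> for BasevalMapping {
    fn mapping(&self, inp: u64) -> u64 {
        inp - self.min_value
    }

    fn inverse(&self, out: u64) -> u64 {
        out + self.min_value
    }
}

fn main() {
    let mapping = BasevalMapping { min_value: 100 };
    // Round trip: inverse(mapping(x)) == x, as the removed test asserted.
    for x in [100u64, 101, 1_000_000] {
        assert_eq!(mapping.inverse(mapping.mapping(x)), x);
    }
    // Strict monotonicity: order is preserved through the mapping.
    assert!(mapping.mapping(101) < mapping.mapping(102));
}
```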
@@ -1,40 +0,0 @@
-use std::net::Ipv6Addr;
-
-/// Montonic maps a value to u128 value space
-/// Monotonic mapping enables `PartialOrd` on u128 space without conversion to original space.
-pub trait MonotonicallyMappableToU128: 'static + PartialOrd + Copy + Send + Sync {
-    /// Converts a value to u128.
-    ///
-    /// Internally all fast field values are encoded as u64.
-    fn to_u128(self) -> u128;
-
-    /// Converts a value from u128
-    ///
-    /// Internally all fast field values are encoded as u64.
-    /// **Note: To be used for converting encoded Term, Posting values.**
-    fn from_u128(val: u128) -> Self;
-}
-
-impl MonotonicallyMappableToU128 for u128 {
-    fn to_u128(self) -> u128 {
-        self
-    }
-
-    fn from_u128(val: u128) -> Self {
-        val
-    }
-}
-
-impl MonotonicallyMappableToU128 for Ipv6Addr {
-    fn to_u128(self) -> u128 {
-        ip_to_u128(self)
-    }
-
-    fn from_u128(val: u128) -> Self {
-        Ipv6Addr::from(val.to_be_bytes())
-    }
-}
-
-fn ip_to_u128(ip_addr: Ipv6Addr) -> u128 {
-    u128::from_be_bytes(ip_addr.octets())
-}
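The deleted file's one subtlety is the big-endian conversion: feeding `Ipv6Addr::octets()` through `u128::from_be_bytes` makes numeric order on the `u128` agree with the natural order of the addresses, which is exactly what a monotonic mapping requires. A small standard-library-only check of that property:

```rust
use std::net::Ipv6Addr;

// Big-endian byte order means numeric comparison on the u128 matches
// lexicographic comparison of the address octets.
fn ip_to_u128(ip_addr: Ipv6Addr) -> u128 {
    u128::from_be_bytes(ip_addr.octets())
}

fn main() {
    let a: Ipv6Addr = "::1".parse().unwrap();
    let b: Ipv6Addr = "::0.0.0.50".parse().unwrap();
    let c: Ipv6Addr = "::0.0.0.52".parse().unwrap();
    // Order is preserved through the mapping...
    assert!(ip_to_u128(a) < ip_to_u128(b));
    assert!(ip_to_u128(b) < ip_to_u128(c));
    // ...and the round trip restores the original address.
    assert_eq!(Ipv6Addr::from(ip_to_u128(b).to_be_bytes()), b);
}
```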
@@ -22,6 +22,7 @@ use std::num::NonZeroU64;
 use std::sync::Arc;

 use common::{BinarySerializable, VInt};
+use fastdivide::DividerU64;
 use log::warn;
 use ownedbytes::OwnedBytes;

@@ -29,10 +30,6 @@ use crate::bitpacked::BitpackedCodec;
 use crate::blockwise_linear::BlockwiseLinearCodec;
 use crate::compact_space::CompactSpaceCompressor;
 use crate::linear::LinearCodec;
-use crate::monotonic_mapping::{
-    StrictlyMonotonicFn, StrictlyMonotonicMappingToInternal,
-    StrictlyMonotonicMappingToInternalGCDBaseval,
-};
 use crate::{
     monotonic_map_column, Column, FastFieldCodec, FastFieldCodecType, MonotonicallyMappableToU64,
     VecColumn, ALL_CODEC_TYPES,

@@ -40,20 +37,18 @@ use crate::{

 /// The normalized header gives some parameters after applying the following
 /// normalization of the vector:
-/// `val -> (val - min_value) / gcd`
+/// val -> (val - min_value) / gcd
 ///
 /// By design, after normalization, `min_value = 0` and `gcd = 1`.
 #[derive(Debug, Copy, Clone)]
 pub struct NormalizedHeader {
-    /// The number of values in the underlying column.
-    pub num_vals: u32,
-    /// The max value of the underlying column.
+    pub num_vals: u64,
     pub max_value: u64,
 }

 #[derive(Debug, Copy, Clone)]
 pub(crate) struct Header {
-    pub num_vals: u32,
+    pub num_vals: u64,
     pub min_value: u64,
     pub max_value: u64,
     pub gcd: Option<NonZeroU64>,

@@ -62,11 +57,8 @@ pub(crate) struct Header {

 impl Header {
     pub fn normalized(self) -> NormalizedHeader {
-        let gcd = self.gcd.map(|gcd| gcd.get()).unwrap_or(1);
-        let gcd_min_val_mapping =
-            StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd, self.min_value);
-
-        let max_value = gcd_min_val_mapping.mapping(self.max_value);
+        let max_value =
+            (self.max_value - self.min_value) / self.gcd.map(|gcd| gcd.get()).unwrap_or(1);
         NormalizedHeader {
             num_vals: self.num_vals,
             max_value,

@@ -74,7 +66,10 @@ impl Header {
     }

     pub fn normalize_column<C: Column>(&self, from_column: C) -> impl Column {
-        normalize_column(from_column, self.min_value, self.gcd)
+        let min_value = self.min_value;
+        let gcd = self.gcd.map(|gcd| gcd.get()).unwrap_or(1);
+        let divider = DividerU64::divide_by(gcd);
+        monotonic_map_column(from_column, move |val| divider.divide(val - min_value))
     }

     pub fn compute_header(

@@ -86,8 +81,9 @@ impl Header {
         let max_value = column.max_value();
         let gcd = crate::gcd::find_gcd(column.iter().map(|val| val - min_value))
             .filter(|gcd| gcd.get() > 1u64);
-        let normalized_column = normalize_column(column, min_value, gcd);
-        let codec_type = detect_codec(normalized_column, codecs)?;
+        let divider = DividerU64::divide_by(gcd.map(|gcd| gcd.get()).unwrap_or(1u64));
+        let shifted_column = monotonic_map_column(&column, |val| divider.divide(val - min_value));
+        let codec_type = detect_codec(shifted_column, codecs)?;
         Some(Header {
             num_vals,
             min_value,

@@ -98,19 +94,9 @@ impl Header {
     }
 }

-pub fn normalize_column<C: Column>(
-    from_column: C,
-    min_value: u64,
-    gcd: Option<NonZeroU64>,
-) -> impl Column {
-    let gcd = gcd.map(|gcd| gcd.get()).unwrap_or(1);
-    let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd, min_value);
-    monotonic_map_column(from_column, mapping)
-}
-
 impl BinarySerializable for Header {
     fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
-        VInt(self.num_vals as u64).serialize(writer)?;
+        VInt(self.num_vals).serialize(writer)?;
         VInt(self.min_value).serialize(writer)?;
         VInt(self.max_value - self.min_value).serialize(writer)?;
         if let Some(gcd) = self.gcd {

@@ -123,7 +109,7 @@ impl BinarySerializable for Header {
     }

     fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
-        let num_vals = VInt::deserialize(reader)?.0 as u32;
+        let num_vals = VInt::deserialize(reader)?.0;
         let min_value = VInt::deserialize(reader)?.0;
         let amplitude = VInt::deserialize(reader)?.0;
         let max_value = min_value + amplitude;

@@ -139,21 +125,16 @@ impl BinarySerializable for Header {
     }
 }

-/// Return estimated compression for given codec in the value range [0.0..1.0], where 1.0 means no
-/// compression.
 pub fn estimate<T: MonotonicallyMappableToU64>(
     typed_column: impl Column<T>,
     codec_type: FastFieldCodecType,
 ) -> Option<f32> {
-    let column = monotonic_map_column(typed_column, StrictlyMonotonicMappingToInternal::<T>::new());
+    let column = monotonic_map_column(typed_column, T::to_u64);
     let min_value = column.min_value();
     let gcd = crate::gcd::find_gcd(column.iter().map(|val| val - min_value))
         .filter(|gcd| gcd.get() > 1u64);
-    let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(
-        gcd.map(|gcd| gcd.get()).unwrap_or(1u64),
-        min_value,
-    );
-    let normalized_column = monotonic_map_column(&column, mapping);
+    let divider = DividerU64::divide_by(gcd.map(|gcd| gcd.get()).unwrap_or(1u64));
+    let normalized_column = monotonic_map_column(&column, |val| divider.divide(val - min_value));
     match codec_type {
         FastFieldCodecType::Bitpacked => BitpackedCodec::estimate(&normalized_column),
         FastFieldCodecType::Linear => LinearCodec::estimate(&normalized_column),

@@ -161,26 +142,25 @@ pub fn estimate<T: MonotonicallyMappableToU64>(
     }
 }

-/// Serializes u128 values with the compact space codec.
-pub fn serialize_u128<F: Fn() -> I, I: Iterator<Item = u128>>(
-    iter_gen: F,
-    num_vals: u32,
+pub fn serialize_u128(
+    typed_column: impl Column<u128>,
     output: &mut impl io::Write,
 ) -> io::Result<()> {
     // TODO write header, to later support more codecs
-    let compressor = CompactSpaceCompressor::train_from(iter_gen(), num_vals);
-    compressor.compress_into(iter_gen(), output).unwrap();
+    let compressor = CompactSpaceCompressor::train_from(&typed_column);
+    compressor
+        .compress_into(typed_column.iter(), output)
+        .unwrap();

     Ok(())
 }

-/// Serializes the column with the codec with the best estimate on the data.
 pub fn serialize<T: MonotonicallyMappableToU64>(
     typed_column: impl Column<T>,
     output: &mut impl io::Write,
     codecs: &[FastFieldCodecType],
 ) -> io::Result<()> {
-    let column = monotonic_map_column(typed_column, StrictlyMonotonicMappingToInternal::<T>::new());
+    let column = monotonic_map_column(typed_column, T::to_u64);
     let header = Header::compute_header(&column, codecs).ok_or_else(|| {
         io::Error::new(
             io::ErrorKind::InvalidInput,

@@ -245,7 +225,6 @@ fn serialize_given_codec(
     Ok(())
 }

-/// Helper function to serialize a column (autodetect from all codecs) and then open it
 pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
     column: &[T],
 ) -> Arc<dyn Column<T>> {
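Both sides of the `Header` diff compute the same normalization, `val -> (val - min_value) / gcd`; they only disagree on whether it goes through a reusable `StrictlyMonotonicFn` or an inline closure over `DividerU64`. The payoff of the normalization is easy to see in isolation; a rough sketch with plain division (`fastdivide` only accelerates repeated division by a fixed divisor):

```rust
fn gcd(mut a: u64, mut b: u64) -> u64 {
    // Euclid's algorithm.
    while b != 0 {
        let t = a % b;
        a = b;
        b = t;
    }
    a
}

fn main() {
    let values = [1000u64, 1300, 1600, 2500];
    let min_value = *values.iter().min().unwrap();
    // gcd of the deltas against min_value.
    let g = values.iter().map(|v| v - min_value).fold(0, gcd);
    // Normalize: val -> (val - min_value) / gcd.
    let normalized: Vec<u64> = values.iter().map(|v| (v - min_value) / g).collect();
    assert_eq!(g, 300);
    assert_eq!(normalized, vec![0, 1, 2, 5]);
    // The amplitude shrinks from 1500 to 5, so a bitpacking codec needs
    // 3 bits per value instead of 11.
}
```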
@@ -62,20 +62,6 @@ fn word<'a>() -> impl Parser<&'a str, Output = String> {
     })
 }

-// word variant that allows more characters, e.g. for range queries that don't allow field
-// specifier
-fn relaxed_word<'a>() -> impl Parser<&'a str, Output = String> {
-    (
-        satisfy(|c: char| {
-            !c.is_whitespace() && !['`', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
-        }),
-        many(satisfy(|c: char| {
-            !c.is_whitespace() && !['{', '}', '"', '[', ']', '(', ')'].contains(&c)
-        })),
-    )
-        .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
-}
-
 /// Parses a date time according to rfc3339
 /// 2015-08-02T18:54:42+02
 /// 2021-04-13T19:46:26.266051969+00:00

@@ -195,8 +181,8 @@ fn spaces1<'a>() -> impl Parser<&'a str, Output = ()> {
 fn range<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
     let range_term_val = || {
         attempt(date_time())
+            .or(word())
            .or(negative_number())
-            .or(relaxed_word())
            .or(char('*').with(value("*".to_string())))
     };

@@ -663,34 +649,6 @@ mod test {
             .expect("Cannot parse date range")
             .0;
         assert_eq!(res6, expected_flexible_dates);
-        // IP Range Unbounded
-        let expected_weight = UserInputLeaf::Range {
-            field: Some("ip".to_string()),
-            lower: UserInputBound::Inclusive("::1".to_string()),
-            upper: UserInputBound::Unbounded,
-        };
-        let res1 = range()
-            .parse("ip: >=::1")
-            .expect("Cannot parse ip v6 format")
-            .0;
-        let res2 = range()
-            .parse("ip:[::1 TO *}")
-            .expect("Cannot parse ip v6 format")
-            .0;
-        assert_eq!(res1, expected_weight);
-        assert_eq!(res2, expected_weight);
-
-        // IP Range Bounded
-        let expected_weight = UserInputLeaf::Range {
-            field: Some("ip".to_string()),
-            lower: UserInputBound::Inclusive("::0.0.0.50".to_string()),
-            upper: UserInputBound::Exclusive("::0.0.0.52".to_string()),
-        };
-        let res1 = range()
-            .parse("ip:[::0.0.0.50 TO ::0.0.0.52}")
-            .expect("Cannot parse ip v6 format")
-            .0;
-        assert_eq!(res1, expected_weight);
     }

     #[test]
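The removed tests also document the range syntax the grammar accepts: `[` and `]` mark inclusive bounds, `{` and `}` exclusive ones, and `*` an unbounded side, so `ip:[::1 TO *}` means "from ::1 inclusive, upwards without bound". A toy parser for just that bound convention (the `Bound` enum here is hypothetical, not tantivy's `UserInputBound`):

```rust
#[derive(Debug, PartialEq)]
enum Bound {
    Inclusive(String),
    Exclusive(String),
    Unbounded,
}

// Parses "<open>lower TO upper<close>", e.g. "[::1 TO *}".
fn parse_range(input: &str) -> Option<(Bound, Bound)> {
    let open = input.chars().next()?;
    let close = input.chars().last()?;
    let inner = &input[1..input.len() - 1];
    let (lower, upper) = inner.split_once(" TO ")?;
    let make = |val: &str, inclusive: bool| {
        if val == "*" {
            Bound::Unbounded
        } else if inclusive {
            Bound::Inclusive(val.to_string())
        } else {
            Bound::Exclusive(val.to_string())
        }
    };
    Some((make(lower, open == '['), make(upper, close == ']')))
}

fn main() {
    assert_eq!(
        parse_range("[::1 TO *}"),
        Some((Bound::Inclusive("::1".to_string()), Bound::Unbounded))
    );
    assert_eq!(
        parse_range("[::0.0.0.50 TO ::0.0.0.52}"),
        Some((
            Bound::Inclusive("::0.0.0.50".to_string()),
            Bound::Exclusive("::0.0.0.52".to_string())
        ))
    );
}
```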
@@ -6,7 +6,7 @@

 use std::collections::HashMap;

-use rustc_hash::FxHashMap;
+use fnv::FnvHashMap;
 use serde::{Deserialize, Serialize};

 use super::agg_req::BucketAggregationInternal;

@@ -145,7 +145,7 @@ pub enum BucketEntries<T> {
     /// Vector format bucket entries
     Vec(Vec<T>),
     /// HashMap format bucket entries
-    HashMap(FxHashMap<String, T>),
+    HashMap(FnvHashMap<String, T>),
 }

 /// This is the default entry for a bucket, which contains a key, count, and optionally
@@ -331,10 +331,10 @@ impl SegmentHistogramCollector {
             .expect("unexpected fast field cardinatility");
         let mut iter = doc.chunks_exact(4);
         for docs in iter.by_ref() {
-            let val0 = self.f64_from_fastfield_u64(accessor.get_val(docs[0]));
-            let val1 = self.f64_from_fastfield_u64(accessor.get_val(docs[1]));
-            let val2 = self.f64_from_fastfield_u64(accessor.get_val(docs[2]));
-            let val3 = self.f64_from_fastfield_u64(accessor.get_val(docs[3]));
+            let val0 = self.f64_from_fastfield_u64(accessor.get_val(docs[0] as u64));
+            let val1 = self.f64_from_fastfield_u64(accessor.get_val(docs[1] as u64));
+            let val2 = self.f64_from_fastfield_u64(accessor.get_val(docs[2] as u64));
+            let val3 = self.f64_from_fastfield_u64(accessor.get_val(docs[3] as u64));

             let bucket_pos0 = get_bucket_num(val0);
             let bucket_pos1 = get_bucket_num(val1);

@@ -371,7 +371,7 @@ impl SegmentHistogramCollector {
             )?;
         }
         for &doc in iter.remainder() {
-            let val = f64_from_fastfield_u64(accessor.get_val(doc), &self.field_type);
+            let val = f64_from_fastfield_u64(accessor.get_val(doc as u64), &self.field_type);
             if !bounds.contains(val) {
                 continue;
             }

@@ -425,7 +425,7 @@ impl SegmentHistogramCollector {
         let bucket = &mut self.buckets[bucket_pos];
         bucket.doc_count += 1;
         if let Some(sub_aggregation) = self.sub_aggregations.as_mut() {
-            sub_aggregation[bucket_pos].collect(doc, bucket_with_accessor)?;
+            (&mut sub_aggregation[bucket_pos]).collect(doc, bucket_with_accessor)?;
         }
         Ok(())
     }

@@ -452,7 +452,7 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
     histogram_req: &HistogramAggregation,
     sub_aggregation: &AggregationsInternal,
 ) -> crate::Result<Vec<BucketEntry>> {
-    // Generate the full list of buckets without gaps.
+    // Generate the the full list of buckets without gaps.
     //
     // The bounds are the min max from the current buckets, optionally extended by
     // extended_bounds from the request
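The `chunks_exact(4)` pattern that recurs in these collectors is a manual four-way unroll: the four fast-field lookups per iteration are independent, so the CPU can pipeline them, and `iter.remainder()` cleans up the trailing zero to three documents. A minimal sketch of the shape, with a plain `Vec<u64>` standing in for the fast-field column (the `get_val` here is hypothetical; tantivy's `Column` trait differs):

```rust
// Stand-in for a fast-field column.
struct DummyColumn(Vec<u64>);

impl DummyColumn {
    fn get_val(&self, doc: u32) -> u64 {
        self.0[doc as usize]
    }
}

fn sum_block(docs: &[u32], column: &DummyColumn) -> u64 {
    let mut sum = 0u64;
    let mut iter = docs.chunks_exact(4);
    // Process four documents per iteration; the loads are independent.
    for chunk in iter.by_ref() {
        sum += column.get_val(chunk[0]);
        sum += column.get_val(chunk[1]);
        sum += column.get_val(chunk[2]);
        sum += column.get_val(chunk[3]);
    }
    // Zero to three leftover documents.
    for &doc in iter.remainder() {
        sum += column.get_val(doc);
    }
    sum
}

fn main() {
    let column = DummyColumn((0..10).collect());
    assert_eq!(sum_block(&[0, 1, 2, 3, 4, 5, 6], &column), 21);
}
```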
@@ -1,7 +1,7 @@
 use std::fmt::Debug;
 use std::ops::Range;

-use rustc_hash::FxHashMap;
+use fnv::FnvHashMap;
 use serde::{Deserialize, Serialize};

 use crate::aggregation::agg_req_with_accessor::{

@@ -176,7 +176,7 @@ impl SegmentRangeCollector {
     ) -> crate::Result<IntermediateBucketResult> {
         let field_type = self.field_type;

-        let buckets: FxHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
+        let buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
             .buckets
             .into_iter()
             .map(move |range_bucket| {

@@ -263,10 +263,10 @@ impl SegmentRangeCollector {
             .as_single()
             .expect("unexpected fast field cardinality");
         for docs in iter.by_ref() {
-            let val1 = accessor.get_val(docs[0]);
-            let val2 = accessor.get_val(docs[1]);
-            let val3 = accessor.get_val(docs[2]);
-            let val4 = accessor.get_val(docs[3]);
+            let val1 = accessor.get_val(docs[0] as u64);
+            let val2 = accessor.get_val(docs[1] as u64);
+            let val3 = accessor.get_val(docs[2] as u64);
+            let val4 = accessor.get_val(docs[3] as u64);
             let bucket_pos1 = self.get_bucket_pos(val1);
             let bucket_pos2 = self.get_bucket_pos(val2);
             let bucket_pos3 = self.get_bucket_pos(val3);

@@ -278,7 +278,7 @@ impl SegmentRangeCollector {
             self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?;
         }
         for &doc in iter.remainder() {
-            let val = accessor.get_val(doc);
+            let val = accessor.get_val(doc as u64);
             let bucket_pos = self.get_bucket_pos(val);
             self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
         }

@@ -323,8 +323,8 @@ impl SegmentRangeCollector {
 /// Converts the user provided f64 range value to fast field value space.
 ///
 /// Internally fast field values are always stored as u64.
-/// If the fast field has u64 `[1, 2, 5]`, these values are stored as is in the fast field.
-/// A fast field with f64 `[1.0, 2.0, 5.0]` is converted to u64 space, using a
+/// If the fast field has u64 [1,2,5], these values are stored as is in the fast field.
+/// A fast field with f64 [1.0, 2.0, 5.0] is converted to u64 space, using a
 /// monotonic mapping function, so the order is preserved.
 ///
 /// Consequently, a f64 user range 1.0..3.0 needs to be converted to fast field value space using
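The monotonic f64-to-u64 mapping the doc comment refers to is usually the textbook sign-bit trick: flip the sign bit of non-negative values and all bits of negative ones, so unsigned integer comparison on the encoded values matches floating-point comparison. A hedged sketch of that construction (tantivy's actual helpers live in its `common` crate; this is the standard technique, not a quote of them):

```rust
// Order-preserving f64 -> u64 encoding.
fn f64_to_u64(val: f64) -> u64 {
    let bits = val.to_bits();
    if val.is_sign_positive() {
        bits ^ 0x8000_0000_0000_0000
    } else {
        !bits
    }
}

fn u64_to_f64(enc: u64) -> f64 {
    // Highest bit set means the original value was non-negative.
    if enc & 0x8000_0000_0000_0000 != 0 {
        f64::from_bits(enc ^ 0x8000_0000_0000_0000)
    } else {
        f64::from_bits(!enc)
    }
}

fn main() {
    let vals = [-5.0f64, -1.0, 0.0, 1.0, 2.0, 5.0];
    // Encoded order matches the f64 order, so a user range like 1.0..3.0
    // can be translated into a u64 range over the fast field values.
    let encoded: Vec<u64> = vals.iter().map(|&v| f64_to_u64(v)).collect();
    assert!(encoded.windows(2).all(|w| w[0] < w[1]));
    for &v in &vals {
        assert_eq!(u64_to_f64(f64_to_u64(v)), v);
    }
}
```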
@@ -1,7 +1,7 @@
 use std::fmt::Debug;

+use fnv::FnvHashMap;
 use itertools::Itertools;
-use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};

 use super::{CustomOrder, Order, OrderTarget};

@@ -17,11 +17,7 @@ use crate::fastfield::MultiValuedFastFieldReader;
 use crate::schema::Type;
 use crate::{DocId, TantivyError};

-/// Creates a bucket for every unique term and counts the number of occurences.
-/// Note that doc_count in the response buckets equals term count here.
-///
-/// If the text is untokenized and single value, that means one term per document and therefore it
-/// is in fact doc count.
+/// Creates a bucket for every unique term
 ///
 /// ### Terminology
 /// Shard parameters are supposed to be equivalent to elasticsearch shard parameter.

@@ -68,25 +64,6 @@ use crate::{DocId, TantivyError};
 /// }
 /// }
 /// ```
-///
-/// /// # Response JSON Format
-/// ```json
-/// {
-///   ...
-///   "aggregations": {
-///     "genres": {
-///       "doc_count_error_upper_bound": 0,
-///       "sum_other_doc_count": 0,
-///       "buckets": [
-///         { "key": "drumnbass", "doc_count": 6 },
-///         { "key": "raggae", "doc_count": 4 },
-///         { "key": "jazz", "doc_count": 2 }
-///       ]
-///     }
-///   }
-/// }
-/// ```

 #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
 pub struct TermsAggregation {
     /// The field to aggregate on.

@@ -199,7 +176,7 @@ impl TermsAggregationInternal {
 #[derive(Clone, Debug, PartialEq)]
 /// Container to store term_ids and their buckets.
 struct TermBuckets {
-    pub(crate) entries: FxHashMap<u32, TermBucketEntry>,
+    pub(crate) entries: FnvHashMap<u32, TermBucketEntry>,
     blueprint: Option<SegmentAggregationResultsCollector>,
 }

@@ -397,7 +374,7 @@ impl SegmentTermCollector {
             .expect("internal error: inverted index not loaded for term aggregation");
         let term_dict = inverted_index.terms();

-        let mut dict: FxHashMap<String, IntermediateTermBucketEntry> = Default::default();
+        let mut dict: FnvHashMap<String, IntermediateTermBucketEntry> = Default::default();
         let mut buffer = vec![];
         for (term_id, entry) in entries {
             term_dict

@@ -1129,9 +1106,9 @@ mod tests {

         assert_eq!(res["my_texts"]["buckets"][0]["key"], "terma");
         assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 4);
-        assert_eq!(res["my_texts"]["buckets"][1]["key"], "termc");
+        assert_eq!(res["my_texts"]["buckets"][1]["key"], "termb");
         assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 0);
-        assert_eq!(res["my_texts"]["buckets"][2]["key"], "termb");
+        assert_eq!(res["my_texts"]["buckets"][2]["key"], "termc");
         assert_eq!(res["my_texts"]["buckets"][2]["doc_count"], 0);
         assert_eq!(res["my_texts"]["sum_other_doc_count"], 0);
         assert_eq!(res["my_texts"]["doc_count_error_upper_bound"], 0);

@@ -1229,43 +1206,11 @@ mod tests {
         .collect();

         let res = exec_request_with_query(agg_req, &index, None);

         assert!(res.is_err());

         Ok(())
     }

-    #[test]
-    fn terms_aggregation_multi_token_per_doc() -> crate::Result<()> {
-        let terms = vec!["Hello Hello", "Hallo Hallo"];
-
-        let index = get_test_index_from_terms(true, &[terms])?;
-
-        let agg_req: Aggregations = vec![(
-            "my_texts".to_string(),
-            Aggregation::Bucket(BucketAggregation {
-                bucket_agg: BucketAggregationType::Terms(TermsAggregation {
-                    field: "text_id".to_string(),
-                    min_doc_count: Some(0),
-                    ..Default::default()
-                }),
-                sub_aggregation: Default::default(),
-            }),
-        )]
-        .into_iter()
-        .collect();
-
-        let res = exec_request_with_query(agg_req, &index, None).unwrap();
-
-        assert_eq!(res["my_texts"]["buckets"][0]["key"], "hello");
-        assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 2);
-
-        assert_eq!(res["my_texts"]["buckets"][1]["key"], "hallo");
-        assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 2);
-
-        Ok(())
-    }
-
     #[test]
     fn test_json_format() -> crate::Result<()> {
         let agg_req: Aggregations = vec![(
@@ -5,8 +5,8 @@
 use std::cmp::Ordering;
 use std::collections::HashMap;

+use fnv::FnvHashMap;
 use itertools::Itertools;
-use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};

 use super::agg_req::{

@@ -288,7 +288,7 @@ impl IntermediateBucketResult {
             .keyed;
         let buckets = if is_keyed {
             let mut bucket_map =
-                FxHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
+                FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
             for bucket in buckets {
                 bucket_map.insert(bucket.key.to_string(), bucket);
             }

@@ -308,7 +308,7 @@ impl IntermediateBucketResult {

         let buckets = if req.as_histogram().unwrap().keyed {
             let mut bucket_map =
-                FxHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
+                FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
             for bucket in buckets {
                 bucket_map.insert(bucket.key.to_string(), bucket);
             }

@@ -396,13 +396,13 @@ impl IntermediateBucketResult {
 #[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
 /// Range aggregation including error counts
 pub struct IntermediateRangeBucketResult {
-    pub(crate) buckets: FxHashMap<SerializedKey, IntermediateRangeBucketEntry>,
+    pub(crate) buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry>,
 }

 #[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
 /// Term aggregation including error counts
 pub struct IntermediateTermBucketResult {
-    pub(crate) entries: FxHashMap<String, IntermediateTermBucketEntry>,
+    pub(crate) entries: FnvHashMap<String, IntermediateTermBucketEntry>,
     pub(crate) sum_other_doc_count: u64,
     pub(crate) doc_count_error_upper_bound: u64,
 }

@@ -499,8 +499,8 @@ trait MergeFruits {
 }

 fn merge_maps<V: MergeFruits + Clone>(
-    entries_left: &mut FxHashMap<SerializedKey, V>,
-    mut entries_right: FxHashMap<SerializedKey, V>,
+    entries_left: &mut FnvHashMap<SerializedKey, V>,
+    mut entries_right: FnvHashMap<SerializedKey, V>,
 ) {
     for (name, entry_left) in entries_left.iter_mut() {
         if let Some(entry_right) = entries_right.remove(name) {

@@ -626,7 +626,7 @@ mod tests {

     fn get_sub_test_tree(data: &[(String, u64)]) -> IntermediateAggregationResults {
         let mut map = HashMap::new();
-        let mut buckets = FxHashMap::default();
+        let mut buckets = FnvHashMap::default();
         for (key, doc_count) in data {
             buckets.insert(
                 key.to_string(),

@@ -653,7 +653,7 @@ mod tests {
         data: &[(String, u64, String, u64)],
     ) -> IntermediateAggregationResults {
         let mut map = HashMap::new();
-        let mut buckets: FxHashMap<_, _> = Default::default();
+        let mut buckets: FnvHashMap<_, _> = Default::default();
         for (key, doc_count, sub_aggregation_key, sub_aggregation_count) in data {
             buckets.insert(
                 key.to_string(),
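The `merge_maps` helper touched above implements the usual two-phase merge for intermediate aggregation results: combine entries present on both sides, then move over whatever only the right side had. A standalone sketch with plain `u64` counts standing in for the `MergeFruits` values:

```rust
use std::collections::HashMap;

// Merge per-key counts from `right` into `left`, in the same two phases
// as the diff's merge_maps: shared keys first, then right-only leftovers.
fn merge_maps(left: &mut HashMap<String, u64>, mut right: HashMap<String, u64>) {
    for (key, left_count) in left.iter_mut() {
        if let Some(right_count) = right.remove(key) {
            *left_count += right_count;
        }
    }
    // Keys that only existed on the right side.
    for (key, right_count) in right {
        left.insert(key, right_count);
    }
}

fn main() {
    let mut left = HashMap::from([("a".to_string(), 2u64), ("b".to_string(), 1)]);
    let right = HashMap::from([("b".to_string(), 3u64), ("c".to_string(), 5)]);
    merge_maps(&mut left, right);
    assert_eq!(left["a"], 2);
    assert_eq!(left["b"], 4);
    assert_eq!(left["c"], 5);
}
```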
@@ -60,10 +60,10 @@ impl SegmentAverageCollector {
     pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
         let mut iter = doc.chunks_exact(4);
         for docs in iter.by_ref() {
-            let val1 = field.get_val(docs[0]);
-            let val2 = field.get_val(docs[1]);
-            let val3 = field.get_val(docs[2]);
-            let val4 = field.get_val(docs[3]);
+            let val1 = field.get_val(docs[0] as u64);
+            let val2 = field.get_val(docs[1] as u64);
+            let val3 = field.get_val(docs[2] as u64);
+            let val4 = field.get_val(docs[3] as u64);
             let val1 = f64_from_fastfield_u64(val1, &self.field_type);
             let val2 = f64_from_fastfield_u64(val2, &self.field_type);
             let val3 = f64_from_fastfield_u64(val3, &self.field_type);

@@ -74,7 +74,7 @@ impl SegmentAverageCollector {
             self.data.collect(val4);
         }
         for &doc in iter.remainder() {
-            let val = field.get_val(doc);
+            let val = field.get_val(doc as u64);
             let val = f64_from_fastfield_u64(val, &self.field_type);
             self.data.collect(val);
         }
@@ -166,10 +166,10 @@ impl SegmentStatsCollector {
     pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
         let mut iter = doc.chunks_exact(4);
         for docs in iter.by_ref() {
-            let val1 = field.get_val(docs[0]);
-            let val2 = field.get_val(docs[1]);
-            let val3 = field.get_val(docs[2]);
-            let val4 = field.get_val(docs[3]);
+            let val1 = field.get_val(docs[0] as u64);
+            let val2 = field.get_val(docs[1] as u64);
+            let val3 = field.get_val(docs[2] as u64);
+            let val4 = field.get_val(docs[3] as u64);
             let val1 = f64_from_fastfield_u64(val1, &self.field_type);
             let val2 = f64_from_fastfield_u64(val2, &self.field_type);
             let val3 = f64_from_fastfield_u64(val3, &self.field_type);

@@ -180,7 +180,7 @@ impl SegmentStatsCollector {
             self.stats.collect(val4);
         }
         for &doc in iter.remainder() {
-            let val = field.get_val(doc);
+            let val = field.get_val(doc as u64);
             let val = f64_from_fastfield_u64(val, &self.field_type);
             self.stats.collect(val);
         }
@@ -10,19 +10,21 @@
 //!
 //! There are two categories: [Metrics](metric) and [Buckets](bucket).
 //!
-//! ## Prerequisite
-//! Currently aggregations work only on [fast fields](`crate::fastfield`). Single value fast fields
-//! of type `u64`, `f64`, `i64` and fast fields on text fields.
+//! # Usage
+//!
 //!
-//! ## Usage
 //! To use aggregations, build an aggregation request by constructing
 //! [`Aggregations`](agg_req::Aggregations).
 //! Create an [`AggregationCollector`] from this request. `AggregationCollector` implements the
 //! [`Collector`](crate::collector::Collector) trait and can be passed as collector into
 //! [`Searcher::search()`](crate::Searcher::search).
 //!
+//! #### Limitations
 //!
-//! ## JSON Format
+//! Currently aggregations work only on single value fast fields of type `u64`, `f64`, `i64` and
+//! fast fields on text fields.
+//!
+//! # JSON Format
 //! Aggregations request and result structures de/serialize into elasticsearch compatible JSON.
 //!
 //! ```verbatim

@@ -33,7 +35,7 @@
 //! let json_response_string: String = &serde_json::to_string(&agg_res)?;
 //! ```
 //!
-//! ## Supported Aggregations
+//! # Supported Aggregations
 //! - [Bucket](bucket)
 //!   - [Histogram](bucket::HistogramAggregation)
 //!   - [Range](bucket::RangeAggregation)
@@ -38,7 +38,7 @@ pub trait CustomSegmentScorer<TScore>: 'static {
 pub trait CustomScorer<TScore>: Sync {
     /// Type of the associated [`CustomSegmentScorer`].
     type Child: CustomSegmentScorer<TScore>;
-    /// Builds a child scorer for a specific segment. The child scorer is associated with
+    /// Builds a child scorer for a specific segment. The child scorer is associated to
     /// a specific segment.
     fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child>;
 }
@@ -91,7 +91,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 /// let index = Index::create_in_ram(schema);
 /// {
 ///     let mut index_writer = index.writer(3_000_000)?;
-///     // a document can be associated with any number of facets
+///     // a document can be associated to any number of facets
 ///     index_writer.add_document(doc!(
 ///         title => "The Name of the Wind",
 ///         facet => Facet::from("/lang/en"),

@@ -338,7 +338,11 @@ impl SegmentCollector for FacetSegmentCollector {
         let mut previous_collapsed_ord: usize = usize::MAX;
         for &facet_ord in &self.facet_ords_buf {
             let collapsed_ord = self.collapse_mapping[facet_ord as usize];
-            self.counts[collapsed_ord] += u64::from(collapsed_ord != previous_collapsed_ord);
+            self.counts[collapsed_ord] += if collapsed_ord == previous_collapsed_ord {
+                0
+            } else {
+                1
+            };
             previous_collapsed_ord = collapsed_ord;
         }
     }
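Both sides of the last hunk count a facet only when its collapsed ordinal differs from the previous one; the left-hand version just spells the 0-or-1 increment as `u64::from(bool)` instead of an `if`. The trick in isolation:

```rust
fn count_transitions(ords: &[usize], num_ords: usize) -> Vec<u64> {
    let mut counts = vec![0u64; num_ords];
    let mut previous = usize::MAX;
    for &ord in ords {
        // Adds 1 on a transition and 0 on a repeat, without a branch:
        // u64::from(true) == 1 and u64::from(false) == 0.
        counts[ord] += u64::from(ord != previous);
        previous = ord;
    }
    counts
}

fn main() {
    // Runs of the same ordinal are counted once per run.
    assert_eq!(count_transitions(&[0, 0, 1, 1, 0], 2), vec![2, 1]);
}
```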
@@ -177,7 +177,7 @@ where
     type Fruit = TSegmentCollector::Fruit;

     fn collect(&mut self, doc: u32, score: Score) {
-        let value = self.fast_field_reader.get_val(doc);
+        let value = self.fast_field_reader.get_val(doc as u64);
         if (self.predicate)(value) {
             self.segment_collector.collect(doc, score)
         }
@@ -37,7 +37,7 @@ impl HistogramCollector {
     /// The scale/range of the histogram is not dynamic. It is required to
     /// define it by supplying following parameter:
     /// - `min_value`: the minimum value that can be recorded in the histogram.
-    /// - `bucket_width`: the length of the interval that is associated with each buckets.
+    /// - `bucket_width`: the length of the interval that is associated to each buckets.
     /// - `num_buckets`: The overall number of buckets.
     ///
     /// Together, this parameters define a partition of `[min_value, min_value + num_buckets *

@@ -94,7 +94,7 @@ impl SegmentCollector for SegmentHistogramCollector {
     type Fruit = Vec<u64>;

     fn collect(&mut self, doc: DocId, _score: Score) {
-        let value = self.ff_reader.get_val(doc);
+        let value = self.ff_reader.get_val(doc as u64);
         self.histogram_computer.add_value(value);
     }
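Given the three parameters in the doc comment (`min_value`, `bucket_width`, `num_buckets`), the bucket for a value is plain integer arithmetic, and values outside the partition are ignored. A sketch of that computation under those assumptions (the struct and method here are hypothetical, not the collector's internals):

```rust
struct Histogram {
    min_value: u64,
    bucket_width: u64,
    counts: Vec<u64>,
}

impl Histogram {
    fn add_value(&mut self, value: u64) {
        if value < self.min_value {
            return;
        }
        // Partition [min_value, min_value + num_buckets * bucket_width).
        let bucket = ((value - self.min_value) / self.bucket_width) as usize;
        if let Some(count) = self.counts.get_mut(bucket) {
            *count += 1;
        }
    }
}

fn main() {
    let mut histogram = Histogram {
        min_value: 10,
        bucket_width: 5,
        counts: vec![0; 3], // covers [10, 25)
    };
    for v in [10u64, 12, 14, 20, 24, 30] {
        histogram.add_value(v); // 30 falls outside the partition and is dropped
    }
    assert_eq!(histogram.counts, vec![3, 0, 2]);
}
```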
@@ -142,7 +142,7 @@ pub trait Collector: Sync + Send {
     /// e.g. `usize` for the `Count` collector.
     type Fruit: Fruit;

-    /// Type of the `SegmentCollector` associated with this collector.
+    /// Type of the `SegmentCollector` associated to this collector.
     type Child: SegmentCollector;

     /// `set_segment` is called before beginning to enumerate

@@ -156,7 +156,7 @@ pub trait Collector: Sync + Send {
     /// Returns true iff the collector requires to compute scores for documents.
     fn requires_scoring(&self) -> bool;

-    /// Combines the fruit associated with the collection of each segments
+    /// Combines the fruit associated to the collection of each segments
     /// into one fruit.
     fn merge_fruits(
         &self,
@@ -201,7 +201,7 @@ impl SegmentCollector for FastFieldSegmentCollector {
     type Fruit = Vec<u64>;

     fn collect(&mut self, doc: DocId, _score: Score) {
-        let val = self.reader.get_val(doc);
+        let val = self.reader.get_val(doc as u64);
         self.vals.push(val);
     }
@@ -137,7 +137,7 @@ struct ScorerByFastFieldReader {

 impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
     fn score(&mut self, doc: DocId) -> u64 {
-        self.ff_reader.get_val(doc)
+        self.ff_reader.get_val(doc as u64)
     }
 }

@@ -458,7 +458,7 @@ impl TopDocs {
     ///
     ///         // We can now define our actual scoring function
     ///         move |doc: DocId, original_score: Score| {
-    ///             let popularity: u64 = popularity_reader.get_val(doc);
+    ///             let popularity: u64 = popularity_reader.get_val(doc as u64);
     ///             // Well.. For the sake of the example we use a simple logarithm
     ///             // function.
     ///             let popularity_boost_score = ((2u64 + popularity) as Score).log2();

@@ -567,8 +567,8 @@ impl TopDocs {
     ///
     ///         // We can now define our actual scoring function
     ///         move |doc: DocId| {
-    ///             let popularity: u64 = popularity_reader.get_val(doc);
-    ///             let boosted: u64 = boosted_reader.get_val(doc);
+    ///             let popularity: u64 = popularity_reader.get_val(doc as u64);
+    ///             let boosted: u64 = boosted_reader.get_val(doc as u64);
     ///             // Score do not have to be `f64` in tantivy.
     ///             // Here we return a couple to get lexicographical order
     ///             // for free.

@@ -693,7 +693,7 @@ impl Collector for TopDocs {
     }
 }

-/// Segment Collector associated with `TopDocs`.
+/// Segment Collector associated to `TopDocs`.
 pub struct TopScoreSegmentCollector(TopSegmentCollector<Score>);

 impl SegmentCollector for TopScoreSegmentCollector {

@@ -40,7 +40,7 @@ pub trait ScoreTweaker<TScore>: Sync {
     /// Type of the associated [`ScoreSegmentTweaker`].
     type Child: ScoreSegmentTweaker<TScore>;

-    /// Builds a child tweaker for a specific segment. The child scorer is associated with
+    /// Builds a child tweaker for a specific segment. The child scorer is associated to
     /// a specific segment.
     fn segment_tweaker(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
 }
@@ -19,7 +19,7 @@ use crate::error::{DataCorruption, TantivyError};
 use crate::indexer::index_writer::{MAX_NUM_THREAD, MEMORY_ARENA_NUM_BYTES_MIN};
 use crate::indexer::segment_updater::save_metas;
 use crate::reader::{IndexReader, IndexReaderBuilder};
-use crate::schema::{Cardinality, Field, FieldType, Schema};
+use crate::schema::{Field, FieldType, Schema};
 use crate::tokenizer::{TextAnalyzer, TokenizerManager};
 use crate::IndexWriter;

@@ -152,7 +152,9 @@ impl IndexBuilder {
     /// This should only be used for unit tests.
     pub fn create_in_ram(self) -> Result<Index, TantivyError> {
         let ram_directory = RamDirectory::create();
-        self.create(ram_directory)
+        Ok(self
+            .create(ram_directory)
+            .expect("Creating a RamDirectory should never fail"))
     }

     /// Creates a new index in a given filepath.

@@ -226,44 +228,10 @@ impl IndexBuilder {
             ))
         }
     }

-    fn validate(&self) -> crate::Result<()> {
-        if let Some(schema) = self.schema.as_ref() {
-            if let Some(sort_by_field) = self.index_settings.sort_by_field.as_ref() {
-                let schema_field = schema.get_field(&sort_by_field.field).ok_or_else(|| {
-                    TantivyError::InvalidArgument(format!(
-                        "Field to sort index {} not found in schema",
-                        sort_by_field.field
-                    ))
-                })?;
-                let entry = schema.get_field_entry(schema_field);
-                if !entry.is_fast() {
-                    return Err(TantivyError::InvalidArgument(format!(
-                        "Field {} is no fast field. Field needs to be a single value fast field \
-                         to be used to sort an index",
-                        sort_by_field.field
-                    )));
-                }
-                if entry.field_type().fastfield_cardinality() != Some(Cardinality::SingleValue) {
-                    return Err(TantivyError::InvalidArgument(format!(
-                        "Only single value fast field Cardinality supported for sorting index {}",
-                        sort_by_field.field
-                    )));
-                }
-            }
-            Ok(())
-        } else {
-            Err(TantivyError::InvalidArgument(
-                "no schema passed".to_string(),
-            ))
-        }
-    }
-
     /// Creates a new index given an implementation of the trait `Directory`.
     ///
     /// If a directory previously existed, it will be erased.
     fn create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
-        self.validate()?;
         let dir = dir.into();
         let directory = ManagedDirectory::wrap(dir)?;
         save_new_metas(
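The removed `validate` concentrates the two checks an index sort needs: the sort field must exist in the schema, and it must be a single-value fast field. A sketch of the same guard in isolation (the `FieldEntry` stand-in and its flags are hypothetical; tantivy's real schema types differ):

```rust
#[derive(Debug)]
struct FieldEntry {
    name: String,
    is_fast: bool,
    is_single_value: bool,
}

// Same shape as the removed IndexBuilder::validate: look the field up,
// then insist it is a single-value fast field before allowing a sorted index.
fn validate_sort_field<'a>(
    schema: &'a [FieldEntry],
    sort_by_field: &str,
) -> Result<&'a FieldEntry, String> {
    let entry = schema
        .iter()
        .find(|entry| entry.name == sort_by_field)
        .ok_or_else(|| format!("Field to sort index {sort_by_field} not found in schema"))?;
    if !entry.is_fast || !entry.is_single_value {
        return Err(format!(
            "Field {sort_by_field} needs to be a single value fast field to sort an index"
        ));
    }
    Ok(entry)
}

fn main() {
    let schema = vec![FieldEntry {
        name: "intval".to_string(),
        is_fast: true,
        is_single_value: true,
    }];
    assert!(validate_sort_field(&schema, "intval").is_ok());
    assert!(validate_sort_field(&schema, "missing").is_err());
}
```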
@@ -130,7 +130,7 @@ impl SegmentMeta {
     /// Returns the relative path of a component of our segment.
     ///
     /// It just joins the segment id with the extension
-    /// associated with a segment component.
+    /// associated to a segment component.
     pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
         let mut path = self.id().uuid_string();
         path.push_str(&*match component {

@@ -326,13 +326,13 @@ pub struct IndexMeta {
     /// `IndexSettings` to configure index options.
     #[serde(default)]
     pub index_settings: IndexSettings,
-    /// List of `SegmentMeta` information associated with each finalized segment of the index.
+    /// List of `SegmentMeta` information associated to each finalized segment of the index.
     pub segments: Vec<SegmentMeta>,
     /// Index `Schema`
     pub schema: Schema,
-    /// Opstamp associated with the last `commit` operation.
+    /// Opstamp associated to the last `commit` operation.
     pub opstamp: Opstamp,
-    /// Payload associated with the last commit.
+    /// Payload associated to the last commit.
     ///
     /// Upon commit, clients can optionally add a small `String` payload to their commit
     /// to help identify this commit.
@@ -9,17 +9,18 @@ use crate::schema::{IndexRecordOption, Term};
 use crate::termdict::TermDictionary;

 /// The inverted index reader is in charge of accessing
-/// the inverted index associated with a specific field.
+/// the inverted index associated to a specific field.
 ///
 /// # Note
 ///
-/// It is safe to delete the segment associated with
+/// It is safe to delete the segment associated to
 /// an `InvertedIndexReader`. As long as it is open,
-/// the [`FileSlice`] it is relying on should
+/// the `FileSlice` it is relying on should
 /// stay available.
 ///
+///
 /// `InvertedIndexReader` are created by calling
-/// [`SegmentReader::inverted_index()`](crate::SegmentReader::inverted_index).
+/// the `SegmentReader`'s [`.inverted_index(...)`] method
 pub struct InvertedIndexReader {
     termdict: TermDictionary,
     postings_file_slice: FileSlice,

@@ -29,7 +30,7 @@ pub struct InvertedIndexReader {
 }

 impl InvertedIndexReader {
-    #[allow(clippy::needless_pass_by_value)] // for symmetry
+    #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
     pub(crate) fn new(
         termdict: TermDictionary,
         postings_file_slice: FileSlice,

@@ -74,7 +75,7 @@ impl InvertedIndexReader {
     ///
     /// This is useful for enumerating through a list of terms,
     /// and consuming the associated posting lists while avoiding
-    /// reallocating a [`BlockSegmentPostings`].
+    /// reallocating a `BlockSegmentPostings`.
     ///
     /// # Warning
     ///

@@ -95,7 +96,7 @@ impl InvertedIndexReader {
     /// Returns a block postings given a `Term`.
     /// This method is for an advanced usage only.
     ///
-    /// Most users should prefer using [`Self::read_postings()`] instead.
+    /// Most user should prefer using `read_postings` instead.
     pub fn read_block_postings(
         &self,
         term: &Term,

@@ -109,7 +110,7 @@ impl InvertedIndexReader {
     /// Returns a block postings given a `term_info`.
     /// This method is for an advanced usage only.
     ///
-    /// Most users should prefer using [`Self::read_postings()`] instead.
+    /// Most user should prefer using `read_postings` instead.
     pub fn read_block_postings_from_terminfo(
         &self,
         term_info: &TermInfo,

@@ -129,7 +130,7 @@ impl InvertedIndexReader {
     /// Returns a posting object given a `term_info`.
     /// This method is for an advanced usage only.
     ///
-    /// Most users should prefer using [`Self::read_postings()`] instead.
+    /// Most user should prefer using `read_postings` instead.
     pub fn read_postings_from_terminfo(
         &self,
         term_info: &TermInfo,

@@ -163,12 +164,12 @@ impl InvertedIndexReader {
     /// or `None` if the term has never been encountered and indexed.
     ///
     /// If the field was not indexed with the indexing options that cover
-    /// the requested options, the returned [`SegmentPostings`] the method does not fail
+    /// the requested options, the returned `SegmentPostings` the method does not fail
     /// and returns a `SegmentPostings` with as much information as possible.
     ///
-    /// For instance, requesting [`IndexRecordOption::WithFreqs`] for a
-    /// [`TextOptions`](crate::schema::TextOptions) that does not index position
-    /// will return a [`SegmentPostings`] with `DocId`s and frequencies.
+    /// For instance, requesting `IndexRecordOption::Freq` for a
+    /// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
+    /// with `DocId`s and frequencies.
     pub fn read_postings(
         &self,
         term: &Term,

@@ -210,7 +211,7 @@ impl InvertedIndexReader {
     /// Returns a block postings given a `Term`.
     /// This method is for an advanced usage only.
     ///
-    /// Most users should prefer using [`Self::read_postings()`] instead.
+    /// Most user should prefer using `read_postings` instead.
/// Most user should prefer using `read_postings` instead.
|
||||||
pub async fn warm_postings(
|
pub async fn warm_postings(
|
||||||
&self,
|
&self,
|
||||||
term: &Term,
|
term: &Term,
|
||||||
|
|||||||
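For orientation, `read_postings` is the entry point the docs above steer users toward. A minimal sketch of looking up a term's postings across segments (assuming the 0.19-era API; `searcher` and `field` are hypothetical):

    use tantivy::schema::{Field, IndexRecordOption};
    use tantivy::{DocSet, Searcher, Term, TERMINATED};

    // A sketch only: counts how many documents contain the term "abc" in `field`.
    fn count_docs_for_term(searcher: &Searcher, field: Field) -> tantivy::Result<u32> {
        let term = Term::from_field_text(field, "abc");
        let mut count = 0u32;
        for segment_reader in searcher.segment_readers() {
            let inverted_index = segment_reader.inverted_index(field)?;
            // `read_postings` returns `Ok(None)` when the term was never indexed.
            if let Some(mut postings) =
                inverted_index.read_postings(&term, IndexRecordOption::Basic)?
            {
                while postings.doc() != TERMINATED {
                    count += 1;
                    postings.advance();
                }
            }
        }
        Ok(count)
    }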
@@ -69,7 +69,7 @@ pub struct Searcher {
 }
 
 impl Searcher {
-    /// Returns the `Index` associated with the `Searcher`
+    /// Returns the `Index` associated to the `Searcher`
     pub fn index(&self) -> &Index {
         &self.inner.index
     }
@@ -108,7 +108,7 @@ impl Searcher {
         store_reader.get_async(doc_address.doc_id).await
     }
 
-    /// Access the schema associated with the index of this searcher.
+    /// Access the schema associated to the index of this searcher.
     pub fn schema(&self) -> &Schema {
         &self.inner.schema
     }
@@ -161,11 +161,11 @@ impl Searcher {
     ///
     /// Search works as follows :
     ///
-    /// First the weight object associated with the query is created.
+    /// First the weight object associated to the query is created.
     ///
     /// Then, the query loops over the segments and for each segment :
     /// - setup the collector and informs it that the segment being processed has changed.
-    /// - creates a SegmentCollector for collecting documents associated with the segment
+    /// - creates a SegmentCollector for collecting documents associated to the segment
     /// - creates a `Scorer` object associated for this segment
     /// - iterate through the matched documents and push them to the segment collector.
     ///
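The search flow described in the doc comment above is what a plain `Searcher::search` call drives end to end. A minimal sketch (0.19-era API; the index, field, and query string are hypothetical):

    use tantivy::collector::TopDocs;
    use tantivy::query::QueryParser;
    use tantivy::schema::Field;
    use tantivy::Index;

    // A sketch only: run a query and print the ten best hits.
    fn top_ten(index: &Index, text_field: Field) -> tantivy::Result<()> {
        let reader = index.reader()?;
        let searcher = reader.searcher();
        let query = QueryParser::for_index(index, vec![text_field]).parse_query("abc")?;
        // `search` creates the query's Weight, then per segment a SegmentCollector
        // and a Scorer, and pushes every matching document into the collector.
        for (score, doc_address) in searcher.search(&query, &TopDocs::with_limit(10))? {
            println!("{score}: {doc_address:?}");
        }
        Ok(())
    }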
@@ -70,7 +70,7 @@ impl Segment {
     /// Returns the relative path of a component of our segment.
     ///
     /// It just joins the segment id with the extension
-    /// associated with a segment component.
+    /// associated to a segment component.
     pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
         self.meta.relative_path(component)
     }
@@ -6,7 +6,7 @@ use std::slice;
 /// except the delete component that takes an `segment_uuid`.`delete_opstamp`.`component_extension`
 #[derive(Copy, Clone, Eq, PartialEq)]
 pub enum SegmentComponent {
-    /// Postings (or inverted list). Sorted lists of document ids, associated with terms
+    /// Postings (or inverted list). Sorted lists of document ids, associated to terms
     Postings,
     /// Positions of terms in each document.
     Positions,
@@ -57,7 +57,7 @@ impl SegmentId {
     /// Picking the first 8 chars is ok to identify
     /// segments in a display message (e.g. a5c4dfcb).
     pub fn short_uuid_string(&self) -> String {
-        self.0.as_simple().to_string()[..8].to_string()
+        (&self.0.as_simple().to_string()[..8]).to_string()
     }
 
     /// Returns a segment uuid string.
@@ -89,7 +89,7 @@ impl SegmentReader {
         &self.fast_fields_readers
     }
 
-    /// Accessor to the `FacetReader` associated with a given `Field`.
+    /// Accessor to the `FacetReader` associated to a given `Field`.
     pub fn facet_reader(&self, field: Field) -> crate::Result<FacetReader> {
         let field_entry = self.schema.get_field_entry(field);
 
@@ -208,18 +208,18 @@ impl SegmentReader {
         })
     }
 
-    /// Returns a field reader associated with the field given in argument.
+    /// Returns a field reader associated to the field given in argument.
     /// If the field was not present in the index during indexing time,
     /// the InvertedIndexReader is empty.
     ///
     /// The field reader is in charge of iterating through the
-    /// term dictionary associated with a specific field,
-    /// and opening the posting list associated with any term.
+    /// term dictionary associated to a specific field,
+    /// and opening the posting list associated to any term.
     ///
-    /// If the field is not marked as index, a warning is logged and an empty `InvertedIndexReader`
+    /// If the field is not marked as index, a warn is logged and an empty `InvertedIndexReader`
     /// is returned.
-    /// Similarly, if the field is marked as indexed but no term has been indexed for the given
-    /// index, an empty `InvertedIndexReader` is returned (but no warning is logged).
+    /// Similarly if the field is marked as indexed but no term has been indexed for the given
+    /// index. an empty `InvertedIndexReader` is returned (but no warning is logged).
     pub fn inverted_index(&self, field: Field) -> crate::Result<Arc<InvertedIndexReader>> {
         if let Some(inv_idx_reader) = self
             .inv_idx_reader_cache
@@ -241,7 +241,7 @@ impl SegmentReader {
 
         if postings_file_opt.is_none() || record_option_opt.is_none() {
             // no documents in the segment contained this field.
-            // As a result, no data is associated with the inverted index.
+            // As a result, no data is associated to the inverted index.
             //
             // Returns an empty inverted index.
             let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic);
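The contract spelled out above means callers never need to special-case fields that are absent from a segment. A minimal sketch (0.19-era API; `segment_reader` and `field` are assumed to exist):

    use tantivy::schema::Field;
    use tantivy::SegmentReader;

    // A sketch only: safe even if `field` never occurs in this segment.
    fn num_terms_in_segment(segment_reader: &SegmentReader, field: Field) -> tantivy::Result<usize> {
        // An absent or empty field yields an empty reader, not an error.
        let inverted_index = segment_reader.inverted_index(field)?;
        Ok(inverted_index.terms().num_terms())
    }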
@@ -154,14 +154,14 @@ impl CompositeFile {
         }
     }
 
-    /// Returns the `FileSlice` associated with
-    /// a given `Field` and stored in a `CompositeFile`.
+    /// Returns the `FileSlice` associated
+    /// to a given `Field` and stored in a `CompositeFile`.
     pub fn open_read(&self, field: Field) -> Option<FileSlice> {
         self.open_read_with_idx(field, 0)
     }
 
-    /// Returns the `FileSlice` associated with
-    /// a given `Field` and stored in a `CompositeFile`.
+    /// Returns the `FileSlice` associated
+    /// to a given `Field` and stored in a `CompositeFile`.
     pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<FileSlice> {
         self.offsets_index
             .get(&FileAddr { field, idx })
@@ -39,7 +39,7 @@ impl RetryPolicy {
 
 /// The `DirectoryLock` is an object that represents a file lock.
 ///
-/// It is associated with a lock file, that gets deleted on `Drop.`
+/// It is associated to a lock file, that gets deleted on `Drop.`
 pub struct DirectoryLock(Box<dyn Send + Sync + 'static>);
 
 struct DirectoryLockGuard {
@@ -1,5 +1,5 @@
 use std::ops::{Deref, Range};
-use std::sync::Arc;
+use std::sync::{Arc, Weak};
 use std::{fmt, io};
 
 use async_trait::async_trait;
@@ -8,13 +8,16 @@ use stable_deref_trait::StableDeref;
 
 use crate::directory::OwnedBytes;
 
+pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
+pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
+
 /// Objects that represents files sections in tantivy.
 ///
 /// By contract, whatever happens to the directory file, as long as a FileHandle
 /// is alive, the data associated with it cannot be altered or destroyed.
 ///
-/// The underlying behavior is therefore specific to the [`Directory`](crate::Directory) that
-/// created it. Despite its name, a [`FileSlice`] may or may not directly map to an actual file
+/// The underlying behavior is therefore specific to the `Directory` that created it.
+/// Despite its name, a `FileSlice` may or may not directly map to an actual file
 /// on the filesystem.
 
 #[async_trait]
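Moving `ArcBytes`/`WeakArcBytes` next to `FileSlice` makes the type-erased byte buffer available to every directory implementation; the weak alias is what lets an mmap cache notice when the last strong reference to a mapping is gone. A minimal sketch of that pattern (hypothetical `MmapCache`, not the crate's actual cache code):

    use std::collections::HashMap;
    use std::ops::Deref;
    use std::path::PathBuf;
    use std::sync::{Arc, Weak};

    type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
    type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;

    // Hypothetical cache: holds weak references so entries die with their readers.
    struct MmapCache {
        cache: HashMap<PathBuf, WeakArcBytes>,
    }

    impl MmapCache {
        fn get(&mut self, path: &PathBuf) -> Option<ArcBytes> {
            // `upgrade` fails once every strong `ArcBytes` clone is gone,
            // at which point the entry can be evicted.
            self.cache.get(path).and_then(Weak::upgrade)
        }
    }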
@@ -9,7 +9,7 @@ use crc32fast::Hasher;
 
 use crate::directory::{WatchCallback, WatchCallbackList, WatchHandle};
 
-const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });
+pub const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });
 
 // Watches a file and executes registered callbacks when the file is modified.
 pub struct FileWatcher {
@@ -3,7 +3,7 @@ use std::fs::{self, File, OpenOptions};
 use std::io::{self, BufWriter, Read, Seek, Write};
 use std::ops::Deref;
 use std::path::{Path, PathBuf};
-use std::sync::{Arc, RwLock, Weak};
+use std::sync::{Arc, RwLock};
 use std::{fmt, result};
 
 use fs2::FileExt;
@@ -18,13 +18,10 @@ use crate::directory::error::{
 };
 use crate::directory::file_watcher::FileWatcher;
 use crate::directory::{
-    AntiCallToken, Directory, DirectoryLock, FileHandle, Lock, OwnedBytes, TerminatingWrite,
-    WatchCallback, WatchHandle, WritePtr,
+    AntiCallToken, ArcBytes, Directory, DirectoryLock, FileHandle, Lock, OwnedBytes,
+    TerminatingWrite, WatchCallback, WatchHandle, WeakArcBytes, WritePtr,
 };
 
-pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
-pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
-
 /// Create a default io error given a string.
 pub(crate) fn make_io_err(msg: String) -> io::Error {
     io::Error::new(io::ErrorKind::Other, msg)
@@ -304,7 +301,7 @@ pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
            "Path {:?} does not have parent directory.",
        )
    })?;
-    let mut tempfile = tempfile::Builder::new().tempfile_in(parent_path)?;
+    let mut tempfile = tempfile::Builder::new().tempfile_in(&parent_path)?;
    tempfile.write_all(content)?;
    tempfile.flush()?;
    tempfile.as_file_mut().sync_data()?;
@@ -337,7 +334,7 @@ impl Directory for MmapDirectory {
         Ok(Arc::new(owned_bytes))
     }
 
-    /// Any entry associated with the path in the mmap will be
+    /// Any entry associated to the path in the mmap will be
     /// removed before the file is deleted.
     fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
         let full_path = self.resolve_path(path);
@@ -475,8 +472,6 @@ mod tests {
     // There are more tests in directory/mod.rs
     // The following tests are specific to the MmapDirectory
 
-    use std::time::Duration;
-
     use common::HasLen;
 
     use super::*;
@@ -571,21 +566,9 @@ mod tests {
        assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
    }
 
-    fn assert_eventually<P: Fn() -> Option<String>>(predicate: P) {
-        for _ in 0..30 {
-            if predicate().is_none() {
-                break;
-            }
-            std::thread::sleep(Duration::from_millis(200));
-        }
-        if let Some(error_msg) = predicate() {
-            panic!("{}", error_msg);
-        }
-    }
-
     #[test]
-    fn test_mmap_released() {
-        let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
+    fn test_mmap_released() -> crate::Result<()> {
+        let mmap_directory = MmapDirectory::create_from_tempdir()?;
         let mut schema_builder: SchemaBuilder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
@@ -594,56 +577,40 @@ mod tests {
             let index =
                 Index::create(mmap_directory.clone(), schema, IndexSettings::default()).unwrap();
 
-            let mut index_writer = index.writer_for_tests().unwrap();
+            let mut index_writer = index.writer_for_tests()?;
             let mut log_merge_policy = LogMergePolicy::default();
             log_merge_policy.set_min_num_segments(3);
             index_writer.set_merge_policy(Box::new(log_merge_policy));
             for _num_commits in 0..10 {
                 for _ in 0..10 {
-                    index_writer.add_document(doc!(text_field=>"abc")).unwrap();
+                    index_writer.add_document(doc!(text_field=>"abc"))?;
                 }
-                index_writer.commit().unwrap();
+                index_writer.commit()?;
             }
 
             let reader = index
                 .reader_builder()
                 .reload_policy(ReloadPolicy::Manual)
-                .try_into()
-                .unwrap();
+                .try_into()?;
 
             for _ in 0..4 {
-                index_writer.add_document(doc!(text_field=>"abc")).unwrap();
-                index_writer.commit().unwrap();
-                reader.reload().unwrap();
+                index_writer.add_document(doc!(text_field=>"abc"))?;
+                index_writer.commit()?;
+                reader.reload()?;
             }
-            index_writer.wait_merging_threads().unwrap();
+            index_writer.wait_merging_threads()?;
 
-            reader.reload().unwrap();
+            reader.reload()?;
             let num_segments = reader.searcher().segment_readers().len();
             assert!(num_segments <= 4);
             let num_components_except_deletes_and_tempstore =
                 crate::core::SegmentComponent::iterator().len() - 2;
-            let max_num_mmapped = num_components_except_deletes_and_tempstore * num_segments;
-            assert_eventually(|| {
-                let num_mmapped = mmap_directory.get_cache_info().mmapped.len();
-                if num_mmapped > max_num_mmapped {
-                    Some(format!(
-                        "Expected at most {max_num_mmapped} mmapped files, got {num_mmapped}"
-                    ))
-                } else {
-                    None
-                }
-            });
+            assert_eq!(
+                num_segments * num_components_except_deletes_and_tempstore,
+                mmap_directory.get_cache_info().mmapped.len()
+            );
         }
-        // This test failed on CI. The last Mmap is dropped from the merging thread so there might
-        // be a race condition indeed.
-        assert_eventually(|| {
-            let num_mmapped = mmap_directory.get_cache_info().mmapped.len();
-            if num_mmapped > 0 {
-                Some(format!("Expected no mmapped files, got {num_mmapped}"))
-            } else {
-                None
-            }
-        });
+        assert!(mmap_directory.get_cache_info().mmapped.is_empty());
+        Ok(())
     }
 }
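The `assert_eventually` helper on the left-hand side exists because the last mmap can be dropped from a merging thread, so the cache drains asynchronously rather than atomically; the CI comment in the old code records exactly that race. The right-hand side asserts immediately, which is the shape that was flaky. Polling against a deadline is the usual remedy, roughly (a std-only sketch):

    use std::time::{Duration, Instant};

    // A sketch of the polling pattern: retry a check until it passes or a deadline expires.
    fn eventually(mut check: impl FnMut() -> bool) {
        let deadline = Instant::now() + Duration::from_secs(6);
        while !check() {
            assert!(
                Instant::now() < deadline,
                "condition did not hold before the deadline"
            );
            std::thread::sleep(Duration::from_millis(200));
        }
    }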
@@ -26,6 +26,7 @@ pub use ownedbytes::OwnedBytes;
 pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
 pub use self::directory::{Directory, DirectoryClone, DirectoryLock};
 pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
+pub(crate) use self::file_slice::{ArcBytes, WeakArcBytes};
 pub use self::file_slice::{FileHandle, FileSlice};
 pub use self::ram_directory::RamDirectory;
 pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
@@ -136,20 +136,6 @@ impl RamDirectory {
         Self::default()
     }
 
-    /// Deep clones the directory.
-    ///
-    /// Ulterior writes on one of the copy
-    /// will not affect the other copy.
-    pub fn deep_clone(&self) -> RamDirectory {
-        let inner_clone = InnerDirectory {
-            fs: self.fs.read().unwrap().fs.clone(),
-            watch_router: Default::default(),
-        };
-        RamDirectory {
-            fs: Arc::new(RwLock::new(inner_clone)),
-        }
-    }
-
     /// Returns the sum of the size of the different files
     /// in the [`RamDirectory`].
     pub fn total_mem_usage(&self) -> usize {
@@ -270,23 +256,4 @@ mod tests {
         assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic);
         assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq);
     }
-
-    #[test]
-    fn test_ram_directory_deep_clone() {
-        let dir = RamDirectory::default();
-        let test = Path::new("test");
-        let test2 = Path::new("test2");
-        dir.atomic_write(test, b"firstwrite").unwrap();
-        let dir_clone = dir.deep_clone();
-        assert_eq!(
-            dir_clone.atomic_read(test).unwrap(),
-            dir.atomic_read(test).unwrap()
-        );
-        dir.atomic_write(test, b"original").unwrap();
-        dir_clone.atomic_write(test, b"clone").unwrap();
-        dir_clone.atomic_write(test2, b"clone2").unwrap();
-        assert_eq!(dir.atomic_read(test).unwrap(), b"original");
-        assert_eq!(&dir_clone.atomic_read(test).unwrap(), b"clone");
-        assert_eq!(&dir_clone.atomic_read(test2).unwrap(), b"clone2");
-    }
 }
@@ -1,4 +1,3 @@
-use std::ops::Range;
 use std::sync::Arc;
 
 use fastfield_codecs::Column;
@@ -32,38 +31,36 @@ impl BytesFastFieldReader {
         Ok(BytesFastFieldReader { idx_reader, values })
     }
 
-    fn range(&self, doc: DocId) -> Range<u32> {
-        let start = self.idx_reader.get_val(doc) as u32;
-        let end = self.idx_reader.get_val(doc + 1) as u32;
-        start..end
+    fn range(&self, doc: DocId) -> (usize, usize) {
+        let idx = doc as u64;
+        let start = self.idx_reader.get_val(idx) as usize;
+        let stop = self.idx_reader.get_val(idx + 1) as usize;
+        (start, stop)
     }
 
-    /// Returns the bytes associated with the given `doc`
+    /// Returns the bytes associated to the given `doc`
     pub fn get_bytes(&self, doc: DocId) -> &[u8] {
-        let range = self.range(doc);
-        &self.values.as_slice()[range.start as usize..range.end as usize]
+        let (start, stop) = self.range(doc);
+        &self.values.as_slice()[start..stop]
     }
 
-    /// Returns the length of the bytes associated with the given `doc`
-    pub fn num_bytes(&self, doc: DocId) -> u64 {
-        let range = self.range(doc);
-        (range.end - range.start) as u64
+    /// Returns the length of the bytes associated to the given `doc`
+    pub fn num_bytes(&self, doc: DocId) -> usize {
+        let (start, stop) = self.range(doc);
+        stop - start
     }
 
     /// Returns the overall number of bytes in this bytes fast field.
-    pub fn total_num_bytes(&self) -> u64 {
-        self.values.len() as u64
+    pub fn total_num_bytes(&self) -> usize {
+        self.values.len()
     }
 }
 
 impl MultiValueLength for BytesFastFieldReader {
-    fn get_range(&self, doc_id: DocId) -> std::ops::Range<u32> {
-        self.range(doc_id)
-    }
     fn get_len(&self, doc_id: DocId) -> u64 {
-        self.num_bytes(doc_id)
+        self.num_bytes(doc_id) as u64
     }
     fn get_total_len(&self) -> u64 {
-        self.total_num_bytes()
+        self.total_num_bytes() as u64
     }
 }
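For reference, a bytes fast field is one flat byte buffer plus an offset column with `num_docs + 1` entries, so a per-document read is two offset lookups and a slice. A minimal self-contained sketch of that layout (hypothetical `BytesColumn`, mirroring the reader in the hunk above):

    // A sketch only, mirroring BytesFastFieldReader's layout:
    // `offsets` has num_docs + 1 entries; doc i owns bytes[offsets[i]..offsets[i + 1]].
    struct BytesColumn {
        offsets: Vec<u64>,
        bytes: Vec<u8>,
    }

    impl BytesColumn {
        fn get_bytes(&self, doc: u32) -> &[u8] {
            let start = self.offsets[doc as usize] as usize;
            let stop = self.offsets[doc as usize + 1] as usize;
            &self.bytes[start..stop]
        }
    }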
@@ -24,7 +24,7 @@ use crate::DocId;
 ///
 /// Once acquired, writing is done by calling
 /// [`.add_document_val(&[u8])`](BytesFastFieldWriter::add_document_val)
-/// once per document, even if there are no bytes associated with it.
+/// once per document, even if there are no bytes associated to it.
 pub struct BytesFastFieldWriter {
     field: Field,
     vals: Vec<u8>,
@@ -45,7 +45,7 @@ impl BytesFastFieldWriter {
     pub fn mem_usage(&self) -> usize {
         self.vals.capacity() + self.doc_index.capacity() * std::mem::size_of::<u64>()
     }
-    /// Access the field associated with the `BytesFastFieldWriter`
+    /// Access the field associated to the `BytesFastFieldWriter`
     pub fn field(&self) -> Field {
         self.field
     }
@@ -57,18 +57,17 @@ impl BytesFastFieldWriter {
 
     /// Shift to the next document and add all of the
     /// matching field values present in the document.
-    pub fn add_document(&mut self, doc: &Document) -> crate::Result<()> {
+    pub fn add_document(&mut self, doc: &Document) {
         self.next_doc();
         for field_value in doc.get_all(self.field) {
             if let Value::Bytes(ref bytes) = field_value {
                 self.vals.extend_from_slice(bytes);
-                return Ok(());
+                return;
             }
         }
-        Ok(())
     }
 
-    /// Register the bytes associated with a document.
+    /// Register the bytes associated to a document.
     ///
     /// The method returns the `DocId` of the document that was
     /// just written.
@@ -7,7 +7,7 @@ use crate::termdict::{TermDictionary, TermOrdinal};
 use crate::DocId;
 
 /// The facet reader makes it possible to access the list of
-/// facets associated with a given document in a specific
+/// facets associated to a given document in a specific
 /// segment.
 ///
 /// Rather than manipulating `Facet` object directly, the API
@@ -58,7 +58,7 @@ impl FacetReader {
         &self.term_dict
     }
 
-    /// Given a term ordinal returns the term associated with it.
+    /// Given a term ordinal returns the term associated to it.
     pub fn facet_from_ord(
         &mut self,
         facet_ord: TermOrdinal,
@@ -74,7 +74,7 @@ impl FacetReader {
         Ok(())
     }
 
-    /// Return the list of facet ordinals associated with a document.
+    /// Return the list of facet ordinals associated to a document.
     pub fn facet_ords(&self, doc: DocId, output: &mut Vec<u64>) {
         self.term_ords.get_vals(doc, output);
     }
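Facet access is ordinal-first: `facet_ords` fills a reusable buffer with term ordinals, and `facet_from_ord` resolves an ordinal to a `Facet` only when needed. A minimal sketch (0.19-era API; `facet_reader` and `doc` are assumed to exist):

    use tantivy::fastfield::FacetReader;
    use tantivy::schema::Facet;
    use tantivy::DocId;

    // A sketch only: resolve all facets of one document.
    fn facets_of_doc(facet_reader: &mut FacetReader, doc: DocId) -> tantivy::Result<Vec<Facet>> {
        let mut ords = Vec::new();
        facet_reader.facet_ords(doc, &mut ords);
        let mut facets = Vec::with_capacity(ords.len());
        for ord in ords {
            let mut facet = Facet::root();
            facet_reader.facet_from_ord(ord, &mut facet)?;
            facets.push(facet);
        }
        Ok(facets)
    }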
@@ -7,15 +7,16 @@
 //! It is designed for the fast random access of some document
 //! fields given a document id.
 //!
-//! Fast fields are useful when a field is required for all or most of
-//! the `DocSet`: for instance for scoring, grouping, aggregation, filtering, or faceting.
+//! `FastField` are useful when a field is required for all or most of
+//! the `DocSet` : for instance for scoring, grouping, filtering, or faceting.
 //!
 //!
 //! Fields have to be declared as `FAST` in the schema.
-//! Currently supported fields are: u64, i64, f64, bytes and text.
-//! Fast fields are stored in with [different codecs](fastfield_codecs). The best codec is detected
-//! automatically, when serializing.
+//! Currently supported fields are: u64, i64, f64 and bytes.
+//! u64, i64 and f64 fields are stored in a bit-packed fashion so that
+//! their memory usage is directly linear with the amplitude of the
+//! values stored.
 //!
 //! Read access performance is comparable to that of an array lookup.
 
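As a concrete anchor for the module docs above, declaring a `FAST` field and reading it back looks roughly like this (a sketch against the 0.19-era API; the field name and memory budget are made up):

    use tantivy::schema::{Schema, FAST, INDEXED};
    use tantivy::{doc, Index};

    fn fast_field_roundtrip() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        // `FAST` stores the column for random access by DocId.
        let popularity = schema_builder.add_u64_field("popularity", FAST | INDEXED);
        let index = Index::create_in_ram(schema_builder.build());

        let mut writer = index.writer(50_000_000)?;
        writer.add_document(doc!(popularity => 42u64))?;
        writer.commit()?;

        let searcher = index.reader()?.searcher();
        let segment_reader = &searcher.segment_readers()[0];
        // One fast field reader per segment; lookups behave like array indexing.
        let popularity_reader = segment_reader.fast_fields().u64(popularity)?;
        assert_eq!(popularity_reader.get_val(0), 42u64);
        Ok(())
    }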
@@ -25,15 +26,11 @@ pub use self::alive_bitset::{intersect_alive_bitsets, write_alive_bitset, AliveB
 pub use self::bytes::{BytesFastFieldReader, BytesFastFieldWriter};
 pub use self::error::{FastFieldNotAvailableError, Result};
 pub use self::facet_reader::FacetReader;
-pub(crate) use self::multivalued::{get_fastfield_codecs_for_multivalue, MultivalueStartIndex};
-pub use self::multivalued::{
-    MultiValueU128FastFieldWriter, MultiValuedFastFieldReader, MultiValuedFastFieldWriter,
-    MultiValuedU128FastFieldReader,
-};
+pub(crate) use self::multivalued::MultivalueStartIndex;
+pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter};
 pub use self::readers::FastFieldReaders;
 pub(crate) use self::readers::{type_and_cardinality, FastType};
 pub use self::serializer::{Column, CompositeFastFieldSerializer};
-use self::writer::unexpected_value;
 pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
 use crate::schema::{Type, Value};
 use crate::{DateTime, DocId};
@@ -50,9 +47,7 @@ mod writer;
 /// Trait for `BytesFastFieldReader` and `MultiValuedFastFieldReader` to return the length of data
 /// for a doc_id
 pub trait MultiValueLength {
-    /// returns the positions for a docid
-    fn get_range(&self, doc_id: DocId) -> std::ops::Range<u32>;
-    /// returns the num of values associated with a doc_id
+    /// returns the num of values associated to a doc_id
     fn get_len(&self, doc_id: DocId) -> u64;
     /// returns the sum of num values for all doc_ids
     fn get_total_len(&self) -> u64;
@@ -120,16 +115,15 @@ impl FastValue for DateTime {
     }
 }
 
-fn value_to_u64(value: &Value) -> crate::Result<u64> {
-    let value = match value {
+fn value_to_u64(value: &Value) -> u64 {
+    match value {
         Value::U64(val) => val.to_u64(),
         Value::I64(val) => val.to_u64(),
         Value::F64(val) => val.to_u64(),
         Value::Bool(val) => val.to_u64(),
         Value::Date(val) => val.to_u64(),
-        _ => return Err(unexpected_value("u64/i64/f64/bool/date", value)),
-    };
-    Ok(value)
+        _ => panic!("Expected a u64/i64/f64/bool/date field, got {:?} ", value),
+    }
 }
 
 /// The fast field type
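The two sides of `value_to_u64` differ in failure style: the left threads a `crate::Result` out, so a schema/value mismatch surfaces as an error at indexing time, while the right panics inside the writer. A minimal sketch of the recoverable style (hypothetical helper, not the crate's function):

    use tantivy::schema::Value;

    // A sketch of the left-hand style: a mismatched value becomes an error the
    // caller can surface, instead of a panic inside the indexing thread.
    fn checked_value_to_u64(value: &Value) -> Result<u64, String> {
        match value {
            Value::U64(val) => Ok(*val),
            Value::I64(val) => Ok(*val as u64),
            other => Err(format!("expected a u64/i64 field, got {other:?}")),
        }
    }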
@@ -184,9 +178,9 @@ mod tests {
     #[test]
     pub fn test_fastfield() {
         let test_fastfield = fastfield_codecs::serialize_and_load(&[100u64, 200u64, 300u64][..]);
-        assert_eq!(test_fastfield.get_val(0), 100);
-        assert_eq!(test_fastfield.get_val(1), 200);
-        assert_eq!(test_fastfield.get_val(2), 300);
+        assert_eq!(test_fastfield.get_val(0u64), 100);
+        assert_eq!(test_fastfield.get_val(1u64), 200);
+        assert_eq!(test_fastfield.get_val(2u64), 300);
     }
 
     #[test]
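The `0` vs `0u64` churn in this test is the visible edge of a wider change: on the left, fast field columns are indexed by `u32` (matching tantivy's `DocId`), while the branch on the right still indexes columns by `u64`. Roughly, the two shapes are (a sketch, not the actual trait definitions):

    // Left (u32-indexed, DocId-sized):
    trait ColumnU32 {
        fn get_val(&self, idx: u32) -> u64;
    }

    // Right (u64-indexed):
    trait ColumnU64 {
        fn get_val(&self, idx: u64) -> u64;
    }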
@@ -203,15 +197,9 @@ mod tests {
         let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
         let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
         let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
-        fast_field_writers
-            .add_document(&doc!(*FIELD=>13u64))
-            .unwrap();
-        fast_field_writers
-            .add_document(&doc!(*FIELD=>14u64))
-            .unwrap();
-        fast_field_writers
-            .add_document(&doc!(*FIELD=>2u64))
-            .unwrap();
+        fast_field_writers.add_document(&doc!(*FIELD=>13u64));
+        fast_field_writers.add_document(&doc!(*FIELD=>14u64));
+        fast_field_writers.add_document(&doc!(*FIELD=>2u64));
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
             .unwrap();
@@ -236,33 +224,15 @@ mod tests {
         let write: WritePtr = directory.open_write(Path::new("test"))?;
         let mut serializer = CompositeFastFieldSerializer::from_write(write)?;
         let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
-        fast_field_writers
-            .add_document(&doc!(*FIELD=>4u64))
-            .unwrap();
-        fast_field_writers
-            .add_document(&doc!(*FIELD=>14_082_001u64))
-            .unwrap();
-        fast_field_writers
-            .add_document(&doc!(*FIELD=>3_052u64))
-            .unwrap();
-        fast_field_writers
-            .add_document(&doc!(*FIELD=>9_002u64))
-            .unwrap();
-        fast_field_writers
-            .add_document(&doc!(*FIELD=>15_001u64))
-            .unwrap();
-        fast_field_writers
-            .add_document(&doc!(*FIELD=>777u64))
-            .unwrap();
-        fast_field_writers
-            .add_document(&doc!(*FIELD=>1_002u64))
-            .unwrap();
-        fast_field_writers
-            .add_document(&doc!(*FIELD=>1_501u64))
-            .unwrap();
-        fast_field_writers
-            .add_document(&doc!(*FIELD=>215u64))
-            .unwrap();
+        fast_field_writers.add_document(&doc!(*FIELD=>4u64));
+        fast_field_writers.add_document(&doc!(*FIELD=>14_082_001u64));
+        fast_field_writers.add_document(&doc!(*FIELD=>3_052u64));
+        fast_field_writers.add_document(&doc!(*FIELD=>9_002u64));
+        fast_field_writers.add_document(&doc!(*FIELD=>15_001u64));
+        fast_field_writers.add_document(&doc!(*FIELD=>777u64));
+        fast_field_writers.add_document(&doc!(*FIELD=>1_002u64));
+        fast_field_writers.add_document(&doc!(*FIELD=>1_501u64));
+        fast_field_writers.add_document(&doc!(*FIELD=>215u64));
         fast_field_writers.serialize(&mut serializer, &HashMap::new(), None)?;
         serializer.close()?;
     }
@@ -298,9 +268,7 @@ mod tests {
         let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
         let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
         for _ in 0..10_000 {
-            fast_field_writers
-                .add_document(&doc!(*FIELD=>100_000u64))
-                .unwrap();
+            fast_field_writers.add_document(&doc!(*FIELD=>100_000u64));
         }
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
@@ -333,13 +301,9 @@ mod tests {
         let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
         let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
         // forcing the amplitude to be high
-        fast_field_writers
-            .add_document(&doc!(*FIELD=>0u64))
-            .unwrap();
+        fast_field_writers.add_document(&doc!(*FIELD=>0u64));
         for i in 0u64..10_000u64 {
-            fast_field_writers
-                .add_document(&doc!(*FIELD=>5_000_000_000_000_000_000u64 + i))
-                .unwrap();
+            fast_field_writers.add_document(&doc!(*FIELD=>5_000_000_000_000_000_000u64 + i));
         }
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
@@ -381,7 +345,7 @@ mod tests {
         for i in -100i64..10_000i64 {
             let mut doc = Document::default();
             doc.add_i64(i64_field, i);
-            fast_field_writers.add_document(&doc).unwrap();
+            fast_field_writers.add_document(&doc);
         }
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
@@ -402,7 +366,7 @@ mod tests {
         assert_eq!(fast_field_reader.min_value(), -100i64);
         assert_eq!(fast_field_reader.max_value(), 9_999i64);
         for (doc, i) in (-100i64..10_000i64).enumerate() {
-            assert_eq!(fast_field_reader.get_val(doc as u32), i);
+            assert_eq!(fast_field_reader.get_val(doc as u64), i);
         }
         let mut buffer = vec![0i64; 100];
         fast_field_reader.get_range(53, &mut buffer[..]);
@@ -426,7 +390,7 @@ mod tests {
         let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
         let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
         let doc = Document::default();
-        fast_field_writers.add_document(&doc).unwrap();
+        fast_field_writers.add_document(&doc);
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
             .unwrap();
@@ -469,7 +433,7 @@ mod tests {
         let mut serializer = CompositeFastFieldSerializer::from_write(write)?;
         let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
         for &x in &permutation {
-            fast_field_writers.add_document(&doc!(*FIELD=>x)).unwrap();
+            fast_field_writers.add_document(&doc!(*FIELD=>x));
         }
         fast_field_writers.serialize(&mut serializer, &HashMap::new(), None)?;
         serializer.close()?;
@@ -484,7 +448,7 @@ mod tests {
            let fast_field_reader = open::<u64>(data)?;
 
            for a in 0..n {
-                assert_eq!(fast_field_reader.get_val(a as u32), permutation[a as usize]);
+                assert_eq!(fast_field_reader.get_val(a as u64), permutation[a as usize]);
            }
        }
        Ok(())
@@ -819,14 +783,10 @@ mod tests {
         let write: WritePtr = directory.open_write(path).unwrap();
         let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
         let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
-        fast_field_writers.add_document(&doc!(field=>true)).unwrap();
-        fast_field_writers
-            .add_document(&doc!(field=>false))
-            .unwrap();
-        fast_field_writers.add_document(&doc!(field=>true)).unwrap();
-        fast_field_writers
-            .add_document(&doc!(field=>false))
-            .unwrap();
+        fast_field_writers.add_document(&doc!(field=>true));
+        fast_field_writers.add_document(&doc!(field=>false));
+        fast_field_writers.add_document(&doc!(field=>true));
+        fast_field_writers.add_document(&doc!(field=>false));
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
             .unwrap();
@@ -860,10 +820,8 @@ mod tests {
         let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
         let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
         for _ in 0..50 {
-            fast_field_writers.add_document(&doc!(field=>true)).unwrap();
-            fast_field_writers
-                .add_document(&doc!(field=>false))
-                .unwrap();
+            fast_field_writers.add_document(&doc!(field=>true));
+            fast_field_writers.add_document(&doc!(field=>false));
         }
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
@@ -897,7 +855,7 @@ mod tests {
         let mut serializer = CompositeFastFieldSerializer::from_write(write)?;
         let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
         let doc = Document::default();
-        fast_field_writers.add_document(&doc).unwrap();
+        fast_field_writers.add_document(&doc);
         fast_field_writers.serialize(&mut serializer, &HashMap::new(), None)?;
         serializer.close()?;
     }
@@ -923,7 +881,7 @@ mod tests {
             CompositeFastFieldSerializer::from_write_with_codec(write, codec_types).unwrap();
         let mut fast_field_writers = FastFieldsWriter::from_schema(schema);
         for doc in docs {
-            fast_field_writers.add_document(doc).unwrap();
+            fast_field_writers.add_document(doc);
         }
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
@@ -976,7 +934,7 @@ mod tests {
         let test_fastfield = open::<DateTime>(file.read_bytes()?)?;
 
         for (i, time) in times.iter().enumerate() {
-            assert_eq!(test_fastfield.get_val(i as u32), time.truncate(precision));
+            assert_eq!(test_fastfield.get_val(i as u64), time.truncate(precision));
         }
         Ok(len)
     }
@@ -1,21 +1,10 @@
+mod multivalue_start_index;
 mod reader;
 mod writer;
 
-use fastfield_codecs::FastFieldCodecType;
-
-pub use self::reader::{MultiValuedFastFieldReader, MultiValuedU128FastFieldReader};
-pub(crate) use self::writer::MultivalueStartIndex;
-pub use self::writer::{MultiValueU128FastFieldWriter, MultiValuedFastFieldWriter};
-
-/// The valid codecs for multivalue values excludes the linear interpolation codec.
-///
-/// This limitation is only valid for the values, not the offset index of the multivalue index.
-pub(crate) fn get_fastfield_codecs_for_multivalue() -> [FastFieldCodecType; 2] {
-    [
-        FastFieldCodecType::Bitpacked,
-        FastFieldCodecType::BlockwiseLinear,
-    ]
-}
+pub(crate) use self::multivalue_start_index::MultivalueStartIndex;
+pub use self::reader::MultiValuedFastFieldReader;
+pub use self::writer::MultiValuedFastFieldWriter;
 
 #[cfg(test)]
 mod tests {
@@ -414,74 +403,6 @@ mod bench {
     use crate::schema::{Cardinality, NumericOptions, Schema};
     use crate::Document;
 
-    fn bench_multi_value_ff_merge_opt(
-        num_docs: usize,
-        segments_every_n_docs: usize,
-        merge_policy: impl crate::indexer::MergePolicy + 'static,
-    ) {
-        let mut builder = crate::schema::SchemaBuilder::new();
-
-        let fast_multi =
-            crate::schema::NumericOptions::default().set_fast(Cardinality::MultiValues);
-        let multi_field = builder.add_f64_field("f64s", fast_multi);
-
-        let index = crate::Index::create_in_ram(builder.build());
-
-        let mut writer = index.writer_for_tests().unwrap();
-        writer.set_merge_policy(Box::new(merge_policy));
-
-        for i in 0..num_docs {
-            let mut doc = crate::Document::new();
-            doc.add_f64(multi_field, 0.24);
-            doc.add_f64(multi_field, 0.27);
-            doc.add_f64(multi_field, 0.37);
-            if i % 3 == 0 {
-                doc.add_f64(multi_field, 0.44);
-            }
-
-            writer.add_document(doc).unwrap();
-            if i % segments_every_n_docs == 0 {
-                writer.commit().unwrap();
-            }
-        }
-
-        {
-            writer.wait_merging_threads().unwrap();
-            let mut writer = index.writer_for_tests().unwrap();
-            let segment_ids = index.searchable_segment_ids().unwrap();
-            writer.merge(&segment_ids).wait().unwrap();
-        }
-
-        // If a merging thread fails, we should end up with more
-        // than one segment here
-        assert_eq!(1, index.searchable_segments().unwrap().len());
-    }
-
-    #[bench]
-    fn bench_multi_value_ff_merge_many_segments(b: &mut Bencher) {
-        let num_docs = 100_000;
-        b.iter(|| {
-            bench_multi_value_ff_merge_opt(num_docs, 1_000, crate::indexer::NoMergePolicy);
-        });
-    }
-
-    #[bench]
-    fn bench_multi_value_ff_merge_many_segments_log_merge(b: &mut Bencher) {
-        let num_docs = 100_000;
-        b.iter(|| {
-            let merge_policy = crate::indexer::LogMergePolicy::default();
-            bench_multi_value_ff_merge_opt(num_docs, 1_000, merge_policy);
-        });
-    }
-
-    #[bench]
-    fn bench_multi_value_ff_merge_few_segments(b: &mut Bencher) {
-        let num_docs = 100_000;
-        b.iter(|| {
-            bench_multi_value_ff_merge_opt(num_docs, 33_000, crate::indexer::NoMergePolicy);
-        });
-    }
-
     fn multi_values(num_docs: usize, vals_per_doc: usize) -> Vec<Vec<u64>> {
         let mut vals = vec![];
         for _i in 0..num_docs {
@@ -515,7 +436,7 @@ mod bench {
             for val in block {
                 doc.add_u64(field, *val);
             }
-            fast_field_writers.add_document(&doc).unwrap();
+            fast_field_writers.add_document(&doc);
         }
         fast_field_writers
             .serialize(&mut serializer, &HashMap::new(), None)
@@ -573,7 +494,7 @@ mod bench {
             for val in block {
                 doc.add_u64(field, *val);
             }
-            fast_field_writers.add_document(&doc).unwrap();
+            fast_field_writers.add_document(&doc);
         }
         fast_field_writers
            .serialize(&mut serializer, &HashMap::new(), None)
@@ -606,7 +527,7 @@ mod bench {
            for val in block {
                doc.add_u64(field, *val);
            }
-            fast_field_writers.add_document(&doc).unwrap();
+            fast_field_writers.add_document(&doc);
        }
        fast_field_writers
            .serialize(&mut serializer, &HashMap::new(), Some(&doc_id_mapping))
src/fastfield/multivalued/multivalue_start_index.rs (new file, 171 lines)
@@ -0,0 +1,171 @@
+use fastfield_codecs::{Column, ColumnReader};
+
+use crate::indexer::doc_id_mapping::DocIdMapping;
+
+pub(crate) struct MultivalueStartIndex<'a, C: Column> {
+    column: &'a C,
+    doc_id_map: &'a DocIdMapping,
+    min_value: u64,
+    max_value: u64,
+}
+
+struct MultivalueStartIndexReader<'a, C: Column> {
+    seek_head: MultivalueStartIndexIter<'a, C>,
+    seek_next_id: u64,
+}
+
+impl<'a, C: Column> MultivalueStartIndexReader<'a, C> {
+    fn new(column: &'a C, doc_id_map: &'a DocIdMapping) -> Self {
+        Self {
+            seek_head: MultivalueStartIndexIter {
+                column,
+                doc_id_map,
+                new_doc_id: 0,
+                offset: 0u64,
+            },
+            seek_next_id: 0u64,
+        }
+    }
+
+    fn reset(&mut self) {
+        self.seek_next_id = 0;
+        self.seek_head.new_doc_id = 0;
+        self.seek_head.offset = 0;
+    }
+}
+
+impl<'a, C: Column> ColumnReader for MultivalueStartIndexReader<'a, C> {
+    fn seek(&mut self, idx: u64) -> u64 {
+        if self.seek_next_id > idx {
+            self.reset();
+        }
+        let to_skip = idx - self.seek_next_id;
+        self.seek_next_id = idx + 1;
+        self.seek_head.nth(to_skip as usize).unwrap()
+    }
+}
+
+impl<'a, C: Column> MultivalueStartIndex<'a, C> {
+    pub fn new(column: &'a C, doc_id_map: &'a DocIdMapping) -> Self {
+        assert_eq!(column.num_vals(), doc_id_map.num_old_doc_ids() as u64 + 1);
+        let iter = MultivalueStartIndexIter::new(column, doc_id_map);
+        let (min_value, max_value) = tantivy_bitpacker::minmax(iter).unwrap_or((0, 0));
+        MultivalueStartIndex {
+            column,
+            doc_id_map,
+            min_value,
+            max_value,
+        }
+    }
+
+    fn specialized_reader(&self) -> MultivalueStartIndexReader<'a, C> {
+        MultivalueStartIndexReader::new(self.column, self.doc_id_map)
+    }
+}
+impl<'a, C: Column> Column for MultivalueStartIndex<'a, C> {
+    fn reader(&self) -> Box<dyn ColumnReader + '_> {
+        Box::new(self.specialized_reader())
+    }
+
+    fn get_val(&self, idx: u64) -> u64 {
+        let mut reader = self.specialized_reader();
+        reader.seek(idx)
+    }
+
+    fn min_value(&self) -> u64 {
+        self.min_value
+    }
+
+    fn max_value(&self) -> u64 {
+        self.max_value
+    }
+
+    fn num_vals(&self) -> u64 {
+        (self.doc_id_map.num_new_doc_ids() + 1) as u64
+    }
+
+    fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
+        Box::new(MultivalueStartIndexIter::new(self.column, self.doc_id_map))
+    }
+}
+
+struct MultivalueStartIndexIter<'a, C: Column> {
+    column: &'a C,
+    doc_id_map: &'a DocIdMapping,
+    new_doc_id: usize,
+    offset: u64,
+}
+
+impl<'a, C: Column> MultivalueStartIndexIter<'a, C> {
+    fn new(column: &'a C, doc_id_map: &'a DocIdMapping) -> Self {
+        Self {
+            column,
+            doc_id_map,
+            new_doc_id: 0,
+            offset: 0,
+        }
+    }
+}
+
+impl<'a, C: Column> Iterator for MultivalueStartIndexIter<'a, C> {
+    type Item = u64;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.new_doc_id > self.doc_id_map.num_new_doc_ids() {
+            return None;
+        }
+        let new_doc_id = self.new_doc_id;
+        self.new_doc_id += 1;
+        let start_offset = self.offset;
+        if new_doc_id < self.doc_id_map.num_new_doc_ids() {
+            let old_doc = self.doc_id_map.get_old_doc_id(new_doc_id as u32) as u64;
+            let num_vals_for_doc = self.column.get_val(old_doc + 1) - self.column.get_val(old_doc);
+            self.offset += num_vals_for_doc;
+        }
+        Some(start_offset)
+    }
+}
+
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use fastfield_codecs::VecColumn;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_multivalue_start_index() {
|
||||||
|
let doc_id_mapping = DocIdMapping::from_new_id_to_old_id(vec![4, 1, 2]);
|
||||||
|
assert_eq!(doc_id_mapping.num_old_doc_ids(), 5);
|
||||||
|
let col = VecColumn::from(&[0u64, 3, 5, 10, 12, 16][..]);
|
||||||
|
let multivalue_start_index = MultivalueStartIndex::new(
|
||||||
|
&col, // 3, 2, 5, 2, 4
|
||||||
|
&doc_id_mapping,
|
||||||
|
);
|
||||||
|
assert_eq!(multivalue_start_index.num_vals(), 4);
|
||||||
|
assert_eq!(
|
||||||
|
multivalue_start_index.iter().collect::<Vec<u64>>(),
|
||||||
|
vec![0, 4, 6, 11]
|
||||||
|
); // 4, 2, 5
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_multivalue_get_vals() {
|
||||||
|
let doc_id_mapping =
|
||||||
|
DocIdMapping::from_new_id_to_old_id(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
|
||||||
|
assert_eq!(doc_id_mapping.num_old_doc_ids(), 10);
|
||||||
|
let col = VecColumn::from(&[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55][..]);
|
||||||
|
let multivalue_start_index = MultivalueStartIndex::new(&col, &doc_id_mapping);
|
||||||
|
assert_eq!(
|
||||||
|
multivalue_start_index.iter().collect::<Vec<u64>>(),
|
||||||
|
vec![0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
|
||||||
|
);
|
||||||
|
assert_eq!(multivalue_start_index.num_vals(), 11);
|
||||||
|
let mut multivalue_start_index_reader = multivalue_start_index.reader();
|
||||||
|
assert_eq!(multivalue_start_index_reader.seek(3), 2);
|
||||||
|
assert_eq!(multivalue_start_index_reader.seek(5), 5);
|
||||||
|
assert_eq!(multivalue_start_index_reader.seek(8), 21);
|
||||||
|
assert_eq!(multivalue_start_index_reader.seek(4), 3);
|
||||||
|
assert_eq!(multivalue_start_index_reader.seek(0), 0);
|
||||||
|
assert_eq!(multivalue_start_index_reader.seek(10), 55);
|
||||||
|
}
|
||||||
|
}
|
||||||
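The core of the new file is the combination of two ideas: remapping the start-offset column through a new-to-old doc-id mapping, and exposing it through the stateful `ColumnReader::seek` contract, which skips forward through an iterator and restarts from scratch on a backward seek. What follows is a self-contained sketch of both, not part of the diff; `StartIndexReader` is a hypothetical name, and the numbers come from `test_multivalue_start_index` above.

    // Minimal sketch of MultivalueStartIndexReader's behavior.
    struct StartIndexReader {
        old_offsets: Vec<u64>, // one entry per old doc, plus the trailing total
        new_to_old: Vec<u32>,  // new doc-id order
        next_idx: u64,         // position `offset` currently corresponds to
        offset: u64,
    }

    impl StartIndexReader {
        fn new(old_offsets: Vec<u64>, new_to_old: Vec<u32>) -> Self {
            Self { old_offsets, new_to_old, next_idx: 0, offset: 0 }
        }

        /// Returns the start offset at `idx` in new doc-id order.
        fn seek(&mut self, idx: u64) -> u64 {
            if idx < self.next_idx {
                // backward seek: reset the cursor, as in the diff's reset()
                self.next_idx = 0;
                self.offset = 0;
            }
            while self.next_idx < idx {
                let old_doc = self.new_to_old[self.next_idx as usize] as usize;
                // add the number of values the old doc owned
                self.offset += self.old_offsets[old_doc + 1] - self.old_offsets[old_doc];
                self.next_idx += 1;
            }
            self.offset
        }
    }

    fn main() {
        // Old docs own 3, 2, 5, 2, 4 values; the new order keeps docs 4, 1, 2,
        // so the remapped start offsets are [0, 4, 6, 11].
        let mut reader = StartIndexReader::new(vec![0, 3, 5, 10, 12, 16], vec![4, 1, 2]);
        assert_eq!(reader.seek(0), 0);
        assert_eq!(reader.seek(2), 6);
        assert_eq!(reader.seek(3), 11);
        assert_eq!(reader.seek(1), 4); // backward seek restarts the scan
    }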
@@ -1,7 +1,7 @@
-use std::ops::{Range, RangeInclusive};
+use std::ops::Range;
 use std::sync::Arc;
 
-use fastfield_codecs::{Column, MonotonicallyMappableToU128};
+use fastfield_codecs::Column;
 
 use crate::fastfield::{FastValue, MultiValueLength};
 use crate::DocId;
@@ -30,25 +30,25 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
         }
     }
 
-    /// Returns `[start, end)`, such that the values associated with
-    /// the given document are `start..end`.
+    /// Returns `[start, end)`, such that the values associated
+    /// to the given document are `start..end`.
     #[inline]
-    fn range(&self, doc: DocId) -> Range<u32> {
-        let start = self.idx_reader.get_val(doc) as u32;
-        let end = self.idx_reader.get_val(doc + 1) as u32;
+    fn range(&self, doc: DocId) -> Range<u64> {
+        let idx = doc as u64;
+        let start = self.idx_reader.get_val(idx);
+        let end = self.idx_reader.get_val(idx + 1);
         start..end
     }
 
-    /// Returns the array of values associated with the given `doc`.
+    /// Returns the array of values associated to the given `doc`.
     #[inline]
-    fn get_vals_for_range(&self, range: Range<u32>, vals: &mut Vec<Item>) {
+    fn get_vals_for_range(&self, range: Range<u64>, vals: &mut Vec<Item>) {
         let len = (range.end - range.start) as usize;
         vals.resize(len, Item::make_zero());
-        self.vals_reader
-            .get_range(range.start as u64, &mut vals[..]);
+        self.vals_reader.get_range(range.start, &mut vals[..]);
     }
 
-    /// Returns the array of values associated with the given `doc`.
+    /// Returns the array of values associated to the given `doc`.
     #[inline]
     pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
         let range = self.range(doc);
@@ -88,9 +88,6 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
 }
 
 impl<Item: FastValue> MultiValueLength for MultiValuedFastFieldReader<Item> {
-    fn get_range(&self, doc_id: DocId) -> Range<u32> {
-        self.range(doc_id)
-    }
     fn get_len(&self, doc_id: DocId) -> u64 {
         self.num_vals(doc_id) as u64
     }
@@ -99,183 +96,12 @@ impl<Item: FastValue> MultiValueLength for MultiValuedFastFieldReader<Item> {
         self.total_num_vals() as u64
     }
 }
-
-/// Reader for a multivalued `u128` fast field.
-///
-/// The reader is implemented as a `u64` fast field for the index and a `u128` fast field.
-///
-/// The `vals_reader` will access the concatenated list of all
-/// values for all reader.
-/// The `idx_reader` associated, for each document, the index of its first value.
-#[derive(Clone)]
-pub struct MultiValuedU128FastFieldReader<T: MonotonicallyMappableToU128> {
-    idx_reader: Arc<dyn Column<u64>>,
-    vals_reader: Arc<dyn Column<T>>,
-}
-
-impl<T: MonotonicallyMappableToU128> MultiValuedU128FastFieldReader<T> {
-    pub(crate) fn open(
-        idx_reader: Arc<dyn Column<u64>>,
-        vals_reader: Arc<dyn Column<T>>,
-    ) -> MultiValuedU128FastFieldReader<T> {
-        Self {
-            idx_reader,
-            vals_reader,
-        }
-    }
-
-    /// Returns `[start, end)`, such that the values associated
-    /// to the given document are `start..end`.
-    #[inline]
-    fn range(&self, doc: DocId) -> Range<u32> {
-        let start = self.idx_reader.get_val(doc) as u32;
-        let end = self.idx_reader.get_val(doc + 1) as u32;
-        start..end
-    }
-
-    /// Returns the array of values associated to the given `doc`.
-    #[inline]
-    pub fn get_first_val(&self, doc: DocId) -> Option<T> {
-        let range = self.range(doc);
-        if range.is_empty() {
-            return None;
-        }
-        Some(self.vals_reader.get_val(range.start))
-    }
-
-    /// Returns the array of values associated to the given `doc`.
-    #[inline]
-    fn get_vals_for_range(&self, range: Range<u32>, vals: &mut Vec<T>) {
-        let len = (range.end - range.start) as usize;
-        vals.resize(len, T::from_u128(0));
-        self.vals_reader
-            .get_range(range.start as u64, &mut vals[..]);
-    }
-
-    /// Returns the array of values associated to the given `doc`.
-    #[inline]
-    pub fn get_vals(&self, doc: DocId, vals: &mut Vec<T>) {
-        let range = self.range(doc);
-        self.get_vals_for_range(range, vals);
-    }
-
-    /// Returns all docids which are in the provided value range
-    pub fn get_positions_for_value_range(
-        &self,
-        value_range: RangeInclusive<T>,
-        doc_id_range: Range<u32>,
-    ) -> Vec<DocId> {
-        let mut positions = Vec::new(); // TODO replace
-        self.vals_reader
-            .get_positions_for_value_range(value_range, doc_id_range, &mut positions);
-
-        positions_to_docids(&positions, self.idx_reader.as_ref())
-    }
-
-    /// Iterates over all elements in the fast field
-    pub fn iter(&self) -> impl Iterator<Item = T> + '_ {
-        self.vals_reader.iter()
-    }
-
-    /// Returns the minimum value for this fast field.
-    ///
-    /// The min value does not take in account of possible
-    /// deleted document, and should be considered as a lower bound
-    /// of the actual mimimum value.
-    pub fn min_value(&self) -> T {
-        self.vals_reader.min_value()
-    }
-
-    /// Returns the maximum value for this fast field.
-    ///
-    /// The max value does not take in account of possible
-    /// deleted document, and should be considered as an upper bound
-    /// of the actual maximum value.
-    pub fn max_value(&self) -> T {
-        self.vals_reader.max_value()
-    }
-
-    /// Returns the number of values associated with the document `DocId`.
-    #[inline]
-    pub fn num_vals(&self, doc: DocId) -> usize {
-        let range = self.range(doc);
-        (range.end - range.start) as usize
-    }
-
-    /// Returns the overall number of values in this field.
-    #[inline]
-    pub fn total_num_vals(&self) -> u64 {
-        self.idx_reader.max_value()
-    }
-}
-
-impl<T: MonotonicallyMappableToU128> MultiValueLength for MultiValuedU128FastFieldReader<T> {
-    fn get_range(&self, doc_id: DocId) -> std::ops::Range<u32> {
-        self.range(doc_id)
-    }
-    fn get_len(&self, doc_id: DocId) -> u64 {
-        self.num_vals(doc_id) as u64
-    }
-    fn get_total_len(&self) -> u64 {
-        self.total_num_vals() as u64
-    }
-}
-
-/// Converts a list of positions of values in a 1:n index to the corresponding list of DocIds.
-///
-/// Since there is no index for value pos -> docid, but docid -> value pos range, we scan the index.
-///
-/// Correctness: positions needs to be sorted. idx_reader needs to contain monotonically increasing
-/// positions.
-///
-/// TODO: Instead of a linear scan we can employ a expotential search into binary search to match a
-/// docid to its value position.
-fn positions_to_docids<C: Column + ?Sized>(positions: &[u32], idx_reader: &C) -> Vec<DocId> {
-    let mut docs = vec![];
-    let mut cur_doc = 0u32;
-    let mut last_doc = None;
-
-    for pos in positions {
-        loop {
-            let end = idx_reader.get_val(cur_doc + 1) as u32;
-            if end > *pos {
-                // avoid duplicates
-                if Some(cur_doc) == last_doc {
-                    break;
-                }
-                docs.push(cur_doc);
-                last_doc = Some(cur_doc);
-                break;
-            }
-            cur_doc += 1;
-        }
-    }
-
-    docs
-}
-
 #[cfg(test)]
 mod tests {
-    use fastfield_codecs::VecColumn;
-
     use crate::core::Index;
-    use crate::fastfield::multivalued::reader::positions_to_docids;
     use crate::schema::{Cardinality, Facet, FacetOptions, NumericOptions, Schema};
 
-    #[test]
-    fn test_positions_to_docid() {
-        let positions = vec![10u32, 11, 15, 20, 21, 22];
-
-        let offsets = vec![0, 10, 12, 15, 22, 23];
-        {
-            let column = VecColumn::from(&offsets);
-
-            let docids = positions_to_docids(&positions, &column);
-            assert_eq!(docids, vec![1, 3, 4]);
-        }
-    }
-
     #[test]
     fn test_multifastfield_reader() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
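A note on the removed `positions_to_docids` helper: it maps sorted value positions back to doc ids with a single forward scan of the doc-to-offset index. Below is a standalone sketch of that scan, not from the diff, checked against the numbers of the removed `test_positions_to_docid`.

    // Each doc d owns value positions offsets[d]..offsets[d + 1].
    fn positions_to_docids(positions: &[u32], offsets: &[u64]) -> Vec<u32> {
        let mut docs = Vec::new();
        let mut cur_doc = 0u32;
        let mut last_doc = None;
        for &pos in positions {
            // advance until cur_doc's value range covers pos
            while offsets[cur_doc as usize + 1] as u32 <= pos {
                cur_doc += 1;
            }
            // avoid pushing the same doc twice for adjacent positions
            if last_doc != Some(cur_doc) {
                docs.push(cur_doc);
                last_doc = Some(cur_doc);
            }
        }
        docs
    }

    fn main() {
        let offsets = [0u64, 10, 12, 15, 22, 23];
        let positions = [10u32, 11, 15, 20, 21, 22];
        assert_eq!(positions_to_docids(&positions, &offsets), vec![1, 3, 4]);
    }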
@@ -1,13 +1,11 @@
 use std::io;
 
-use fastfield_codecs::{
-    Column, MonotonicallyMappableToU128, MonotonicallyMappableToU64, VecColumn,
-};
-use rustc_hash::FxHashMap;
+use fastfield_codecs::{MonotonicallyMappableToU64, VecColumn};
+use fnv::FnvHashMap;
 
-use super::get_fastfield_codecs_for_multivalue;
-use crate::fastfield::writer::unexpected_value;
-use crate::fastfield::{value_to_u64, CompositeFastFieldSerializer, FastFieldType};
+use crate::fastfield::{
+    value_to_u64, CompositeFastFieldSerializer, FastFieldType, MultivalueStartIndex,
+};
 use crate::indexer::doc_id_mapping::DocIdMapping;
 use crate::postings::UnorderedTermId;
 use crate::schema::{Document, Field, Value};
@@ -65,7 +63,7 @@ impl MultiValuedFastFieldWriter {
             + self.doc_index.capacity() * std::mem::size_of::<u64>()
     }
 
-    /// Access the field associated with the `MultiValuedFastFieldWriter`
+    /// Access the field associated to the `MultiValuedFastFieldWriter`
     pub fn field(&self) -> Field {
         self.field
     }
@@ -82,11 +80,11 @@ impl MultiValuedFastFieldWriter {
 
     /// Shift to the next document and adds
     /// all of the matching field values present in the document.
-    pub fn add_document(&mut self, doc: &Document) -> crate::Result<()> {
+    pub fn add_document(&mut self, doc: &Document) {
         self.next_doc();
         // facets/texts are indexed in the `SegmentWriter` as we encode their unordered id.
         if self.fast_field_type.is_storing_term_ids() {
-            return Ok(());
+            return;
         }
         for field_value in doc.field_values() {
             if field_value.field == self.field {
@@ -95,12 +93,11 @@ impl MultiValuedFastFieldWriter {
                     (Some(precision), Value::Date(date_val)) => {
                         date_val.truncate(precision).to_u64()
                     }
-                    _ => value_to_u64(value)?,
+                    _ => value_to_u64(value),
                 };
                 self.add_val(value_u64);
             }
         }
-        Ok(())
     }
 
     /// Returns an iterator over values per doc_id in ascending doc_id order.
@@ -144,7 +141,7 @@ impl MultiValuedFastFieldWriter {
     pub fn serialize(
         mut self,
         serializer: &mut CompositeFastFieldSerializer,
-        term_mapping_opt: Option<&FxHashMap<UnorderedTermId, TermOrdinal>>,
+        term_mapping_opt: Option<&FnvHashMap<UnorderedTermId, TermOrdinal>>,
         doc_id_map: Option<&DocIdMapping>,
     ) -> io::Result<()> {
         {
@@ -199,244 +196,8 @@ impl MultiValuedFastFieldWriter {
                 }
             }
             let col = VecColumn::from(&values[..]);
-            serializer.create_auto_detect_u64_fast_field_with_idx_and_codecs(
-                self.field,
-                col,
-                1,
-                &get_fastfield_codecs_for_multivalue(),
-            )?;
+            serializer.create_auto_detect_u64_fast_field_with_idx(self.field, col, 1)?;
         }
         Ok(())
     }
 }
-
-pub(crate) struct MultivalueStartIndex<'a, C: Column> {
-    column: &'a C,
-    doc_id_map: &'a DocIdMapping,
-    min: u64,
-    max: u64,
-}
-
-impl<'a, C: Column> MultivalueStartIndex<'a, C> {
-    pub fn new(column: &'a C, doc_id_map: &'a DocIdMapping) -> Self {
-        assert_eq!(column.num_vals(), doc_id_map.num_old_doc_ids() as u32 + 1);
-        let (min, max) =
-            tantivy_bitpacker::minmax(iter_remapped_multivalue_index(doc_id_map, column))
-                .unwrap_or((0u64, 0u64));
-        MultivalueStartIndex {
-            column,
-            doc_id_map,
-            min,
-            max,
-        }
-    }
-}
-impl<'a, C: Column> Column for MultivalueStartIndex<'a, C> {
-    fn get_val(&self, _idx: u32) -> u64 {
-        unimplemented!()
-    }
-
-    fn min_value(&self) -> u64 {
-        self.min
-    }
-
-    fn max_value(&self) -> u64 {
-        self.max
-    }
-
-    fn num_vals(&self) -> u32 {
-        (self.doc_id_map.num_new_doc_ids() + 1) as u32
-    }
-
-    fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
-        Box::new(iter_remapped_multivalue_index(
-            self.doc_id_map,
-            &self.column,
-        ))
-    }
-}
-
-fn iter_remapped_multivalue_index<'a, C: Column>(
-    doc_id_map: &'a DocIdMapping,
-    column: &'a C,
-) -> impl Iterator<Item = u64> + 'a {
-    let mut offset = 0;
-    std::iter::once(0).chain(doc_id_map.iter_old_doc_ids().map(move |old_doc| {
-        let num_vals_for_doc = column.get_val(old_doc + 1) - column.get_val(old_doc);
-        offset += num_vals_for_doc;
-        offset as u64
-    }))
-}
-
-/// Writer for multi-valued (as in, more than one value per document)
-/// int fast field.
-///
-/// This `Writer` is only useful for advanced users.
-/// The normal way to get your multivalued int in your index
-/// is to
-/// - declare your field with fast set to `Cardinality::MultiValues`
-/// in your schema
-/// - add your document simply by calling `.add_document(...)`.
-///
-/// The `MultiValuedFastFieldWriter` can be acquired from the
-pub struct MultiValueU128FastFieldWriter {
-    field: Field,
-    vals: Vec<u128>,
-    doc_index: Vec<u64>,
-}
-
-impl MultiValueU128FastFieldWriter {
-    /// Creates a new `U128MultiValueFastFieldWriter`
-    pub(crate) fn new(field: Field) -> Self {
-        MultiValueU128FastFieldWriter {
-            field,
-            vals: Vec::new(),
-            doc_index: Vec::new(),
-        }
-    }
-
-    /// The memory used (inclusive childs)
-    pub fn mem_usage(&self) -> usize {
-        self.vals.capacity() * std::mem::size_of::<UnorderedTermId>()
-            + self.doc_index.capacity() * std::mem::size_of::<u64>()
-    }
-
-    /// Finalize the current document.
-    pub(crate) fn next_doc(&mut self) {
-        self.doc_index.push(self.vals.len() as u64);
-    }
-
-    /// Pushes a new value to the current document.
-    pub(crate) fn add_val(&mut self, val: u128) {
-        self.vals.push(val);
-    }
-
-    /// Shift to the next document and adds
-    /// all of the matching field values present in the document.
-    pub fn add_document(&mut self, doc: &Document) -> crate::Result<()> {
-        self.next_doc();
-        for field_value in doc.field_values() {
-            if field_value.field == self.field {
-                let value = field_value.value();
-                let ip_addr = value
-                    .as_ip_addr()
-                    .ok_or_else(|| unexpected_value("ip", value))?;
-                let ip_addr_u128 = ip_addr.to_u128();
-                self.add_val(ip_addr_u128);
-            }
-        }
-        Ok(())
-    }
-
-    /// Returns an iterator over values per doc_id in ascending doc_id order.
-    ///
-    /// Normally the order is simply iterating self.doc_id_index.
-    /// With doc_id_map it accounts for the new mapping, returning values in the order of the
-    /// new doc_ids.
-    fn get_ordered_values<'a: 'b, 'b>(
-        &'a self,
-        doc_id_map: Option<&'b DocIdMapping>,
-    ) -> impl Iterator<Item = &'b [u128]> {
-        get_ordered_values(&self.vals, &self.doc_index, doc_id_map)
-    }
-
-    /// Serializes fast field values.
-    pub fn serialize(
-        mut self,
-        serializer: &mut CompositeFastFieldSerializer,
-        doc_id_map: Option<&DocIdMapping>,
-    ) -> io::Result<()> {
-        {
-            // writing the offset index
-            //
-            self.doc_index.push(self.vals.len() as u64);
-            let col = VecColumn::from(&self.doc_index[..]);
-            if let Some(doc_id_map) = doc_id_map {
-                let multi_value_start_index = MultivalueStartIndex::new(&col, doc_id_map);
-                serializer.create_auto_detect_u64_fast_field_with_idx(
-                    self.field,
-                    multi_value_start_index,
-                    0,
-                )?;
-            } else {
-                serializer.create_auto_detect_u64_fast_field_with_idx(self.field, col, 0)?;
-            }
-        }
-        {
-            let iter_gen = || self.get_ordered_values(doc_id_map).flatten().cloned();
-
-            serializer.create_u128_fast_field_with_idx(
-                self.field,
-                iter_gen,
-                self.vals.len() as u32,
-                1,
-            )?;
-        }
-        Ok(())
-    }
-}
-
-/// Returns an iterator over values per doc_id in ascending doc_id order.
-///
-/// Normally the order is simply iterating self.doc_id_index.
-/// With doc_id_map it accounts for the new mapping, returning values in the order of the
-/// new doc_ids.
-fn get_ordered_values<'a: 'b, 'b, T>(
-    vals: &'a [T],
-    doc_index: &'a [u64],
-    doc_id_map: Option<&'b DocIdMapping>,
-) -> impl Iterator<Item = &'b [T]> {
-    let doc_id_iter: Box<dyn Iterator<Item = u32>> = if let Some(doc_id_map) = doc_id_map {
-        Box::new(doc_id_map.iter_old_doc_ids())
-    } else {
-        let max_doc = doc_index.len() as DocId;
-        Box::new(0..max_doc)
-    };
-    doc_id_iter.map(move |doc_id| get_values_for_doc_id(doc_id, vals, doc_index))
-}
-
-/// returns all values for a doc_id
-fn get_values_for_doc_id<'a, T>(doc_id: u32, vals: &'a [T], doc_index: &'a [u64]) -> &'a [T] {
-    let start_pos = doc_index[doc_id as usize] as usize;
-    let end_pos = doc_index
-        .get(doc_id as usize + 1)
-        .cloned()
-        .unwrap_or(vals.len() as u64) as usize; // special case, last doc_id has no offset information
-    &vals[start_pos..end_pos]
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_multivalue_start_index() {
-        let doc_id_mapping = DocIdMapping::from_new_id_to_old_id(vec![4, 1, 2]);
-        assert_eq!(doc_id_mapping.num_old_doc_ids(), 5);
-        let col = VecColumn::from(&[0u64, 3, 5, 10, 12, 16][..]);
-        let multivalue_start_index = MultivalueStartIndex::new(
-            &col, // 3, 2, 5, 2, 4
-            &doc_id_mapping,
-        );
-        assert_eq!(multivalue_start_index.num_vals(), 4);
-        assert_eq!(
-            multivalue_start_index.iter().collect::<Vec<u64>>(),
-            vec![0, 4, 6, 11]
-        ); // 4, 2, 5
-    }
-
-    #[test]
-    fn test_multivalue_get_vals() {
-        let doc_id_mapping =
-            DocIdMapping::from_new_id_to_old_id(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
-        assert_eq!(doc_id_mapping.num_old_doc_ids(), 10);
-        let col = VecColumn::from(&[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55][..]);
-        let multivalue_start_index = MultivalueStartIndex::new(&col, &doc_id_mapping);
-        assert_eq!(
-            multivalue_start_index.iter().collect::<Vec<u64>>(),
-            vec![0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
-        );
-        assert_eq!(multivalue_start_index.num_vals(), 11);
-    }
-}
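All of the multivalue writers above share one layout: the values of every document are flattened into a single `vals` array, and `doc_index[d]` records where doc `d`'s values start; the last doc's values run to the end of `vals`. A minimal standalone sketch of the lookup performed by the removed `get_values_for_doc_id` (names and numbers illustrative):

    fn values_for_doc<'a>(doc: usize, vals: &'a [u64], doc_index: &[u64]) -> &'a [u64] {
        let start = doc_index[doc] as usize;
        // the last doc has no trailing offset entry; its values run to the end
        let end = doc_index
            .get(doc + 1)
            .copied()
            .unwrap_or(vals.len() as u64) as usize;
        &vals[start..end]
    }

    fn main() {
        let vals = [7u64, 8, 9, 10, 11];
        let doc_index = [0u64, 2, 2]; // doc 0: two values, doc 1: none, doc 2: the rest
        assert_eq!(values_for_doc(0, &vals, &doc_index), &[7, 8]);
        assert_eq!(values_for_doc(1, &vals, &doc_index), &[] as &[u64]);
        assert_eq!(values_for_doc(2, &vals, &doc_index), &[9, 10, 11]);
    }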
@@ -1,9 +1,7 @@
-use std::net::Ipv6Addr;
 use std::sync::Arc;
 
-use fastfield_codecs::{open, open_u128, Column};
+use fastfield_codecs::{open, Column};
 
-use super::multivalued::MultiValuedU128FastFieldReader;
 use crate::directory::{CompositeFile, FileSlice};
 use crate::fastfield::{
     BytesFastFieldReader, FastFieldNotAvailableError, FastValue, MultiValuedFastFieldReader,
@@ -25,7 +23,6 @@ pub struct FastFieldReaders {
 pub(crate) enum FastType {
     I64,
     U64,
-    U128,
     F64,
     Bool,
     Date,
@@ -52,9 +49,6 @@ pub(crate) fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType,
         FieldType::Str(options) if options.is_fast() => {
            Some((FastType::U64, Cardinality::MultiValues))
         }
-        FieldType::IpAddr(options) => options
-            .get_fastfield_cardinality()
-            .map(|cardinality| (FastType::U128, cardinality)),
         _ => None,
     }
 }
@@ -141,7 +135,7 @@ impl FastFieldReaders {
         Ok(MultiValuedFastFieldReader::open(idx_reader, vals_reader))
     }
 
-    /// Returns the `u64` fast field reader reader associated with `field`.
+    /// Returns the `u64` fast field reader reader associated to `field`.
     ///
     /// If `field` is not a u64 fast field, this method returns an Error.
     pub fn u64(&self, field: Field) -> crate::Result<Arc<dyn Column<u64>>> {
@@ -149,69 +143,16 @@ impl FastFieldReaders {
         self.typed_fast_field_reader(field)
     }
 
-    /// Returns the `ip` fast field reader reader associated to `field`.
-    ///
-    /// If `field` is not a u128 fast field, this method returns an Error.
-    pub fn ip_addr(&self, field: Field) -> crate::Result<Arc<dyn Column<Ipv6Addr>>> {
-        self.check_type(field, FastType::U128, Cardinality::SingleValue)?;
-        let bytes = self.fast_field_data(field, 0)?.read_bytes()?;
-        Ok(open_u128::<Ipv6Addr>(bytes)?)
-    }
-
-    /// Returns the `ip` fast field reader reader associated to `field`.
-    ///
-    /// If `field` is not a u128 fast field, this method returns an Error.
-    pub fn ip_addrs(
-        &self,
-        field: Field,
-    ) -> crate::Result<MultiValuedU128FastFieldReader<Ipv6Addr>> {
-        self.check_type(field, FastType::U128, Cardinality::MultiValues)?;
-        let idx_reader: Arc<dyn Column<u64>> = self.typed_fast_field_reader(field)?;
-
-        let bytes = self.fast_field_data(field, 1)?.read_bytes()?;
-        let vals_reader = open_u128::<Ipv6Addr>(bytes)?;
-
-        Ok(MultiValuedU128FastFieldReader::open(
-            idx_reader,
-            vals_reader,
-        ))
-    }
-
-    /// Returns the `u128` fast field reader reader associated to `field`.
-    ///
-    /// If `field` is not a u128 fast field, this method returns an Error.
-    pub(crate) fn u128(&self, field: Field) -> crate::Result<Arc<dyn Column<u128>>> {
-        self.check_type(field, FastType::U128, Cardinality::SingleValue)?;
-        let bytes = self.fast_field_data(field, 0)?.read_bytes()?;
-        Ok(open_u128::<u128>(bytes)?)
-    }
-
-    /// Returns the `u128` multi-valued fast field reader reader associated to `field`.
-    ///
-    /// If `field` is not a u128 multi-valued fast field, this method returns an Error.
-    pub fn u128s(&self, field: Field) -> crate::Result<MultiValuedU128FastFieldReader<u128>> {
-        self.check_type(field, FastType::U128, Cardinality::MultiValues)?;
-        let idx_reader: Arc<dyn Column<u64>> = self.typed_fast_field_reader(field)?;
-
-        let bytes = self.fast_field_data(field, 1)?.read_bytes()?;
-        let vals_reader = open_u128::<u128>(bytes)?;
-
-        Ok(MultiValuedU128FastFieldReader::open(
-            idx_reader,
-            vals_reader,
-        ))
-    }
-
-    /// Returns the `u64` fast field reader reader associated with `field`, regardless of whether
-    /// the given field is effectively of type `u64` or not.
+    /// Returns the `u64` fast field reader reader associated to `field`, regardless of whether the
+    /// given field is effectively of type `u64` or not.
     ///
-    /// If not, the fastfield reader will returns the u64-value associated with the original
+    /// If not, the fastfield reader will returns the u64-value associated to the original
     /// FastValue.
     pub fn u64_lenient(&self, field: Field) -> crate::Result<Arc<dyn Column<u64>>> {
         self.typed_fast_field_reader(field)
     }
 
-    /// Returns the `i64` fast field reader reader associated with `field`.
+    /// Returns the `i64` fast field reader reader associated to `field`.
     ///
     /// If `field` is not a i64 fast field, this method returns an Error.
     pub fn i64(&self, field: Field) -> crate::Result<Arc<dyn Column<i64>>> {
@@ -219,7 +160,7 @@ impl FastFieldReaders {
         self.typed_fast_field_reader(field)
     }
 
-    /// Returns the `date` fast field reader reader associated with `field`.
+    /// Returns the `date` fast field reader reader associated to `field`.
     ///
     /// If `field` is not a date fast field, this method returns an Error.
     pub fn date(&self, field: Field) -> crate::Result<Arc<dyn Column<DateTime>>> {
@@ -227,7 +168,7 @@ impl FastFieldReaders {
         self.typed_fast_field_reader(field)
     }
 
-    /// Returns the `f64` fast field reader reader associated with `field`.
+    /// Returns the `f64` fast field reader reader associated to `field`.
     ///
     /// If `field` is not a f64 fast field, this method returns an Error.
     pub fn f64(&self, field: Field) -> crate::Result<Arc<dyn Column<f64>>> {
@@ -235,7 +176,7 @@ impl FastFieldReaders {
         self.typed_fast_field_reader(field)
     }
 
-    /// Returns the `bool` fast field reader reader associated with `field`.
+    /// Returns the `bool` fast field reader reader associated to `field`.
     ///
     /// If `field` is not a bool fast field, this method returns an Error.
     pub fn bool(&self, field: Field) -> crate::Result<Arc<dyn Column<bool>>> {
@@ -243,7 +184,7 @@ impl FastFieldReaders {
         self.typed_fast_field_reader(field)
     }
 
-    /// Returns a `u64s` multi-valued fast field reader reader associated with `field`.
+    /// Returns a `u64s` multi-valued fast field reader reader associated to `field`.
     ///
     /// If `field` is not a u64 multi-valued fast field, this method returns an Error.
     pub fn u64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<u64>> {
@@ -251,15 +192,15 @@ impl FastFieldReaders {
         self.typed_fast_field_multi_reader(field)
     }
 
-    /// Returns a `u64s` multi-valued fast field reader reader associated with `field`, regardless
-    /// of whether the given field is effectively of type `u64` or not.
+    /// Returns a `u64s` multi-valued fast field reader reader associated to `field`, regardless of
+    /// whether the given field is effectively of type `u64` or not.
     ///
     /// If `field` is not a u64 multi-valued fast field, this method returns an Error.
     pub fn u64s_lenient(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<u64>> {
         self.typed_fast_field_multi_reader(field)
     }
 
-    /// Returns a `i64s` multi-valued fast field reader reader associated with `field`.
+    /// Returns a `i64s` multi-valued fast field reader reader associated to `field`.
     ///
     /// If `field` is not a i64 multi-valued fast field, this method returns an Error.
     pub fn i64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<i64>> {
@@ -267,7 +208,7 @@ impl FastFieldReaders {
         self.typed_fast_field_multi_reader(field)
     }
 
-    /// Returns a `f64s` multi-valued fast field reader reader associated with `field`.
+    /// Returns a `f64s` multi-valued fast field reader reader associated to `field`.
     ///
     /// If `field` is not a f64 multi-valued fast field, this method returns an Error.
     pub fn f64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<f64>> {
@@ -275,7 +216,7 @@ impl FastFieldReaders {
         self.typed_fast_field_multi_reader(field)
     }
 
-    /// Returns a `bools` multi-valued fast field reader reader associated with `field`.
+    /// Returns a `bools` multi-valued fast field reader reader associated to `field`.
     ///
     /// If `field` is not a bool multi-valued fast field, this method returns an Error.
     pub fn bools(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<bool>> {
@@ -283,7 +224,7 @@ impl FastFieldReaders {
         self.typed_fast_field_multi_reader(field)
     }
 
-    /// Returns a `time::OffsetDateTime` multi-valued fast field reader reader associated with
+    /// Returns a `time::OffsetDateTime` multi-valued fast field reader reader associated to
     /// `field`.
     ///
     /// If `field` is not a `time::OffsetDateTime` multi-valued fast field, this method returns an
@@ -293,7 +234,7 @@ impl FastFieldReaders {
         self.typed_fast_field_multi_reader(field)
     }
 
-    /// Returns the `bytes` fast field reader associated with `field`.
+    /// Returns the `bytes` fast field reader associated to `field`.
     ///
     /// If `field` is not a bytes fast field, returns an Error.
     pub fn bytes(&self, field: Field) -> crate::Result<BytesFastFieldReader> {
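The `u64_lenient` accessors kept above work because every fast value type is stored as a `u64` under a monotonic mapping, so ordering survives even when the logical type is ignored. A sketch of that mapping for `i64` — the sign-bit flip behind `common::i64_to_u64`, which appears in the next file; hedged here as an illustration, not a copy of the crate's code:

    fn i64_to_u64(val: i64) -> u64 {
        // flip the sign bit so that u64 order matches i64 order
        (val as u64) ^ (1u64 << 63)
    }

    fn u64_to_i64(val: u64) -> i64 {
        (val ^ (1u64 << 63)) as i64
    }

    fn main() {
        assert!(i64_to_u64(-1) < i64_to_u64(0));
        assert!(i64_to_u64(0) < i64_to_u64(1));
        assert_eq!(u64_to_i64(i64_to_u64(-42)), -42);
    }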
@@ -70,35 +70,6 @@ impl CompositeFastFieldSerializer {
         Ok(())
     }
 
-    /// Serialize data into a new u64 fast field. The best compression codec of the the provided
-    /// will be chosen.
-    pub fn create_auto_detect_u64_fast_field_with_idx_and_codecs<T: MonotonicallyMappableToU64>(
-        &mut self,
-        field: Field,
-        fastfield_accessor: impl Column<T>,
-        idx: usize,
-        codec_types: &[FastFieldCodecType],
-    ) -> io::Result<()> {
-        let field_write = self.composite_write.for_field_with_idx(field, idx);
-        fastfield_codecs::serialize(fastfield_accessor, field_write, codec_types)?;
-        Ok(())
-    }
-
-    /// Serialize data into a new u128 fast field. The codec will be compact space compressor,
-    /// which is optimized for scanning the fast field for a given range.
-    pub fn create_u128_fast_field_with_idx<F: Fn() -> I, I: Iterator<Item = u128>>(
-        &mut self,
-        field: Field,
-        iter_gen: F,
-        num_vals: u32,
-        idx: usize,
-    ) -> io::Result<()> {
-        let field_write = self.composite_write.for_field_with_idx(field, idx);
-        fastfield_codecs::serialize_u128(iter_gen, num_vals, field_write)?;
-
-        Ok(())
-    }
-
     /// Start serializing a new [u8] fast field. Use the returned writer to write data into the
     /// bytes field. To associate the bytes with documents a seperate index must be created on
     /// index 0. See bytes/writer.rs::serialize for an example.
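The removed `create_auto_detect_u64_fast_field_with_idx_and_codecs` differed from the surviving `_with_idx` variant only in letting the caller narrow the candidate codec list. "Auto detect" here roughly means: estimate the compressed size under each candidate codec and keep the smallest. A generic sketch of that selection, with hypothetical estimate numbers (the codec names mirror `FastFieldCodecType`; everything else is illustrative):

    #[derive(Debug, Clone, Copy, PartialEq)]
    enum Codec {
        Bitpacked,
        Linear,
        BlockwiseLinear,
    }

    // ratio = estimated compressed size relative to uncompressed; lower is better
    fn pick_codec(estimates: &[(Codec, f32)]) -> Codec {
        estimates
            .iter()
            .min_by(|a, b| a.1.partial_cmp(&b.1).unwrap())
            .map(|(codec, _)| *codec)
            .unwrap()
    }

    fn main() {
        let estimates = [
            (Codec::Bitpacked, 0.25),
            (Codec::Linear, 0.12),
            (Codec::BlockwiseLinear, 0.15),
        ];
        assert_eq!(pick_codec(&estimates), Codec::Linear);
    }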
@@ -2,11 +2,11 @@ use std::collections::HashMap;
|
|||||||
use std::io;
|
use std::io;
|
||||||
|
|
||||||
use common;
|
use common;
|
||||||
use fastfield_codecs::{Column, MonotonicallyMappableToU128, MonotonicallyMappableToU64};
|
use fastfield_codecs::{Column, MonotonicallyMappableToU64};
|
||||||
use rustc_hash::FxHashMap;
|
use fnv::FnvHashMap;
|
||||||
use tantivy_bitpacker::BlockedBitpacker;
|
use tantivy_bitpacker::BlockedBitpacker;
|
||||||
|
|
||||||
use super::multivalued::{MultiValueU128FastFieldWriter, MultiValuedFastFieldWriter};
|
use super::multivalued::MultiValuedFastFieldWriter;
|
||||||
use super::FastFieldType;
|
use super::FastFieldType;
|
||||||
use crate::fastfield::{BytesFastFieldWriter, CompositeFastFieldSerializer};
|
use crate::fastfield::{BytesFastFieldWriter, CompositeFastFieldSerializer};
|
||||||
use crate::indexer::doc_id_mapping::DocIdMapping;
|
use crate::indexer::doc_id_mapping::DocIdMapping;
|
||||||
@@ -19,19 +19,10 @@ use crate::DatePrecision;
|
|||||||
pub struct FastFieldsWriter {
|
pub struct FastFieldsWriter {
|
||||||
term_id_writers: Vec<MultiValuedFastFieldWriter>,
|
term_id_writers: Vec<MultiValuedFastFieldWriter>,
|
||||||
single_value_writers: Vec<IntFastFieldWriter>,
|
single_value_writers: Vec<IntFastFieldWriter>,
|
||||||
u128_value_writers: Vec<U128FastFieldWriter>,
|
|
||||||
u128_multi_value_writers: Vec<MultiValueU128FastFieldWriter>,
|
|
||||||
multi_values_writers: Vec<MultiValuedFastFieldWriter>,
|
multi_values_writers: Vec<MultiValuedFastFieldWriter>,
|
||||||
bytes_value_writers: Vec<BytesFastFieldWriter>,
|
bytes_value_writers: Vec<BytesFastFieldWriter>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn unexpected_value(expected: &str, actual: &Value) -> crate::TantivyError {
|
|
||||||
crate::TantivyError::SchemaError(format!(
|
|
||||||
"Expected a {:?} in fast field, but got {:?}",
|
|
||||||
expected, actual
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn fast_field_default_value(field_entry: &FieldEntry) -> u64 {
|
fn fast_field_default_value(field_entry: &FieldEntry) -> u64 {
|
||||||
match *field_entry.field_type() {
|
match *field_entry.field_type() {
|
||||||
FieldType::I64(_) | FieldType::Date(_) => common::i64_to_u64(0i64),
|
FieldType::I64(_) | FieldType::Date(_) => common::i64_to_u64(0i64),
|
||||||
@@ -43,8 +34,6 @@ fn fast_field_default_value(field_entry: &FieldEntry) -> u64 {
|
|||||||
impl FastFieldsWriter {
|
impl FastFieldsWriter {
|
||||||
/// Create all `FastFieldWriter` required by the schema.
|
/// Create all `FastFieldWriter` required by the schema.
|
||||||
pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
|
pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
|
||||||
let mut u128_value_writers = Vec::new();
|
|
||||||
let mut u128_multi_value_writers = Vec::new();
|
|
||||||
let mut single_value_writers = Vec::new();
|
let mut single_value_writers = Vec::new();
|
||||||
let mut term_id_writers = Vec::new();
|
let mut term_id_writers = Vec::new();
|
||||||
let mut multi_values_writers = Vec::new();
|
let mut multi_values_writers = Vec::new();
|
||||||
@@ -108,27 +97,10 @@ impl FastFieldsWriter {
|
|||||||
bytes_value_writers.push(fast_field_writer);
|
bytes_value_writers.push(fast_field_writer);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
FieldType::IpAddr(opt) => {
|
|
||||||
if opt.is_fast() {
|
|
||||||
match opt.get_fastfield_cardinality() {
|
|
||||||
Some(Cardinality::SingleValue) => {
|
|
||||||
let fast_field_writer = U128FastFieldWriter::new(field);
|
|
||||||
u128_value_writers.push(fast_field_writer);
|
|
||||||
}
|
|
||||||
Some(Cardinality::MultiValues) => {
|
|
||||||
let fast_field_writer = MultiValueU128FastFieldWriter::new(field);
|
|
||||||
u128_multi_value_writers.push(fast_field_writer);
|
|
||||||
}
|
|
||||||
None => {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
FieldType::Str(_) | FieldType::JsonObject(_) => {}
|
FieldType::Str(_) | FieldType::JsonObject(_) => {}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
FastFieldsWriter {
|
FastFieldsWriter {
|
||||||
u128_value_writers,
|
|
||||||
u128_multi_value_writers,
|
|
||||||
term_id_writers,
|
term_id_writers,
|
||||||
single_value_writers,
|
single_value_writers,
|
||||||
multi_values_writers,
|
multi_values_writers,
|
||||||
@@ -157,19 +129,9 @@ impl FastFieldsWriter {
|
|||||||
.iter()
|
.iter()
|
||||||
.map(|w| w.mem_usage())
|
.map(|w| w.mem_usage())
|
||||||
.sum::<usize>()
|
.sum::<usize>()
|
||||||
+ self
|
|
||||||
.u128_value_writers
|
|
||||||
.iter()
|
|
||||||
.map(|w| w.mem_usage())
|
|
||||||
.sum::<usize>()
|
|
||||||
+ self
|
|
||||||
.u128_multi_value_writers
|
|
||||||
.iter()
|
|
||||||
.map(|w| w.mem_usage())
|
|
||||||
.sum::<usize>()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get the `FastFieldWriter` associated with a field.
|
/// Get the `FastFieldWriter` associated to a field.
|
||||||
pub fn get_term_id_writer(&self, field: Field) -> Option<&MultiValuedFastFieldWriter> {
|
pub fn get_term_id_writer(&self, field: Field) -> Option<&MultiValuedFastFieldWriter> {
|
||||||
// TODO optimize
|
// TODO optimize
|
||||||
self.term_id_writers
|
self.term_id_writers
|
||||||
@@ -177,7 +139,7 @@ impl FastFieldsWriter {
|
|||||||
.find(|field_writer| field_writer.field() == field)
|
.find(|field_writer| field_writer.field() == field)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get the `FastFieldWriter` associated with a field.
|
/// Get the `FastFieldWriter` associated to a field.
|
||||||
pub fn get_field_writer(&self, field: Field) -> Option<&IntFastFieldWriter> {
|
pub fn get_field_writer(&self, field: Field) -> Option<&IntFastFieldWriter> {
|
||||||
// TODO optimize
|
// TODO optimize
|
||||||
self.single_value_writers
|
self.single_value_writers
|
||||||
@@ -185,7 +147,7 @@ impl FastFieldsWriter {
|
|||||||
.find(|field_writer| field_writer.field() == field)
|
.find(|field_writer| field_writer.field() == field)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get the `FastFieldWriter` associated with a field.
|
/// Get the `FastFieldWriter` associated to a field.
|
||||||
pub fn get_field_writer_mut(&mut self, field: Field) -> Option<&mut IntFastFieldWriter> {
|
pub fn get_field_writer_mut(&mut self, field: Field) -> Option<&mut IntFastFieldWriter> {
|
||||||
// TODO optimize
|
// TODO optimize
|
||||||
self.single_value_writers
|
self.single_value_writers
|
||||||
@@ -193,7 +155,7 @@ impl FastFieldsWriter {
|
|||||||
.find(|field_writer| field_writer.field() == field)
|
.find(|field_writer| field_writer.field() == field)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get the `FastFieldWriter` associated with a field.
|
/// Get the `FastFieldWriter` associated to a field.
|
||||||
pub fn get_term_id_writer_mut(
|
pub fn get_term_id_writer_mut(
|
||||||
&mut self,
|
&mut self,
|
||||||
field: Field,
|
field: Field,
|
||||||
@@ -228,27 +190,21 @@ impl FastFieldsWriter {
|
|||||||
.iter_mut()
|
.iter_mut()
|
||||||
.find(|field_writer| field_writer.field() == field)
|
.find(|field_writer| field_writer.field() == field)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Indexes all of the fastfields of a new document.
|
/// Indexes all of the fastfields of a new document.
|
||||||
pub fn add_document(&mut self, doc: &Document) -> crate::Result<()> {
|
pub fn add_document(&mut self, doc: &Document) {
|
||||||
for field_writer in &mut self.term_id_writers {
|
for field_writer in &mut self.term_id_writers {
|
||||||
field_writer.add_document(doc)?;
|
field_writer.add_document(doc);
|
||||||
}
|
}
|
||||||
for field_writer in &mut self.single_value_writers {
|
for field_writer in &mut self.single_value_writers {
|
||||||
field_writer.add_document(doc)?;
|
field_writer.add_document(doc);
|
||||||
}
|
}
|
||||||
for field_writer in &mut self.multi_values_writers {
|
for field_writer in &mut self.multi_values_writers {
|
||||||
field_writer.add_document(doc)?;
|
field_writer.add_document(doc);
|
||||||
}
|
}
|
||||||
for field_writer in &mut self.bytes_value_writers {
|
for field_writer in &mut self.bytes_value_writers {
|
||||||
field_writer.add_document(doc)?;
|
field_writer.add_document(doc);
|
||||||
}
|
}
|
||||||
for field_writer in &mut self.u128_value_writers {
|
|
||||||
field_writer.add_document(doc)?;
|
|
||||||
}
|
|
||||||
for field_writer in &mut self.u128_multi_value_writers {
|
|
||||||
field_writer.add_document(doc)?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Serializes all of the `FastFieldWriter`s by pushing them in
|
/// Serializes all of the `FastFieldWriter`s by pushing them in
|
||||||
@@ -256,7 +212,7 @@ impl FastFieldsWriter {
|
|||||||
pub fn serialize(
|
pub fn serialize(
|
||||||
self,
|
self,
|
||||||
serializer: &mut CompositeFastFieldSerializer,
|
serializer: &mut CompositeFastFieldSerializer,
|
||||||
mapping: &HashMap<Field, FxHashMap<UnorderedTermId, TermOrdinal>>,
|
mapping: &HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>,
|
||||||
doc_id_map: Option<&DocIdMapping>,
|
doc_id_map: Option<&DocIdMapping>,
|
||||||
) -> io::Result<()> {
|
) -> io::Result<()> {
|
||||||
for field_writer in self.term_id_writers {
|
for field_writer in self.term_id_writers {
|
||||||
@@ -274,108 +230,6 @@ impl FastFieldsWriter {
|
|||||||
for field_writer in self.bytes_value_writers {
|
for field_writer in self.bytes_value_writers {
|
||||||
field_writer.serialize(serializer, doc_id_map)?;
|
field_writer.serialize(serializer, doc_id_map)?;
|
||||||
}
|
}
|
||||||
for field_writer in self.u128_value_writers {
|
|
||||||
field_writer.serialize(serializer, doc_id_map)?;
|
|
||||||
}
|
|
||||||
for field_writer in self.u128_multi_value_writers {
|
|
||||||
field_writer.serialize(serializer, doc_id_map)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Fast field writer for u128 values.
|
|
||||||
/// The fast field writer just keeps the values in memory.
|
|
||||||
///
|
|
||||||
/// Only when the segment writer can be closed and
|
|
||||||
/// persisted on disk, the fast field writer is
|
|
||||||
/// sent to a `FastFieldSerializer` via the `.serialize(...)`
|
|
||||||
/// method.
|
|
||||||
///
|
|
||||||
/// We cannot serialize earlier as the values are
|
|
||||||
/// compressed to a compact number space and the number of
|
|
||||||
/// bits required for bitpacking can only been known once
|
|
||||||
/// we have seen all of the values.
|
|
||||||
pub struct U128FastFieldWriter {
|
|
||||||
field: Field,
|
|
||||||
vals: Vec<u128>,
|
|
||||||
val_count: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl U128FastFieldWriter {
|
|
||||||
/// Creates a new `IntFastFieldWriter`
|
|
||||||
pub fn new(field: Field) -> Self {
|
|
||||||
Self {
|
|
||||||
field,
|
|
||||||
vals: vec![],
|
|
||||||
val_count: 0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The memory used (inclusive childs)
|
|
||||||
pub fn mem_usage(&self) -> usize {
|
|
||||||
self.vals.len() * 16
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Records a new value.
|
|
||||||
///
|
|
||||||
/// The n-th value being recorded is implicitely
|
|
||||||
/// associated to the document with the `DocId` n.
|
|
||||||
/// (Well, `n-1` actually because of 0-indexing)
|
|
||||||
pub fn add_val(&mut self, val: u128) {
|
|
||||||
self.vals.push(val);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Extract the fast field value from the document
|
|
||||||
/// (or use the default value) and records it.
|
|
||||||
///
|
|
||||||
/// Extract the value associated to the fast field for
|
|
||||||
/// this document.
|
|
||||||
pub fn add_document(&mut self, doc: &Document) -> crate::Result<()> {
|
|
||||||
match doc.get_first(self.field) {
|
|
||||||
Some(v) => {
|
|
||||||
let ip_addr = v.as_ip_addr().ok_or_else(|| unexpected_value("ip", v))?;
|
|
||||||
let value = ip_addr.to_u128();
|
|
||||||
self.add_val(value);
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
self.add_val(0); // TODO fix null handling
|
|
||||||
}
|
|
||||||
};
|
|
||||||
self.val_count += 1;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Push the fast fields value to the `FastFieldWriter`.
|
|
||||||
pub fn serialize(
|
|
||||||
&self,
|
|
||||||
serializer: &mut CompositeFastFieldSerializer,
|
|
||||||
doc_id_map: Option<&DocIdMapping>,
|
|
||||||
) -> io::Result<()> {
|
|
||||||
if let Some(doc_id_map) = doc_id_map {
|
|
||||||
let iter_gen = || {
|
|
||||||
doc_id_map
|
|
||||||
.iter_old_doc_ids()
|
|
||||||
.map(|idx| self.vals[idx as usize])
|
|
||||||
};
|
|
||||||
|
|
||||||
serializer.create_u128_fast_field_with_idx(
|
|
||||||
self.field,
|
|
||||||
iter_gen,
|
|
||||||
self.val_count as u32,
|
|
||||||
0,
|
|
||||||
)?;
|
|
||||||
} else {
|
|
||||||
let iter_gen = || self.vals.iter().cloned();
|
|
||||||
serializer.create_u128_fast_field_with_idx(
|
|
||||||
self.field,
|
|
||||||
iter_gen,
|
|
||||||
self.val_count as u32,
|
|
||||||
0,
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -384,7 +238,7 @@ impl U128FastFieldWriter {
 /// The fast field writer just keeps the values in memory.
 ///
 /// Only when the segment writer can be closed and
-/// persisted on disk, the fast field writer is
+/// persisted on disc, the fast field writer is
 /// sent to a `FastFieldSerializer` via the `.serialize(...)`
 /// method.
 ///
@@ -440,7 +294,7 @@ impl IntFastFieldWriter {
     /// Records a new value.
     ///
     /// The n-th value being recorded is implicitly
-    /// associated with the document with the `DocId` n.
+    /// associated to the document with the `DocId` n.
     /// (Well, `n-1` actually because of 0-indexing)
     pub fn add_val(&mut self, val: u64) {
         self.vals.add(val);
@@ -459,7 +313,7 @@ impl IntFastFieldWriter {
     /// (or use the default value) and records it.
     ///
     ///
-    /// Extract the value associated with the fast field for
+    /// Extract the value associated to the fast field for
     /// this document.
     ///
     /// i64 and f64 are remapped to u64 using the logic
@@ -471,14 +325,14 @@ impl IntFastFieldWriter {
     /// only the first one is taken in account.
     ///
     /// Values on text fast fields are skipped.
-    pub fn add_document(&mut self, doc: &Document) -> crate::Result<()> {
+    pub fn add_document(&mut self, doc: &Document) {
         match doc.get_first(self.field) {
             Some(v) => {
                 let value = match (self.precision_opt, v) {
                     (Some(precision), Value::Date(date_val)) => {
                         date_val.truncate(precision).to_u64()
                     }
-                    _ => super::value_to_u64(v)?,
+                    _ => super::value_to_u64(v),
                 };
                 self.add_val(value);
             }
@@ -486,7 +340,6 @@ impl IntFastFieldWriter {
                 self.add_val(self.val_if_missing);
             }
         };
-        Ok(())
     }

     /// get iterator over the data
@@ -511,7 +364,7 @@ impl IntFastFieldWriter {
             vals: &self.vals,
             min_value: min,
             max_value: max,
-            num_vals: self.val_count as u32,
+            num_vals: self.val_count as u64,
         };

         serializer.create_auto_detect_u64_fast_field(self.field, fastfield_accessor)?;
@@ -526,11 +379,11 @@ struct WriterFastFieldAccessProvider<'map, 'bitp> {
     vals: &'bitp BlockedBitpacker,
     min_value: u64,
     max_value: u64,
-    num_vals: u32,
+    num_vals: u64,
 }

 impl<'map, 'bitp> Column for WriterFastFieldAccessProvider<'map, 'bitp> {
-    /// Return the value associated with the given doc.
+    /// Return the value associated to the given doc.
     ///
     /// Whenever possible use the Iterator passed to the fastfield creation instead, for performance
     /// reasons.
@@ -538,8 +391,15 @@ impl<'map, 'bitp> Column for WriterFastFieldAccessProvider<'map, 'bitp> {
     /// # Panics
     ///
     /// May panic if `doc` is greater than the index.
-    fn get_val(&self, _doc: u32) -> u64 {
-        unimplemented!()
+    fn get_val(&self, doc: u64) -> u64 {
+        if let Some(doc_id_map) = self.doc_id_map {
+            // consider extra FastFieldReader wrapper for non doc_id_map
+            self.vals
+                .get(doc_id_map.get_old_doc_id(doc as u32) as usize)
+        } else {
+            self.vals.get(doc as usize)
+        }
     }

     fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
@@ -562,7 +422,7 @@ impl<'map, 'bitp> Column for WriterFastFieldAccessProvider<'map, 'bitp> {
         self.max_value
     }

-    fn num_vals(&self) -> u32 {
+    fn num_vals(&self) -> u64 {
         self.num_vals
     }
 }
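The `get_val` change above routes reads through an optional `doc_id_map` when the segment has been sorted: the new doc id is first translated back to the old doc id that the values were recorded under. A standalone sketch of that indirection (types reduced to plain `Vec`s; the struct and field names here are hypothetical):

```rust
// A sorted segment stores values in insertion order, so reads by the new
// (sorted) doc id must go through the new->old mapping first.
struct DocIdMap {
    new_to_old: Vec<u32>, // new_to_old[new_doc] = old_doc
}

fn get_val(vals: &[u64], doc_id_map: Option<&DocIdMap>, doc: u32) -> u64 {
    match doc_id_map {
        Some(map) => vals[map.new_to_old[doc as usize] as usize],
        None => vals[doc as usize],
    }
}

fn main() {
    let vals = vec![10u64, 20, 30];
    let map = DocIdMap { new_to_old: vec![2, 0, 1] };
    assert_eq!(get_val(&vals, Some(&map), 0), 30); // new doc 0 was old doc 2
    assert_eq!(get_val(&vals, None, 0), 10); // unsorted: identity mapping
}
```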
@@ -1,4 +1,4 @@
-//! The fieldnorm represents the length associated with
+//! The fieldnorm represents the length associated to
 //! a given Field of a given document.
 //!
 //! This metric is important to compute the score of a
@@ -47,9 +47,9 @@ impl FieldNormReaders {
     }
 }

-/// Reads the fieldnorm associated with a document.
+/// Reads the fieldnorm associated to a document.
 ///
-/// The [fieldnorm](FieldNormReader::fieldnorm) represents the length associated with
+/// The [fieldnorm](FieldNormReader::fieldnorm) represents the length associated to
 /// a given Field of a given document.
 #[derive(Clone)]
 pub struct FieldNormReader(ReaderImplEnum);
@@ -104,7 +104,7 @@ impl FieldNormReader {
         }
     }

-    /// Returns the `fieldnorm` associated with a doc id.
+    /// Returns the `fieldnorm` associated to a doc id.
     /// The fieldnorm is a value approximating the number
     /// of tokens in a given field of the `doc_id`.
     ///
@@ -123,7 +123,7 @@ impl FieldNormReader {
         }
     }

-    /// Returns the `fieldnorm_id` associated with a document.
+    /// Returns the `fieldnorm_id` associated to a document.
     #[inline]
     pub fn fieldnorm_id(&self, doc_id: DocId) -> u8 {
         match &self.0 {
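As the module doc above says, the fieldnorm exists to feed scoring: BM25's length normalization scales a term's contribution down as the field gets longer. A standalone sketch of that use (constants and function shape are illustrative, not tantivy's internals):

```rust
// BM25 term-frequency normalization: the same term frequency is worth
// less in a long field than in a short one.
fn bm25_tf_norm(term_freq: f32, fieldnorm: f32, avg_fieldnorm: f32) -> f32 {
    const K1: f32 = 1.2;
    const B: f32 = 0.75;
    term_freq * (K1 + 1.0) / (term_freq + K1 * (1.0 - B + B * fieldnorm / avg_fieldnorm))
}

fn main() {
    // tf = 2 in a 100-token field scores below tf = 2 in a 10-token field.
    assert!(bm25_tf_norm(2.0, 100.0, 50.0) < bm25_tf_norm(2.0, 10.0, 50.0));
}
```

Storing the norm as a one-byte `fieldnorm_id` (as `fieldnorm_id` above returns) trades a little precision for one byte per document per field.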
@@ -188,7 +188,7 @@ impl DeleteCursor {
         }
     }

-    #[allow(clippy::wrong_self_convention)]
+    #[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))]
     fn is_behind_opstamp(&mut self, target_opstamp: Opstamp) -> bool {
         self.get()
             .map(|operation| operation.opstamp < target_opstamp)
@@ -246,27 +246,18 @@ impl DeleteCursor {
 mod tests {

     use super::{DeleteOperation, DeleteQueue};
-    use crate::query::{Explanation, Scorer, Weight};
-    use crate::{DocId, Score, SegmentReader};
+    use crate::schema::{Field, Term};

-    struct DummyWeight;
-    impl Weight for DummyWeight {
-        fn scorer(&self, _reader: &SegmentReader, _boost: Score) -> crate::Result<Box<dyn Scorer>> {
-            Err(crate::TantivyError::InternalError("dummy impl".to_owned()))
-        }
-
-        fn explain(&self, _reader: &SegmentReader, _doc: DocId) -> crate::Result<Explanation> {
-            Err(crate::TantivyError::InternalError("dummy impl".to_owned()))
-        }
-    }
-
     #[test]
     fn test_deletequeue() {
         let delete_queue = DeleteQueue::new();

-        let make_op = |i: usize| DeleteOperation {
-            opstamp: i as u64,
-            target: Box::new(DummyWeight),
+        let make_op = |i: usize| {
+            let field = Field::from_field_id(1u32);
+            DeleteOperation {
+                opstamp: i as u64,
+                term: Term::from_field_u64(field, i as u64),
+            }
         };

         delete_queue.push(make_op(1));
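The test rewrite above tracks a representational change in `DeleteOperation`: on the base side it carries a query `Weight` (`target`), on the branch side it carries the raw `Term`. A standalone sketch of the two shapes (local stand-in types, not the tantivy definitions):

```rust
// Two ways to describe "which documents to delete".
struct Term(Vec<u8>);
trait Weight {}
struct TermWeight;
impl Weight for TermWeight {}

enum DeleteTarget {
    ByWeight(Box<dyn Weight>), // base side: any query's weight can drive a delete
    ByTerm(Term),              // branch side: exact-term deletes only
}

struct DeleteOperation {
    opstamp: u64,
    target: DeleteTarget,
}

fn main() {
    let ops = vec![
        DeleteOperation { opstamp: 1, target: DeleteTarget::ByTerm(Term(b"id:13".to_vec())) },
        DeleteOperation { opstamp: 2, target: DeleteTarget::ByWeight(Box::new(TermWeight)) },
    ];
    assert_eq!(ops.len(), 2);
}
```

The `Weight`-based shape is what lets `delete_query` exist further down in this diff; the `Term`-based shape only supports `delete_term`.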
@@ -24,7 +24,7 @@ impl SegmentDocIdMapping {

     /// Returns an iterator over the old document addresses, ordered by the new document ids.
     ///
-    /// In the returned `DocAddress`, the `segment_ord` is the ordinal of targeted segment
+    /// In the returned `DocAddress`, the `segment_ord` is the ordinal of targetted segment
     /// in the list of merged segments.
     pub(crate) fn iter_old_doc_addrs(&self) -> impl Iterator<Item = DocAddress> + '_ {
         self.new_doc_id_to_old_doc_addr.iter().copied()
@@ -34,6 +34,10 @@ impl SegmentDocIdMapping {
         self.new_doc_id_to_old_doc_addr.len()
     }

+    pub(crate) fn get_old_doc_addr(&self, new_doc_id: DocId) -> DocAddress {
+        self.new_doc_id_to_old_doc_addr[new_doc_id as usize]
+    }
+
     /// This flags means the segments are simply stacked in the order of their ordinal.
     /// e.g. [(0, 1), .. (n, 1), (0, 2)..., (m, 2)]
     ///
@@ -1,69 +0,0 @@
-pub struct FlatMapWithBuffer<T, F, Iter> {
-    buffer: Vec<T>,
-    fill_buffer: F,
-    underlying_it: Iter,
-}
-
-impl<T, F, Iter, I> Iterator for FlatMapWithBuffer<T, F, Iter>
-where
-    Iter: Iterator<Item = I>,
-    F: Fn(I, &mut Vec<T>),
-{
-    type Item = T;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        while self.buffer.is_empty() {
-            let next_el = self.underlying_it.next()?;
-            (self.fill_buffer)(next_el, &mut self.buffer);
-            // We will pop elements, so we reverse the buffer first.
-            self.buffer.reverse();
-        }
-        self.buffer.pop()
-    }
-}
-
-pub trait FlatMapWithBufferIter: Iterator {
-    /// Function similar to `flat_map`, but allows reusing a shared `Vec`.
-    fn flat_map_with_buffer<F, T>(self, fill_buffer: F) -> FlatMapWithBuffer<T, F, Self>
-    where
-        F: Fn(Self::Item, &mut Vec<T>),
-        Self: Sized,
-    {
-        FlatMapWithBuffer {
-            buffer: Vec::with_capacity(10),
-            fill_buffer,
-            underlying_it: self,
-        }
-    }
-}
-
-impl<T: ?Sized> FlatMapWithBufferIter for T where T: Iterator {}
-
-#[cfg(test)]
-mod tests {
-    use crate::indexer::flat_map_with_buffer::FlatMapWithBufferIter;
-
-    #[test]
-    fn test_flat_map_with_buffer_empty() {
-        let mut empty_iter = std::iter::empty::<usize>()
-            .flat_map_with_buffer(|_val: usize, _buffer: &mut Vec<usize>| {});
-        assert!(empty_iter.next().is_none());
-    }
-
-    #[test]
-    fn test_flat_map_with_buffer_simple() {
-        let vals: Vec<usize> = (1..5)
-            .flat_map_with_buffer(|val: usize, buffer: &mut Vec<usize>| buffer.extend(0..val))
-            .collect();
-        assert_eq!(&[0, 0, 1, 0, 1, 2, 0, 1, 2, 3], &vals[..]);
-    }
-
-    #[test]
-    fn test_flat_map_filling_no_elements_does_not_stop_iterator() {
-        let vals: Vec<usize> = [2, 0, 0, 3]
-            .into_iter()
-            .flat_map_with_buffer(|val: usize, buffer: &mut Vec<usize>| buffer.extend(0..val))
-            .collect();
-        assert_eq!(&[0, 1, 0, 1, 2], &vals[..]);
-    }
-}
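What the removed adapter bought over plain `flat_map` is allocation reuse: the closure writes into one shared `Vec` instead of returning a fresh container per input item. An eager, simplified re-statement of the same idea (not the deleted lazy iterator, just its behavior):

```rust
// One shared buffer is filled and drained per input item; `append`
// empties `buffer` while keeping its capacity for the next round.
fn flat_map_with_buffer<I, T>(
    iter: impl IntoIterator<Item = I>,
    fill: impl Fn(I, &mut Vec<T>),
) -> Vec<T> {
    let mut buffer = Vec::new();
    let mut out = Vec::new();
    for item in iter {
        fill(item, &mut buffer);
        out.append(&mut buffer);
    }
    out
}

fn main() {
    // Mirrors the deleted test_flat_map_with_buffer_simple.
    let vals = flat_map_with_buffer(1usize..5, |val, buf| buf.extend(0..val));
    assert_eq!(vals, vec![0, 0, 1, 0, 1, 2, 0, 1, 2, 3]);
}
```

The deleted version stays lazy by popping from the buffer on each `next()`, which is why it reverses the buffer right after filling it.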
@@ -11,6 +11,7 @@ use super::segment_updater::SegmentUpdater;
 use super::{AddBatch, AddBatchReceiver, AddBatchSender, PreparedCommit};
 use crate::core::{Index, Segment, SegmentComponent, SegmentId, SegmentMeta, SegmentReader};
 use crate::directory::{DirectoryLock, GarbageCollectionResult, TerminatingWrite};
+use crate::docset::{DocSet, TERMINATED};
 use crate::error::TantivyError;
 use crate::fastfield::write_alive_bitset;
 use crate::indexer::delete_queue::{DeleteCursor, DeleteQueue};
@@ -19,9 +20,8 @@ use crate::indexer::index_writer_status::IndexWriterStatus;
 use crate::indexer::operation::DeleteOperation;
 use crate::indexer::stamper::Stamper;
 use crate::indexer::{MergePolicy, SegmentEntry, SegmentWriter};
-use crate::query::{Query, TermQuery};
 use crate::schema::{Document, IndexRecordOption, Term};
-use crate::{FutureResult, IndexReader, Opstamp};
+use crate::{FutureResult, Opstamp};

 // Size of the margin for the `memory_arena`. A segment is closed when the remaining memory
 // in the `memory_arena` goes below MARGIN_IN_BYTES.
@@ -57,7 +57,6 @@ pub struct IndexWriter {
     _directory_lock: Option<DirectoryLock>,

     index: Index,
-    index_reader: IndexReader,

     memory_arena_in_bytes_per_thread: usize,

@@ -93,14 +92,19 @@ fn compute_deleted_bitset(

             // A delete operation should only affect
             // document that were inserted before it.
-            delete_op
-                .target
-                .for_each(segment_reader, &mut |doc_matching_delete_query, _| {
-                    if doc_opstamps.is_deleted(doc_matching_delete_query, delete_op.opstamp) {
-                        alive_bitset.remove(doc_matching_delete_query);
+            let inverted_index = segment_reader.inverted_index(delete_op.term.field())?;
+            if let Some(mut docset) =
+                inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)?
+            {
+                let mut doc_matching_deleted_term = docset.doc();
+                while doc_matching_deleted_term != TERMINATED {
+                    if doc_opstamps.is_deleted(doc_matching_deleted_term, delete_op.opstamp) {
+                        alive_bitset.remove(doc_matching_deleted_term);
                         might_have_changed = true;
                     }
-                })?;
+                    doc_matching_deleted_term = docset.advance();
+                }
+            }
             delete_cursor.advance();
         }
         Ok(might_have_changed)
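The branch side of `compute_deleted_bitset` above walks the term's postings by hand with the `DocSet` idiom: `doc()` yields the current document, `advance()` moves on, and a sentinel value signals exhaustion. A standalone sketch of that walk (a local stand-in for tantivy's `DocSet`/`TERMINATED`, for illustration):

```rust
// Sentinel-terminated cursor, as in the loop above.
const TERMINATED: u32 = u32::MAX;

struct VecDocSet {
    docs: Vec<u32>,
    cursor: usize,
}

impl VecDocSet {
    fn doc(&self) -> u32 {
        self.docs.get(self.cursor).copied().unwrap_or(TERMINATED)
    }

    fn advance(&mut self) -> u32 {
        self.cursor += 1;
        self.doc()
    }
}

fn main() {
    let mut docset = VecDocSet { docs: vec![2, 5, 9], cursor: 0 };
    let mut seen = Vec::new();
    let mut doc = docset.doc();
    while doc != TERMINATED {
        seen.push(doc); // e.g. alive_bitset.remove(doc) in the real code
        doc = docset.advance();
    }
    assert_eq!(seen, vec![2, 5, 9]);
}
```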
@@ -298,7 +302,6 @@ impl IndexWriter {

             memory_arena_in_bytes_per_thread,
             index: index.clone(),
-            index_reader: index.reader()?,

             index_writer_status: IndexWriterStatus::from(document_receiver),
             operation_sender: document_sender,
@@ -370,9 +373,9 @@ impl IndexWriter {
     /// This method is useful only for users trying to do complex
     /// operations, like converting an index format to another.
     ///
-    /// It is safe to start writing file associated with the new `Segment`.
+    /// It is safe to start writing file associated to the new `Segment`.
     /// These will not be garbage collected as long as an instance object of
-    /// `SegmentMeta` object associated with the new `Segment` is "alive".
+    /// `SegmentMeta` object associated to the new `Segment` is "alive".
     pub fn new_segment(&self) -> Segment {
         self.index.new_segment()
     }
@@ -663,33 +666,10 @@ impl IndexWriter {
     /// Like adds, the deletion itself will be visible
     /// only after calling `commit()`.
     pub fn delete_term(&self, term: Term) -> Opstamp {
-        let query = TermQuery::new(term, IndexRecordOption::Basic);
-        // For backward compatibility, if Term is invalid for the index, do nothing but return an
-        // Opstamp
-        self.delete_query(Box::new(query))
-            .unwrap_or_else(|_| self.stamper.stamp())
-    }
-
-    /// Delete all documents matching a given query.
-    /// Returns an `Err` if the query can't be executed.
-    ///
-    /// Delete operation only affects documents that
-    /// were added in previous commits, and documents
-    /// that were added previously in the same commit.
-    ///
-    /// Like adds, the deletion itself will be visible
-    /// only after calling `commit()`.
-    #[doc(hidden)]
-    pub fn delete_query(&self, query: Box<dyn Query>) -> crate::Result<Opstamp> {
-        let weight = query.weight(&self.index_reader.searcher(), false)?;
-
         let opstamp = self.stamper.stamp();
-        let delete_operation = DeleteOperation {
-            opstamp,
-            target: weight,
-        };
+        let delete_operation = DeleteOperation { opstamp, term };
         self.delete_queue.push(delete_operation);
-        Ok(opstamp)
+        opstamp
     }

     /// Returns the opstamp of the last successful commit.
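The base side of the hunk above offers two delete paths: `delete_term` never fails (it falls back to a fresh opstamp), while `delete_query` returns a `Result` because building the query's weight can fail. A hedged usage sketch against the base-side signatures quoted above (`writer` and `id_field` are placeholders supplied by the caller):

```rust
use tantivy::query::TermQuery;
use tantivy::schema::{Field, IndexRecordOption};
use tantivy::{IndexWriter, Opstamp, Term};

// Both paths queue a DeleteOperation; only the query path can fail.
fn delete_by_id(writer: &IndexWriter, id_field: Field, id: u64) -> tantivy::Result<Opstamp> {
    // Exact-term delete: infallible, always hands back an opstamp.
    let _stamp: Opstamp = writer.delete_term(Term::from_field_u64(id_field, id));

    // Query-based delete (base side only, `#[doc(hidden)]` above):
    // errs if the query cannot be executed.
    let query = TermQuery::new(Term::from_field_u64(id_field, id), IndexRecordOption::Basic);
    writer.delete_query(Box::new(query))
}
```

Either way the deletion only becomes visible after `commit()`, exactly as the doc comments state.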
@@ -758,17 +738,10 @@ impl IndexWriter {
         let (batch_opstamp, stamps) = self.get_batch_opstamps(count);

         let mut adds = AddBatch::default();
-
         for (user_op, opstamp) in user_operations_it.zip(stamps) {
             match user_op {
                 UserOperation::Delete(term) => {
-                    let query = TermQuery::new(term, IndexRecordOption::Basic);
-                    let weight = query.weight(&self.index_reader.searcher(), false)?;
-
-                    let delete_operation = DeleteOperation {
-                        opstamp,
-                        target: weight,
-                    };
+                    let delete_operation = DeleteOperation { opstamp, term };
                     self.delete_queue.push(delete_operation);
                 }
                 UserOperation::Add(document) => {
@@ -803,9 +776,7 @@ impl Drop for IndexWriter {
 #[cfg(test)]
 mod tests {
     use std::collections::{HashMap, HashSet};
-    use std::net::Ipv6Addr;

-    use fastfield_codecs::MonotonicallyMappableToU128;
     use proptest::prelude::*;
     use proptest::prop_oneof;
     use proptest::strategy::Strategy;
@@ -815,15 +786,13 @@ mod tests {
     use crate::directory::error::LockError;
     use crate::error::*;
     use crate::indexer::NoMergePolicy;
-    use crate::query::{BooleanQuery, Occur, Query, QueryParser, TermQuery};
+    use crate::query::{QueryParser, TermQuery};
     use crate::schema::{
-        self, Cardinality, Facet, FacetOptions, IndexRecordOption, IpAddrOptions, NumericOptions,
+        self, Cardinality, Facet, FacetOptions, IndexRecordOption, NumericOptions,
         TextFieldIndexing, TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
     };
     use crate::store::DOCSTORE_CACHE_CAPACITY;
-    use crate::{
-        DateTime, DocAddress, Index, IndexSettings, IndexSortByField, Order, ReloadPolicy, Term,
-    };
+    use crate::{DocAddress, Index, IndexSettings, IndexSortByField, Order, ReloadPolicy, Term};

     const LOREM: &str = "Doc Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do \
                          eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad \
@@ -1399,35 +1368,6 @@ mod tests {
         assert!(commit_again.is_ok());
     }

-    #[test]
-    fn test_sort_by_multivalue_field_error() -> crate::Result<()> {
-        let mut schema_builder = schema::Schema::builder();
-        let options = NumericOptions::default().set_fast(Cardinality::MultiValues);
-        schema_builder.add_u64_field("id", options);
-        let schema = schema_builder.build();
-
-        let settings = IndexSettings {
-            sort_by_field: Some(IndexSortByField {
-                field: "id".to_string(),
-                order: Order::Desc,
-            }),
-            ..Default::default()
-        };
-
-        let err = Index::builder()
-            .schema(schema)
-            .settings(settings)
-            .create_in_ram()
-            .unwrap_err();
-        assert_eq!(
-            err.to_string(),
-            "An invalid argument was passed: 'Only single value fast field Cardinality supported \
-             for sorting index id'"
-        );
-
-        Ok(())
-    }
-
     #[test]
     fn test_delete_with_sort_by_field() -> crate::Result<()> {
         let mut schema_builder = schema::Schema::builder();
@@ -1472,78 +1412,16 @@ mod tests {
         let fast_field_reader = segment_reader.fast_fields().u64(id_field)?;
         let in_order_alive_ids: Vec<u64> = segment_reader
             .doc_ids_alive()
-            .map(|doc| fast_field_reader.get_val(doc))
+            .map(|doc| fast_field_reader.get_val(doc as u64))
             .collect();
         assert_eq!(&in_order_alive_ids[..], &[9, 8, 7, 6, 5, 4, 1, 0]);
         Ok(())
     }

-    #[test]
-    fn test_delete_query_with_sort_by_field() -> crate::Result<()> {
-        let mut schema_builder = schema::Schema::builder();
-        let id_field =
-            schema_builder.add_u64_field("id", schema::INDEXED | schema::STORED | schema::FAST);
-        let schema = schema_builder.build();
-
-        let settings = IndexSettings {
-            sort_by_field: Some(IndexSortByField {
-                field: "id".to_string(),
-                order: Order::Desc,
-            }),
-            ..Default::default()
-        };
-
-        let index = Index::builder()
-            .schema(schema)
-            .settings(settings)
-            .create_in_ram()?;
-        let index_reader = index.reader()?;
-        let mut index_writer = index.writer_for_tests()?;
-
-        // create and delete docs in same commit
-        for id in 0u64..5u64 {
-            index_writer.add_document(doc!(id_field => id))?;
-        }
-        for id in 1u64..4u64 {
-            let term = Term::from_field_u64(id_field, id);
-            let not_term = Term::from_field_u64(id_field, 2);
-            let term = Box::new(TermQuery::new(term, Default::default()));
-            let not_term = Box::new(TermQuery::new(not_term, Default::default()));
-
-            let query: BooleanQuery = vec![
-                (Occur::Must, term as Box<dyn Query>),
-                (Occur::MustNot, not_term as Box<dyn Query>),
-            ]
-            .into();
-
-            index_writer.delete_query(Box::new(query))?;
-        }
-        for id in 5u64..10u64 {
-            index_writer.add_document(doc!(id_field => id))?;
-        }
-        index_writer.commit()?;
-        index_reader.reload()?;
-
-        let searcher = index_reader.searcher();
-        assert_eq!(searcher.segment_readers().len(), 1);
-
-        let segment_reader = searcher.segment_reader(0);
-        assert_eq!(segment_reader.num_docs(), 8);
-        assert_eq!(segment_reader.max_doc(), 10);
-        let fast_field_reader = segment_reader.fast_fields().u64(id_field)?;
-        let in_order_alive_ids: Vec<u64> = segment_reader
-            .doc_ids_alive()
-            .map(|doc| fast_field_reader.get_val(doc))
-            .collect();
-        assert_eq!(&in_order_alive_ids[..], &[9, 8, 7, 6, 5, 4, 2, 0]);
-        Ok(())
-    }
-
     #[derive(Debug, Clone, Copy)]
     enum IndexingOp {
         AddDoc { id: u64 },
         DeleteDoc { id: u64 },
-        DeleteDocQuery { id: u64 },
         Commit,
         Merge,
     }
@@ -1551,7 +1429,6 @@ mod tests {
     fn balanced_operation_strategy() -> impl Strategy<Value = IndexingOp> {
         prop_oneof![
             (0u64..20u64).prop_map(|id| IndexingOp::DeleteDoc { id }),
-            (0u64..20u64).prop_map(|id| IndexingOp::DeleteDocQuery { id }),
             (0u64..20u64).prop_map(|id| IndexingOp::AddDoc { id }),
             (0u64..1u64).prop_map(|_| IndexingOp::Commit),
             (0u64..1u64).prop_map(|_| IndexingOp::Merge),
@@ -1560,8 +1437,7 @@ mod tests {

     fn adding_operation_strategy() -> impl Strategy<Value = IndexingOp> {
         prop_oneof![
-            5 => (0u64..100u64).prop_map(|id| IndexingOp::DeleteDoc { id }),
-            5 => (0u64..100u64).prop_map(|id| IndexingOp::DeleteDocQuery { id }),
+            10 => (0u64..100u64).prop_map(|id| IndexingOp::DeleteDoc { id }),
             50 => (0u64..100u64).prop_map(|id| IndexingOp::AddDoc { id }),
             2 => (0u64..1u64).prop_map(|_| IndexingOp::Commit),
             1 => (0u64..1u64).prop_map(|_| IndexingOp::Merge),
@@ -1581,10 +1457,6 @@ mod tests {
                     existing_ids.remove(&id);
                     deleted_ids.insert(id);
                 }
-                IndexingOp::DeleteDocQuery { id } => {
-                    existing_ids.remove(&id);
-                    deleted_ids.insert(id);
-                }
                 _ => {}
             }
         }
@@ -1597,15 +1469,7 @@ mod tests {
         force_end_merge: bool,
     ) -> crate::Result<()> {
         let mut schema_builder = schema::Schema::builder();
-        let ip_field = schema_builder.add_ip_addr_field("ip", FAST | INDEXED | STORED);
-        let ips_field = schema_builder.add_ip_addr_field(
-            "ips",
-            IpAddrOptions::default().set_fast(Cardinality::MultiValues),
-        );
         let id_field = schema_builder.add_u64_field("id", FAST | INDEXED | STORED);
-        let i64_field = schema_builder.add_i64_field("i64", INDEXED);
-        let f64_field = schema_builder.add_f64_field("f64", INDEXED);
-        let date_field = schema_builder.add_date_field("date", INDEXED);
         let bytes_field = schema_builder.add_bytes_field("bytes", FAST | INDEXED | STORED);
         let bool_field = schema_builder.add_bool_field("bool", FAST | INDEXED | STORED);
         let text_field = schema_builder.add_text_field(
@@ -1619,7 +1483,6 @@ mod tests {
         );

         let large_text_field = schema_builder.add_text_field("large_text_field", TEXT | STORED);
-        let multi_text_fields = schema_builder.add_text_field("multi_text_fields", TEXT | STORED);

         let multi_numbers = schema_builder.add_u64_field(
             "multi_numbers",
@@ -1657,70 +1520,25 @@ mod tests {
         let old_reader = index.reader()?;

-        let ip_exists = |id| id % 3 != 0; // 0 does not exist
-
-        let multi_text_field_text1 = "test1 test2 test3 test1 test2 test3";
-        // rotate left
-        let multi_text_field_text2 = "test2 test3 test1 test2 test3 test1";
-        // rotate right
-        let multi_text_field_text3 = "test3 test1 test2 test3 test1 test2";
-
         for &op in ops {
             match op {
                 IndexingOp::AddDoc { id } => {
                     let facet = Facet::from(&("/cola/".to_string() + &id.to_string()));
-                    let ip_from_id = Ipv6Addr::from_u128(id as u128);
-
-                    if !ip_exists(id) {
-                        // every 3rd doc has no ip field
-                        index_writer.add_document(doc!(id_field=>id,
-                            bytes_field => id.to_le_bytes().as_slice(),
-                            multi_numbers=> id,
-                            multi_numbers => id,
-                            bool_field => (id % 2u64) != 0,
-                            i64_field => id as i64,
-                            f64_field => id as f64,
-                            date_field => DateTime::from_timestamp_secs(id as i64),
-                            multi_bools => (id % 2u64) != 0,
-                            multi_bools => (id % 2u64) == 0,
-                            text_field => id.to_string(),
-                            facet_field => facet,
-                            large_text_field => LOREM,
-                            multi_text_fields => multi_text_field_text1,
-                            multi_text_fields => multi_text_field_text2,
-                            multi_text_fields => multi_text_field_text3,
-                        ))?;
-                    } else {
-                        index_writer.add_document(doc!(id_field=>id,
-                            bytes_field => id.to_le_bytes().as_slice(),
-                            ip_field => ip_from_id,
-                            ips_field => ip_from_id,
-                            ips_field => ip_from_id,
-                            multi_numbers=> id,
-                            multi_numbers => id,
-                            bool_field => (id % 2u64) != 0,
-                            i64_field => id as i64,
-                            f64_field => id as f64,
-                            date_field => DateTime::from_timestamp_secs(id as i64),
-                            multi_bools => (id % 2u64) != 0,
-                            multi_bools => (id % 2u64) == 0,
-                            text_field => id.to_string(),
-                            facet_field => facet,
-                            large_text_field => LOREM,
-                            multi_text_fields => multi_text_field_text1,
-                            multi_text_fields => multi_text_field_text2,
-                            multi_text_fields => multi_text_field_text3,
-                        ))?;
-                    }
+                    index_writer.add_document(doc!(id_field=>id,
+                        bytes_field => id.to_le_bytes().as_slice(),
+                        multi_numbers=> id,
+                        multi_numbers => id,
+                        bool_field => (id % 2u64) != 0,
+                        multi_bools => (id % 2u64) != 0,
+                        multi_bools => (id % 2u64) == 0,
+                        text_field => id.to_string(),
+                        facet_field => facet,
+                        large_text_field=> LOREM
+                    ))?;
                 }
                 IndexingOp::DeleteDoc { id } => {
                     index_writer.delete_term(Term::from_field_u64(id_field, id));
                 }
-                IndexingOp::DeleteDocQuery { id } => {
-                    let term = Term::from_field_u64(id_field, id);
-                    let query = TermQuery::new(term, Default::default());
-                    index_writer.delete_query(Box::new(query))?;
-                }
                 IndexingOp::Commit => {
                     index_writer.commit()?;
                 }
@@ -1760,7 +1578,7 @@ mod tests {
                 let ff_reader = segment_reader.fast_fields().u64(id_field).unwrap();
                 segment_reader
                     .doc_ids_alive()
-                    .map(move |doc| ff_reader.get_val(doc))
+                    .map(move |doc| ff_reader.get_val(doc as u64))
             })
             .collect();

@@ -1771,7 +1589,7 @@ mod tests {
                 let ff_reader = segment_reader.fast_fields().u64(id_field).unwrap();
                 segment_reader
                     .doc_ids_alive()
-                    .map(move |doc| ff_reader.get_val(doc))
+                    .map(move |doc| ff_reader.get_val(doc as u64))
             })
             .collect();

@@ -1797,60 +1615,6 @@ mod tests {
                 .collect::<HashSet<_>>()
         );

-        // Load all ips addr
-        let ips: HashSet<Ipv6Addr> = searcher
-            .segment_readers()
-            .iter()
-            .flat_map(|segment_reader| {
-                let ff_reader = segment_reader.fast_fields().ip_addr(ip_field).unwrap();
-                segment_reader.doc_ids_alive().flat_map(move |doc| {
-                    let val = ff_reader.get_val(doc);
-                    if val == Ipv6Addr::from_u128(0) {
-                        // TODO Fix null handling
-                        None
-                    } else {
-                        Some(val)
-                    }
-                })
-            })
-            .collect();
-
-        let expected_ips = expected_ids_and_num_occurrences
-            .keys()
-            .flat_map(|id| {
-                if !ip_exists(*id) {
-                    None
-                } else {
-                    Some(Ipv6Addr::from_u128(*id as u128))
-                }
-            })
-            .collect::<HashSet<_>>();
-        assert_eq!(ips, expected_ips);
-
-        let expected_ips = expected_ids_and_num_occurrences
-            .keys()
-            .filter_map(|id| {
-                if !ip_exists(*id) {
-                    None
-                } else {
-                    Some(Ipv6Addr::from_u128(*id as u128))
-                }
-            })
-            .collect::<HashSet<_>>();
-        let ips: HashSet<Ipv6Addr> = searcher
-            .segment_readers()
-            .iter()
-            .flat_map(|segment_reader| {
-                let ff_reader = segment_reader.fast_fields().ip_addrs(ips_field).unwrap();
-                segment_reader.doc_ids_alive().flat_map(move |doc| {
-                    let mut vals = vec![];
-                    ff_reader.get_vals(doc, &mut vals);
-                    vals.into_iter().filter(|val| val.to_u128() != 0) // TODO Fix null handling
-                })
-            })
-            .collect();
-        assert_eq!(ips, expected_ips);
-
         // multivalue fast field tests
         for segment_reader in searcher.segment_readers().iter() {
             let id_reader = segment_reader.fast_fields().u64(id_field).unwrap();
@@ -1861,7 +1625,7 @@ mod tests {
                 ff_reader.get_vals(doc, &mut vals);
                 assert_eq!(vals.len(), 2);
                 assert_eq!(vals[0], vals[1]);
-                assert_eq!(id_reader.get_val(doc), vals[0]);
+                assert_eq!(id_reader.get_val(doc as u64), vals[0]);

                 let mut bool_vals = vec![];
                 bool_ff_reader.get_vals(doc, &mut bool_vals);
@@ -1915,8 +1679,10 @@ mod tests {
             }
         }
         // test search
-        let do_search = |term: &str, field| {
-            let query = QueryParser::for_index(&index, vec![field])
+        let my_text_field = index.schema().get_field("text_field").unwrap();
+
+        let do_search = |term: &str| {
+            let query = QueryParser::for_index(&index, vec![my_text_field])
                 .parse_query(term)
                 .unwrap();
             let top_docs: Vec<(f32, DocAddress)> =
@@ -1925,80 +1691,11 @@ mod tests {
             top_docs.iter().map(|el| el.1).collect::<Vec<_>>()
         };

-        let do_search2 = |term: Term| {
-            let query = TermQuery::new(term, IndexRecordOption::Basic);
-            let top_docs: Vec<(f32, DocAddress)> =
-                searcher.search(&query, &TopDocs::with_limit(1000)).unwrap();
-
-            top_docs.iter().map(|el| el.1).collect::<Vec<_>>()
-        };
-
-        for (existing_id, count) in &expected_ids_and_num_occurrences {
-            let (existing_id, count) = (*existing_id, *count);
-            let get_num_hits = |field| do_search(&existing_id.to_string(), field).len() as u64;
-            assert_eq!(get_num_hits(text_field), count);
-            assert_eq!(get_num_hits(i64_field), count);
-            assert_eq!(get_num_hits(f64_field), count);
-            assert_eq!(get_num_hits(id_field), count);
-
-            // Test multi text
-            assert_eq!(
-                do_search("\"test1 test2\"", multi_text_fields).len(),
-                num_docs_expected
-            );
-            assert_eq!(
-                do_search("\"test2 test3\"", multi_text_fields).len(),
-                num_docs_expected
-            );
-
-            // Test bytes
-            let term = Term::from_field_bytes(bytes_field, existing_id.to_le_bytes().as_slice());
-            assert_eq!(do_search2(term).len() as u64, count);
-
-            // Test date
-            let term = Term::from_field_date(
-                date_field,
-                DateTime::from_timestamp_secs(existing_id as i64),
-            );
-            assert_eq!(do_search2(term).len() as u64, count);
+        for (existing_id, count) in expected_ids_and_num_occurrences {
+            assert_eq!(do_search(&existing_id.to_string()).len() as u64, count);
         }
-        for deleted_id in deleted_ids {
-            let assert_field = |field| {
-                assert_eq!(do_search(&deleted_id.to_string(), field).len() as u64, 0);
-            };
-            assert_field(text_field);
-            assert_field(f64_field);
-            assert_field(i64_field);
-            assert_field(id_field);
-
-            // Test bytes
-            let term = Term::from_field_bytes(bytes_field, deleted_id.to_le_bytes().as_slice());
-            assert_eq!(do_search2(term).len() as u64, 0);
-
-            // Test date
-            let term =
-                Term::from_field_date(date_field, DateTime::from_timestamp_secs(deleted_id as i64));
-            assert_eq!(do_search2(term).len() as u64, 0);
-        }
-        // search ip address
-        //
-        for (existing_id, count) in &expected_ids_and_num_occurrences {
-            let (existing_id, count) = (*existing_id, *count);
-            if !ip_exists(existing_id) {
-                continue;
-            }
-            let do_search_ip_field = |term: &str| do_search(term, ip_field).len() as u64;
-            let ip_addr = Ipv6Addr::from_u128(existing_id as u128);
-            // Test incoming ip as ipv6
-            assert_eq!(do_search_ip_field(&format!("\"{}\"", ip_addr)), count);
-
-            let term = Term::from_field_ip_addr(ip_field, ip_addr);
-            assert_eq!(do_search2(term).len() as u64, count);
-
-            // Test incoming ip as ipv4
-            if let Some(ip_addr) = ip_addr.to_ipv4_mapped() {
-                assert_eq!(do_search_ip_field(&format!("\"{}\"", ip_addr)), count);
-            }
+        for existing_id in deleted_ids {
+            assert_eq!(do_search(&existing_id.to_string()).len(), 0);
         }
         // test facets
         for segment_reader in searcher.segment_readers().iter() {
@@ -2012,7 +1709,7 @@ mod tests {
                 facet_reader
                     .facet_from_ord(facet_ords[0], &mut facet)
                     .unwrap();
-                let id = ff_reader.get_val(doc_id);
+                let id = ff_reader.get_val(doc_id as u64);
                 let facet_expected = Facet::from(&("/cola/".to_string() + &id.to_string()));

                 assert_eq!(facet, facet_expected);
@@ -2021,36 +1718,6 @@ mod tests {
         Ok(())
     }

-    #[test]
-    fn test_minimal() {
-        assert!(test_operation_strategy(
-            &[
-                IndexingOp::AddDoc { id: 23 },
-                IndexingOp::AddDoc { id: 13 },
-                IndexingOp::DeleteDoc { id: 13 }
-            ],
-            true,
-            false
-        )
-        .is_ok());
-
-        assert!(test_operation_strategy(
-            &[
-                IndexingOp::AddDoc { id: 23 },
-                IndexingOp::AddDoc { id: 13 },
-                IndexingOp::DeleteDoc { id: 13 }
-            ],
-            false,
-            false
-        )
-        .is_ok());
-    }
-
-    #[test]
-    fn test_minimal_sort_merge() {
-        assert!(test_operation_strategy(&[IndexingOp::AddDoc { id: 3 },], true, true).is_ok());
-    }
-
     proptest! {
         #![proptest_config(ProptestConfig::with_cases(20))]
         #[test]
@@ -2143,135 +1810,4 @@ mod tests {
         index_writer.commit()?;
         Ok(())
     }
-
-    #[test]
-    fn test_bug_1617_3() {
-        assert!(test_operation_strategy(
-            &[
-                IndexingOp::DeleteDoc { id: 0 },
-                IndexingOp::AddDoc { id: 6 },
-                IndexingOp::DeleteDocQuery { id: 11 },
-                IndexingOp::Commit,
-                IndexingOp::Merge,
-                IndexingOp::Commit,
-                IndexingOp::Commit
-            ],
-            false,
-            false
-        )
-        .is_ok());
-    }
-
-    #[test]
-    fn test_bug_1617_2() {
-        assert!(test_operation_strategy(
-            &[
-                IndexingOp::AddDoc { id: 13 },
-                IndexingOp::DeleteDoc { id: 13 },
-                IndexingOp::Commit,
-                IndexingOp::AddDoc { id: 30 },
-                IndexingOp::Commit,
-                IndexingOp::Merge,
-            ],
-            false,
-            true
-        )
-        .is_ok());
-    }
-
-    #[test]
-    fn test_bug_1617() -> crate::Result<()> {
-        let mut schema_builder = schema::Schema::builder();
-        let id_field = schema_builder.add_u64_field("id", INDEXED);
-
-        let schema = schema_builder.build();
-        let index = Index::builder().schema(schema).create_in_ram()?;
-        let mut index_writer = index.writer_for_tests()?;
-        index_writer.set_merge_policy(Box::new(NoMergePolicy));
-
-        let existing_id = 16u64;
-        let deleted_id = 13u64;
-        index_writer.add_document(doc!(
-            id_field=>existing_id,
-        ))?;
-        index_writer.add_document(doc!(
-            id_field=>deleted_id,
-        ))?;
-        index_writer.delete_term(Term::from_field_u64(id_field, deleted_id));
-        index_writer.commit()?;
-
-        // Merge
-        {
-            assert!(index_writer.wait_merging_threads().is_ok());
-            let mut index_writer = index.writer_for_tests()?;
-            let segment_ids = index
-                .searchable_segment_ids()
-                .expect("Searchable segments failed.");
-            index_writer.merge(&segment_ids).wait().unwrap();
-            assert!(index_writer.wait_merging_threads().is_ok());
-        }
-        let searcher = index.reader()?.searcher();
-
-        let query = TermQuery::new(
-            Term::from_field_u64(id_field, existing_id),
-            IndexRecordOption::Basic,
-        );
-        let top_docs: Vec<(f32, DocAddress)> =
-            searcher.search(&query, &TopDocs::with_limit(10)).unwrap();
-
-        assert_eq!(top_docs.len(), 1); // Fails
-
-        Ok(())
-    }
-
-    #[test]
-    fn test_bug_1618() -> crate::Result<()> {
-        let mut schema_builder = schema::Schema::builder();
-        let id_field = schema_builder.add_i64_field("id", INDEXED);
-
-        let schema = schema_builder.build();
-        let index = Index::builder().schema(schema).create_in_ram()?;
-        let mut index_writer = index.writer_for_tests()?;
-        index_writer.set_merge_policy(Box::new(NoMergePolicy));
-
-        index_writer.add_document(doc!(
-            id_field=>10i64,
-        ))?;
-        index_writer.add_document(doc!(
-            id_field=>30i64,
-        ))?;
-        index_writer.commit()?;
-
-        // Merge
-        {
-            assert!(index_writer.wait_merging_threads().is_ok());
-            let mut index_writer = index.writer_for_tests()?;
-            let segment_ids = index
-                .searchable_segment_ids()
-                .expect("Searchable segments failed.");
-            index_writer.merge(&segment_ids).wait().unwrap();
-            assert!(index_writer.wait_merging_threads().is_ok());
-        }
-        let searcher = index.reader()?.searcher();
-
-        let query = TermQuery::new(
-            Term::from_field_i64(id_field, 10i64),
-            IndexRecordOption::Basic,
-        );
-        let top_docs: Vec<(f32, DocAddress)> =
-            searcher.search(&query, &TopDocs::with_limit(10)).unwrap();
-
-        assert_eq!(top_docs.len(), 1); // Fails
-
-        let query = TermQuery::new(
-            Term::from_field_i64(id_field, 30i64),
-            IndexRecordOption::Basic,
-        );
-        let top_docs: Vec<(f32, DocAddress)> =
-            searcher.search(&query, &TopDocs::with_limit(10)).unwrap();
-
-        assert_eq!(top_docs.len(), 1); // Fails
-
-        Ok(())
-    }
 }
@@ -1,6 +1,6 @@
 use fastfield_codecs::MonotonicallyMappableToU64;
+use fnv::FnvHashMap;
 use murmurhash32::murmurhash2;
-use rustc_hash::FxHashMap;

 use crate::fastfield::FastValue;
 use crate::postings::{IndexingContext, IndexingPosition, PostingsWriter};
@@ -52,7 +52,7 @@ use crate::{DatePrecision, DateTime, DocId, Term};
 /// path map to the same index position as long as the probability is relatively low.
 #[derive(Default)]
 struct IndexingPositionsPerPath {
-    positions_per_path: FxHashMap<u32, IndexingPosition>,
+    positions_per_path: FnvHashMap<u32, IndexingPosition>,
 }

 impl IndexingPositionsPerPath {
@@ -242,12 +242,10 @@ pub(crate) fn set_string_and_get_terms(
 ) -> Vec<(usize, Term)> {
     let mut positions_and_terms = Vec::<(usize, Term)>::new();
     json_term_writer.close_path_and_set_type(Type::Str);
-    let term_num_bytes = json_term_writer.term_buffer.len_bytes();
+    let term_num_bytes = json_term_writer.term_buffer.as_slice().len();
     let mut token_stream = text_analyzer.token_stream(value);
     token_stream.process(&mut |token| {
-        json_term_writer
-            .term_buffer
-            .truncate_value_bytes(term_num_bytes);
+        json_term_writer.term_buffer.truncate(term_num_bytes);
         json_term_writer
             .term_buffer
             .append_bytes(token.text.as_bytes());
@@ -267,7 +265,7 @@ impl<'a> JsonTermWriter<'a> {
         json_path: &str,
         term_buffer: &'a mut Term,
     ) -> Self {
-        term_buffer.set_field_and_type(field, Type::Json);
+        term_buffer.set_field(Type::Json, field);
         let mut json_term_writer = Self::wrap(term_buffer);
         for segment in json_path.split('.') {
             json_term_writer.push_path_segment(segment);
@@ -278,7 +276,7 @@ impl<'a> JsonTermWriter<'a> {
     pub fn wrap(term_buffer: &'a mut Term) -> Self {
         term_buffer.clear_with_type(Type::Json);
         let mut path_stack = Vec::with_capacity(10);
-        path_stack.push(0);
+        path_stack.push(5);
         Self {
             term_buffer,
             path_stack,
@@ -287,28 +285,28 @@ impl<'a> JsonTermWriter<'a> {

     fn trim_to_end_of_path(&mut self) {
         let end_of_path = *self.path_stack.last().unwrap();
-        self.term_buffer.truncate_value_bytes(end_of_path);
+        self.term_buffer.truncate(end_of_path);
     }

     pub fn close_path_and_set_type(&mut self, typ: Type) {
         self.trim_to_end_of_path();
-        let buffer = self.term_buffer.value_bytes_mut();
+        let buffer = self.term_buffer.as_mut();
         let buffer_len = buffer.len();
         buffer[buffer_len - 1] = JSON_END_OF_PATH;
-        self.term_buffer.append_bytes(&[typ.to_code()]);
+        buffer.push(typ.to_code());
     }

     pub fn push_path_segment(&mut self, segment: &str) {
         // the path stack should never be empty.
         self.trim_to_end_of_path();
-        let buffer = self.term_buffer.value_bytes_mut();
+        let buffer = self.term_buffer.as_mut();
         let buffer_len = buffer.len();
         if self.path_stack.len() > 1 {
             buffer[buffer_len - 1] = JSON_PATH_SEGMENT_SEP;
         }
-        self.term_buffer.append_bytes(segment.as_bytes());
-        self.term_buffer.append_bytes(&[JSON_PATH_SEGMENT_SEP]);
-        self.path_stack.push(self.term_buffer.len_bytes());
+        buffer.extend(segment.as_bytes());
+        buffer.push(JSON_PATH_SEGMENT_SEP);
+        self.path_stack.push(buffer.len());
     }

     pub fn pop_path_segment(&mut self) {
@@ -320,8 +318,8 @@ impl<'a> JsonTermWriter<'a> {
     /// Returns the json path of the term being currently built.
     #[cfg(test)]
     pub(crate) fn path(&self) -> &[u8] {
-        let end_of_path = self.path_stack.last().cloned().unwrap_or(1);
-        &self.term().value_bytes()[..end_of_path - 1]
+        let end_of_path = self.path_stack.last().cloned().unwrap_or(6);
+        &self.term().as_slice()[5..end_of_path - 1]
     }

     pub fn set_fast_value<T: FastValue>(&mut self, val: T) {
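The `5`s introduced on the branch side above (`path_stack.push(5)`, `as_slice()[5..]`) encode the term buffer's header: a 4-byte field id plus a 1-byte type code precede the JSON path, so the path and value region starts at offset 5. A standalone sketch of that layout (separator constants and the `b'j'` type code are illustrative assumptions, not verified against the branch):

```rust
// Hypothetical flat builder for the byte layout the JsonTermWriter manages
// incrementally: [field id | type | path segments | end-of-path | value type | value].
const JSON_PATH_SEGMENT_SEP: u8 = 1;
const JSON_END_OF_PATH: u8 = 0;

fn json_term(field_id: u32, path: &[&str], value_type: u8, value: &[u8]) -> Vec<u8> {
    let mut buf = Vec::new();
    buf.extend_from_slice(&field_id.to_be_bytes()); // bytes 0..4: field id
    buf.push(b'j'); // byte 4: the term's own type code (Json)
    for (i, seg) in path.iter().enumerate() {
        if i > 0 {
            buf.push(JSON_PATH_SEGMENT_SEP);
        }
        buf.extend_from_slice(seg.as_bytes());
    }
    buf.push(JSON_END_OF_PATH); // close the path...
    buf.push(value_type); // ...and tag the value that follows
    buf.extend_from_slice(value);
    buf
}

fn main() {
    let term = json_term(1, &["attributes", "color"], b's', b"red");
    assert_eq!(&term[..4], &1u32.to_be_bytes()); // header occupies offsets 0..5
    assert_eq!(term[4], b'j');
}
```

This also explains `unwrap_or(6)` in `path()`: an empty path stack means the path region spans just the header plus one byte.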
@@ -334,13 +332,14 @@ impl<'a> JsonTermWriter<'a> {
             val.to_u64()
         };
         self.term_buffer
-            .append_bytes(value.to_be_bytes().as_slice());
+            .as_mut()
+            .extend_from_slice(value.to_be_bytes().as_slice());
     }

     #[cfg(test)]
     pub(crate) fn set_str(&mut self, text: &str) {
         self.close_path_and_set_type(Type::Str);
-        self.term_buffer.append_bytes(text.as_bytes());
+        self.term_buffer.as_mut().extend_from_slice(text.as_bytes());
     }

     pub fn term(&self) -> &Term {
@@ -357,7 +356,8 @@ mod tests {
     #[test]
     fn test_json_writer() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
+        let mut term = Term::new();
+        term.set_field(Type::Json, field);
         let mut json_writer = JsonTermWriter::wrap(&mut term);
         json_writer.push_path_segment("attributes");
         json_writer.push_path_segment("color");
@@ -391,7 +391,8 @@ mod tests {
     #[test]
     fn test_string_term() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
+        let mut term = Term::new();
+        term.set_field(Type::Json, field);
         let mut json_writer = JsonTermWriter::wrap(&mut term);
         json_writer.push_path_segment("color");
         json_writer.set_str("red");
@@ -404,7 +405,8 @@ mod tests {
     #[test]
     fn test_i64_term() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
+        let mut term = Term::new();
+        term.set_field(Type::Json, field);
         let mut json_writer = JsonTermWriter::wrap(&mut term);
         json_writer.push_path_segment("color");
         json_writer.set_fast_value(-4i64);
@@ -417,7 +419,8 @@ mod tests {
     #[test]
     fn test_u64_term() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
+        let mut term = Term::new();
+        term.set_field(Type::Json, field);
         let mut json_writer = JsonTermWriter::wrap(&mut term);
         json_writer.push_path_segment("color");
         json_writer.set_fast_value(4u64);
@@ -430,7 +433,8 @@ mod tests {
     #[test]
     fn test_f64_term() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
+        let mut term = Term::new();
+        term.set_field(Type::Json, field);
         let mut json_writer = JsonTermWriter::wrap(&mut term);
         json_writer.push_path_segment("color");
         json_writer.set_fast_value(4.0f64);
@@ -443,7 +447,8 @@ mod tests {
     #[test]
     fn test_bool_term() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
+        let mut term = Term::new();
+        term.set_field(Type::Json, field);
         let mut json_writer = JsonTermWriter::wrap(&mut term);
         json_writer.push_path_segment("color");
         json_writer.set_fast_value(true);
@@ -456,7 +461,8 @@ mod tests {
     #[test]
     fn test_push_after_set_path_segment() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
+        let mut term = Term::new();
+        term.set_field(Type::Json, field);
         let mut json_writer = JsonTermWriter::wrap(&mut term);
         json_writer.push_path_segment("attribute");
         json_writer.set_str("something");
@@ -471,7 +477,8 @@ mod tests {
     #[test]
     fn test_pop_segment() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
|
let mut term = Term::new();
|
||||||
|
term.set_field(Type::Json, field);
|
||||||
let mut json_writer = JsonTermWriter::wrap(&mut term);
|
let mut json_writer = JsonTermWriter::wrap(&mut term);
|
||||||
json_writer.push_path_segment("color");
|
json_writer.push_path_segment("color");
|
||||||
json_writer.push_path_segment("hue");
|
json_writer.push_path_segment("hue");
|
||||||
@@ -486,7 +493,8 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_json_writer_path() {
|
fn test_json_writer_path() {
|
||||||
let field = Field::from_field_id(1);
|
let field = Field::from_field_id(1);
|
||||||
let mut term = Term::with_type_and_field(Type::Json, field);
|
let mut term = Term::new();
|
||||||
|
term.set_field(Type::Json, field);
|
||||||
let mut json_writer = JsonTermWriter::wrap(&mut term);
|
let mut json_writer = JsonTermWriter::wrap(&mut term);
|
||||||
json_writer.push_path_segment("color");
|
json_writer.push_path_segment("color");
|
||||||
assert_eq!(json_writer.path(), b"color");
|
assert_eq!(json_writer.path(), b"color");
|
||||||
|
|||||||
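The `path()` hunk above only adds up if the term buffer now carries its 4-byte field id and 1-byte type tag inline, ahead of the JSON path: that is why the slice starts at byte 5 and the default end-of-path moves from 1 to 6. A self-contained sketch of that layout follows; the separator value and helper name are illustrative, not tantivy's actual definitions.

```rust
// Illustrative only: a 5-byte header (4-byte field id + 1-byte type tag)
// followed by path segments, each terminated by a separator byte, then the
// value bytes.
const JSON_PATH_SEGMENT_SEP: u8 = 1u8;

fn encode_json_term(field_id: u32, type_tag: u8, path: &[&str], value: &[u8]) -> Vec<u8> {
    let mut buffer = Vec::new();
    buffer.extend_from_slice(&field_id.to_be_bytes()); // bytes 0..4: field id
    buffer.push(type_tag); // byte 4: value type
    for segment in path {
        buffer.extend_from_slice(segment.as_bytes());
        buffer.push(JSON_PATH_SEGMENT_SEP); // every segment ends with the separator
    }
    buffer.extend_from_slice(value);
    buffer
}

fn main() {
    let term = encode_json_term(1, b'j', &["attributes", "color"], b"red");
    // The JSON path starts right after the 5-byte header.
    assert_eq!(&term[5..16], b"attributes\x01");
}
```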
```diff
@@ -6,19 +6,16 @@ use fastfield_codecs::VecColumn;
 use itertools::Itertools;
 use measure_time::debug_time;
 
-use super::flat_map_with_buffer::FlatMapWithBufferIter;
-use super::sorted_doc_id_multivalue_column::RemappedDocIdMultiValueIndexColumn;
 use crate::core::{Segment, SegmentReader};
 use crate::docset::{DocSet, TERMINATED};
 use crate::error::DataCorruption;
 use crate::fastfield::{
-    get_fastfield_codecs_for_multivalue, AliveBitSet, Column, CompositeFastFieldSerializer,
-    MultiValueLength, MultiValuedFastFieldReader, MultiValuedU128FastFieldReader,
+    AliveBitSet, Column, CompositeFastFieldSerializer, MultiValueLength, MultiValuedFastFieldReader,
 };
 use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
 use crate::indexer::doc_id_mapping::{expect_field_id_for_sort_field, SegmentDocIdMapping};
-use crate::indexer::sorted_doc_id_column::RemappedDocIdColumn;
-use crate::indexer::sorted_doc_id_multivalue_column::RemappedDocIdMultiValueColumn;
+use crate::indexer::sorted_doc_id_column::SortedDocIdColumn;
+use crate::indexer::sorted_doc_id_multivalue_column::SortedDocIdMultiValueColumn;
 use crate::indexer::SegmentSerializer;
 use crate::postings::{InvertedIndexSerializer, Postings, SegmentPostings};
 use crate::schema::{Cardinality, Field, FieldType, Schema};
@@ -296,24 +293,6 @@ impl IndexMerger {
                     self.write_bytes_fast_field(field, fast_field_serializer, doc_id_mapping)?;
                 }
             }
-            FieldType::IpAddr(options) => match options.get_fastfield_cardinality() {
-                Some(Cardinality::SingleValue) => {
-                    self.write_u128_single_fast_field(
-                        field,
-                        fast_field_serializer,
-                        doc_id_mapping,
-                    )?;
-                }
-                Some(Cardinality::MultiValues) => {
-                    self.write_u128_multi_fast_field(
-                        field,
-                        fast_field_serializer,
-                        doc_id_mapping,
-                    )?;
-                }
-                None => {}
-            },
-
             FieldType::JsonObject(_) | FieldType::Facet(_) | FieldType::Str(_) => {
                 // We don't handle json fast field for the moment
                 // They can be implemented using what is done
@@ -324,91 +303,6 @@ impl IndexMerger {
         Ok(())
     }
 
-    // used to merge `u128` single fast fields.
-    fn write_u128_multi_fast_field(
-        &self,
-        field: Field,
-        fast_field_serializer: &mut CompositeFastFieldSerializer,
-        doc_id_mapping: &SegmentDocIdMapping,
-    ) -> crate::Result<()> {
-        let segment_and_ff_readers: Vec<(&SegmentReader, MultiValuedU128FastFieldReader<u128>)> =
-            self.readers
-                .iter()
-                .map(|segment_reader| {
-                    let ff_reader: MultiValuedU128FastFieldReader<u128> =
-                        segment_reader.fast_fields().u128s(field).expect(
-                            "Failed to find index for multivalued field. This is a bug in \
-                             tantivy, please report.",
-                        );
-                    (segment_reader, ff_reader)
-                })
-                .collect::<Vec<_>>();
-
-        Self::write_1_n_fast_field_idx_generic(
-            field,
-            fast_field_serializer,
-            doc_id_mapping,
-            &segment_and_ff_readers,
-        )?;
-
-        let fast_field_readers = segment_and_ff_readers
-            .into_iter()
-            .map(|(_, ff_reader)| ff_reader)
-            .collect::<Vec<_>>();
-
-        let iter_gen = || {
-            doc_id_mapping
-                .iter_old_doc_addrs()
-                .flat_map_with_buffer(|doc_addr, buffer| {
-                    let fast_field_reader = &fast_field_readers[doc_addr.segment_ord as usize];
-                    fast_field_reader.get_vals(doc_addr.doc_id, buffer);
-                })
-        };
-
-        fast_field_serializer.create_u128_fast_field_with_idx(
-            field,
-            iter_gen,
-            doc_id_mapping.len() as u32,
-            1,
-        )?;
-
-        Ok(())
-    }
-
-    // used to merge `u128` single fast fields.
-    fn write_u128_single_fast_field(
-        &self,
-        field: Field,
-        fast_field_serializer: &mut CompositeFastFieldSerializer,
-        doc_id_mapping: &SegmentDocIdMapping,
-    ) -> crate::Result<()> {
-        let fast_field_readers = self
-            .readers
-            .iter()
-            .map(|reader| {
-                let u128_reader: Arc<dyn Column<u128>> = reader.fast_fields().u128(field).expect(
-                    "Failed to find a reader for single fast field. This is a tantivy bug and it \
-                     should never happen.",
-                );
-                u128_reader
-            })
-            .collect::<Vec<_>>();
-
-        let iter_gen = || {
-            doc_id_mapping.iter_old_doc_addrs().map(|doc_addr| {
-                let fast_field_reader = &fast_field_readers[doc_addr.segment_ord as usize];
-                fast_field_reader.get_val(doc_addr.doc_id)
-            })
-        };
-        fast_field_serializer.create_u128_fast_field_with_idx(
-            field,
-            iter_gen,
-            doc_id_mapping.len() as u32,
-            0,
-        )?;
-        Ok(())
-    }
-
     // used both to merge field norms, `u64/i64` single fast fields.
     fn write_single_fast_field(
         &self,
@@ -416,7 +310,7 @@ impl IndexMerger {
         fast_field_serializer: &mut CompositeFastFieldSerializer,
         doc_id_mapping: &SegmentDocIdMapping,
     ) -> crate::Result<()> {
-        let fast_field_accessor = RemappedDocIdColumn::new(&self.readers, doc_id_mapping, field);
+        let fast_field_accessor = SortedDocIdColumn::new(&self.readers, doc_id_mapping, field);
         fast_field_serializer.create_auto_detect_u64_fast_field(field, fast_field_accessor)?;
 
         Ok(())
@@ -510,8 +404,8 @@ impl IndexMerger {
         doc_id_reader_pair
             .into_iter()
             .kmerge_by(|a, b| {
-                let val1 = a.2.get_val(a.0);
-                let val2 = b.2.get_val(b.0);
+                let val1 = a.2.get_val(a.0 as u64);
+                let val2 = b.2.get_val(b.0 as u64);
                 if sort_by_field.order == Order::Asc {
                     val1 < val2
                 } else {
@@ -529,17 +423,33 @@ impl IndexMerger {
     // Creating the index file to point into the data, generic over `BytesFastFieldReader` and
     // `MultiValuedFastFieldReader`
     //
-    fn write_1_n_fast_field_idx_generic<T: MultiValueLength + Send + Sync>(
+    fn write_1_n_fast_field_idx_generic<T: MultiValueLength>(
         field: Field,
         fast_field_serializer: &mut CompositeFastFieldSerializer,
         doc_id_mapping: &SegmentDocIdMapping,
-        segment_and_ff_readers: &[(&SegmentReader, T)],
-    ) -> crate::Result<()> {
-        let column =
-            RemappedDocIdMultiValueIndexColumn::new(segment_and_ff_readers, doc_id_mapping);
+        reader_and_field_accessors: &[(&SegmentReader, T)],
+    ) -> crate::Result<Vec<u64>> {
+        // We can now create our `idx` serializer, and in a second pass,
+        // can effectively push the different indexes.
 
-        fast_field_serializer.create_auto_detect_u64_fast_field(field, column)?;
-        Ok(())
+        // copying into a temp vec is not ideal, but the fast field codec api requires random
+        // access, which is used in the estimation. It's possible to 1. calculate random
+        // access on the fly or 2. change the codec api to make random access optional, but
+        // they both have also major drawbacks.
+
+        let mut offsets = Vec::with_capacity(doc_id_mapping.len());
+        let mut offset = 0;
+        for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
+            let reader = &reader_and_field_accessors[old_doc_addr.segment_ord as usize].1;
+            offsets.push(offset);
+            offset += reader.get_len(old_doc_addr.doc_id) as u64;
+        }
+        offsets.push(offset);
+
+        let fastfield_accessor = VecColumn::from(&offsets[..]);
+
+        fast_field_serializer.create_auto_detect_u64_fast_field(field, fastfield_accessor)?;
+        Ok(offsets)
     }
     /// Returns the fastfield index (index for the data, not the data).
     fn write_multi_value_fast_field_idx(
@@ -547,8 +457,8 @@ impl IndexMerger {
         field: Field,
         fast_field_serializer: &mut CompositeFastFieldSerializer,
         doc_id_mapping: &SegmentDocIdMapping,
-    ) -> crate::Result<()> {
-        let segment_and_ff_readers = self
+    ) -> crate::Result<Vec<u64>> {
+        let reader_ordinal_and_field_accessors = self
             .readers
             .iter()
             .map(|reader| {
@@ -567,7 +477,7 @@ impl IndexMerger {
             field,
             fast_field_serializer,
             doc_id_mapping,
-            &segment_and_ff_readers,
+            &reader_ordinal_and_field_accessors,
         )
     }
 
@@ -616,12 +526,7 @@ impl IndexMerger {
             }
 
             let col = VecColumn::from(&vals[..]);
-            fast_field_serializer.create_auto_detect_u64_fast_field_with_idx_and_codecs(
-                field,
-                col,
-                1,
-                &get_fastfield_codecs_for_multivalue(),
-            )?;
+            fast_field_serializer.create_auto_detect_u64_fast_field_with_idx(field, col, 1)?;
         }
         Ok(())
     }
@@ -656,21 +561,20 @@ impl IndexMerger {
         fast_field_serializer: &mut CompositeFastFieldSerializer,
         doc_id_mapping: &SegmentDocIdMapping,
     ) -> crate::Result<()> {
-        // Multifastfield consists of 2 fastfields.
+        // Multifastfield consists in 2 fastfields.
         // The first serves as an index into the second one and is strictly increasing.
         // The second contains the actual values.
 
         // First we merge the idx fast field.
+        let offsets =
             self.write_multi_value_fast_field_idx(field, fast_field_serializer, doc_id_mapping)?;
 
         let fastfield_accessor =
-            RemappedDocIdMultiValueColumn::new(&self.readers, doc_id_mapping, field);
-        fast_field_serializer.create_auto_detect_u64_fast_field_with_idx_and_codecs(
+            SortedDocIdMultiValueColumn::new(&self.readers, doc_id_mapping, &offsets, field);
+        fast_field_serializer.create_auto_detect_u64_fast_field_with_idx(
            field,
             fastfield_accessor,
             1,
-            &get_fastfield_codecs_for_multivalue(),
         )?;
 
         Ok(())
@@ -682,7 +586,7 @@ impl IndexMerger {
         fast_field_serializer: &mut CompositeFastFieldSerializer,
         doc_id_mapping: &SegmentDocIdMapping,
     ) -> crate::Result<()> {
-        let segment_and_ff_readers = self
+        let reader_and_field_accessors = self
             .readers
             .iter()
             .map(|reader| {
@@ -693,17 +597,17 @@ impl IndexMerger {
                 (reader, bytes_reader)
             })
             .collect::<Vec<_>>();
 
         Self::write_1_n_fast_field_idx_generic(
             field,
             fast_field_serializer,
             doc_id_mapping,
-            &segment_and_ff_readers,
+            &reader_and_field_accessors,
         )?;
 
         let mut serialize_vals = fast_field_serializer.new_bytes_fast_field(field);
 
         for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
-            let bytes_reader = &segment_and_ff_readers[old_doc_addr.segment_ord as usize].1;
+            let bytes_reader = &reader_and_field_accessors[old_doc_addr.segment_ord as usize].1;
             let val = bytes_reader.get_bytes(old_doc_addr.doc_id);
             serialize_vals.write_all(val)?;
         }
```
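The `Vec<u64>` returned by the reworked `write_1_n_fast_field_idx_generic` above is a plain prefix sum over per-document value counts, with one trailing sentinel holding the total. A self-contained sketch of that first pass, with `MultiValueLength::get_len` replaced by a plain slice of counts:

```rust
// Turn per-document value counts into a monotonically increasing offset
// column. `offsets[d]..offsets[d + 1]` is the value range of document `d`.
fn build_offsets(value_counts_per_doc: &[u64]) -> Vec<u64> {
    let mut offsets = Vec::with_capacity(value_counts_per_doc.len() + 1);
    let mut offset = 0u64;
    for &count in value_counts_per_doc {
        offsets.push(offset); // start of this doc's value range
        offset += count;
    }
    offsets.push(offset); // sentinel: total number of values
    offsets
}

fn main() {
    // Docs with 2, 0 and 3 values respectively.
    let offsets = build_offsets(&[2, 0, 3]);
    assert_eq!(offsets, vec![0, 2, 2, 5]);
    // Doc 2's values live in the half-open range offsets[2]..offsets[3].
    assert_eq!((offsets[2], offsets[3]), (2, 5));
}
```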
```diff
@@ -190,13 +190,13 @@ mod tests {
         assert_eq!(fast_field.get_val(4), 2u64);
         assert_eq!(fast_field.get_val(3), 3u64);
         if force_disjunct_segment_sort_values {
-            assert_eq!(fast_field.get_val(2), 20u64);
-            assert_eq!(fast_field.get_val(1), 100u64);
+            assert_eq!(fast_field.get_val(2u64), 20u64);
+            assert_eq!(fast_field.get_val(1u64), 100u64);
         } else {
-            assert_eq!(fast_field.get_val(2), 10u64);
-            assert_eq!(fast_field.get_val(1), 20u64);
+            assert_eq!(fast_field.get_val(2u64), 10u64);
+            assert_eq!(fast_field.get_val(1u64), 20u64);
         }
-        assert_eq!(fast_field.get_val(0), 1_000u64);
+        assert_eq!(fast_field.get_val(0u64), 1_000u64);
 
         // test new field norm mapping
         {
@@ -545,7 +545,7 @@ mod bench_sorted_index_merge {
             // add values in order of the new doc_ids
             let mut val = 0;
             for (doc_id, _reader, field_reader) in sorted_doc_ids {
-                val = field_reader.get_val(doc_id);
+                val = field_reader.get_val(doc_id as u64);
             }
 
             val
```
```diff
@@ -3,7 +3,6 @@ pub mod delete_queue;
 pub mod demuxer;
 pub mod doc_id_mapping;
 mod doc_opstamp_mapping;
-mod flat_map_with_buffer;
 pub mod index_writer;
 mod index_writer_status;
 mod json_term_writer;
```
```diff
@@ -1,11 +1,20 @@
-use crate::query::Weight;
 use crate::schema::{Document, Term};
 use crate::Opstamp;
 
 /// Timestamped Delete operation.
+#[derive(Clone, Eq, PartialEq, Debug)]
 pub struct DeleteOperation {
     pub opstamp: Opstamp,
-    pub target: Box<dyn Weight>,
+    pub term: Term,
+}
+
+impl Default for DeleteOperation {
+    fn default() -> Self {
+        DeleteOperation {
+            opstamp: 0u64,
+            term: Term::new(),
+        }
+    }
 }
 
 /// Timestamped Add operation.
```
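The `Box<dyn Weight>` to `Term` swap is what makes the `Clone`, `Eq` and `Default` derives above possible, since trait objects support none of them. A minimal sketch of that difference, with `Term` modeled as a plain byte buffer rather than tantivy's real type:

```rust
// Illustrative stand-ins: a plain-data term makes the whole operation
// plain data too, so a queue of delete operations can be duplicated and
// compared cheaply.
#[derive(Clone, Eq, PartialEq, Debug, Default)]
struct Term(Vec<u8>);

#[derive(Clone, Eq, PartialEq, Debug, Default)]
struct DeleteOperation {
    opstamp: u64,
    term: Term,
}

fn main() {
    let op = DeleteOperation {
        opstamp: 42,
        term: Term(b"title\0rust".to_vec()),
    };
    // Possible only because every field is Clone; a Box<dyn Trait> field
    // would have blocked this derive.
    let replayed = op.clone();
    assert_eq!(op, replayed);
}
```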
```diff
@@ -17,7 +17,7 @@ impl<'a> PreparedCommit<'a> {
         }
     }
 
-    /// Returns the opstamp associated with the prepared commit.
+    /// Returns the opstamp associated to the prepared commit.
     pub fn opstamp(&self) -> Opstamp {
         self.opstamp
     }
```
```diff
@@ -24,27 +24,12 @@ impl SegmentSerializer {
         // In the merge case this is not necessary because we can kmerge the already sorted
         // segments
         let remapping_required = segment.index().settings().sort_by_field.is_some() && !is_in_merge;
-        let settings = segment.index().settings().clone();
-        let store_writer = if remapping_required {
-            let store_write = segment.open_write(SegmentComponent::TempStore)?;
-            StoreWriter::new(
-                store_write,
-                crate::store::Compressor::None,
-                // We want fast random access on the docs, so we choose a small block size.
-                // If this is zero, the skip index will contain too many checkpoints and
-                // therefore will be relatively slow.
-                16000,
-                settings.docstore_compress_dedicated_thread,
-            )?
+        let store_component = if remapping_required {
+            SegmentComponent::TempStore
         } else {
-            let store_write = segment.open_write(SegmentComponent::Store)?;
-            StoreWriter::new(
-                store_write,
-                settings.docstore_compression,
-                settings.docstore_blocksize,
-                settings.docstore_compress_dedicated_thread,
-            )?
+            SegmentComponent::Store
         };
+        let store_write = segment.open_write(store_component)?;
 
         let fast_field_write = segment.open_write(SegmentComponent::FastFields)?;
         let fast_field_serializer = CompositeFastFieldSerializer::from_write(fast_field_write)?;
@@ -53,6 +38,13 @@ impl SegmentSerializer {
         let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;
 
         let postings_serializer = InvertedIndexSerializer::open(&mut segment)?;
+        let settings = segment.index().settings();
+        let store_writer = StoreWriter::new(
+            store_write,
+            settings.docstore_compression,
+            settings.docstore_blocksize,
+            settings.docstore_compress_dedicated_thread,
+        )?;
         Ok(SegmentSerializer {
             segment,
             store_writer,
```
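The `SegmentSerializer::new` rewrite above folds two `StoreWriter` construction sites into one: only the segment component to open depends on whether remapping is required, and a single writer is built afterwards with uniform settings. A sketch of that shape with stand-in types, not tantivy's:

```rust
// Illustrative stand-ins for SegmentComponent and StoreWriter.
#[derive(Debug, Clone, Copy, PartialEq)]
enum SegmentComponent {
    Store,
    TempStore,
}

struct StoreWriter {
    component: SegmentComponent,
    compression: &'static str,
    block_size: usize,
}

fn open_store_writer(remapping_required: bool) -> StoreWriter {
    // One decision point picks the component...
    let store_component = if remapping_required {
        SegmentComponent::TempStore
    } else {
        SegmentComponent::Store
    };
    // ...then a single construction site applies the shared configuration.
    StoreWriter {
        component: store_component,
        compression: "lz4",
        block_size: 16_384,
    }
}

fn main() {
    let writer = open_store_writer(true);
    assert_eq!(writer.component, SegmentComponent::TempStore);
    assert_eq!(writer.compression, "lz4");
    assert_eq!(writer.block_size, 16_384);
}
```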
```diff
@@ -133,15 +133,15 @@ fn merge(
 
 /// Advanced: Merges a list of segments from different indices in a new index.
 ///
-/// Returns `TantivyError` if the indices list is empty or their
+/// Returns `TantivyError` if the the indices list is empty or their
 /// schemas don't match.
 ///
 /// `output_directory`: is assumed to be empty.
 ///
 /// # Warning
 /// This function does NOT check or take the `IndexWriter` is running. It is not
-/// meant to work if you have an `IndexWriter` running for the origin indices, or
-/// the destination `Index`.
+/// meant to work if you have an IndexWriter running for the origin indices, or
+/// the destination Index.
 #[doc(hidden)]
 pub fn merge_indices<T: Into<Box<dyn Directory>>>(
     indices: &[Index],
@@ -179,15 +179,15 @@ pub fn merge_indices<T: Into<Box<dyn Directory>>>(
 /// Advanced: Merges a list of segments from different indices in a new index.
 /// Additional you can provide a delete bitset for each segment to ignore doc_ids.
 ///
-/// Returns `TantivyError` if the indices list is empty or their
+/// Returns `TantivyError` if the the indices list is empty or their
 /// schemas don't match.
 ///
 /// `output_directory`: is assumed to be empty.
 ///
 /// # Warning
 /// This function does NOT check or take the `IndexWriter` is running. It is not
-/// meant to work if you have an `IndexWriter` running for the origin indices, or
-/// the destination `Index`.
+/// meant to work if you have an IndexWriter running for the origin indices, or
+/// the destination Index.
 #[doc(hidden)]
 pub fn merge_filtered_segments<T: Into<Box<dyn Directory>>>(
     segments: &[Segment],
```
```diff
@@ -1,5 +1,4 @@
 use fastfield_codecs::MonotonicallyMappableToU64;
-use itertools::Itertools;
 
 use super::doc_id_mapping::{get_doc_id_mapping_from_field, DocIdMapping};
 use super::operation::AddOperation;
@@ -12,9 +11,11 @@ use crate::postings::{
     compute_table_size, serialize_postings, IndexingContext, IndexingPosition,
     PerFieldPostingsWriter, PostingsWriter,
 };
-use crate::schema::{FieldEntry, FieldType, Schema, Term, Value};
+use crate::schema::{FieldEntry, FieldType, FieldValue, Schema, Term, Value};
 use crate::store::{StoreReader, StoreWriter};
-use crate::tokenizer::{FacetTokenizer, PreTokenizedStream, TextAnalyzer, Tokenizer};
+use crate::tokenizer::{
+    BoxTokenStream, FacetTokenizer, PreTokenizedStream, TextAnalyzer, Tokenizer,
+};
 use crate::{DatePrecision, DocId, Document, Opstamp, SegmentComponent};
 
 /// Computes the initial size of the hash table.
@@ -114,7 +115,7 @@ impl SegmentWriter {
             fast_field_writers: FastFieldsWriter::from_schema(&schema),
             doc_opstamps: Vec::with_capacity(1_000),
             per_field_text_analyzers,
-            term_buffer: Term::with_capacity(16),
+            term_buffer: Term::new(),
             schema,
         })
     }
@@ -156,12 +157,7 @@ impl SegmentWriter {
 
     fn index_document(&mut self, doc: &Document) -> crate::Result<()> {
         let doc_id = self.max_doc;
-        let vals_grouped_by_field = doc
-            .field_values()
-            .sorted_by_key(|el| el.field())
-            .group_by(|el| el.field());
-        for (field, field_values) in &vals_grouped_by_field {
-            let values = field_values.map(|field_value| field_value.value());
+        for (field, values) in doc.get_sorted_field_values() {
             let field_entry = self.schema.get_field_entry(field);
             let make_schema_error = || {
                 crate::TantivyError::SchemaError(format!(
@@ -173,12 +169,10 @@ impl SegmentWriter {
             if !field_entry.is_indexed() {
                 continue;
             }
-
             let (term_buffer, ctx) = (&mut self.term_buffer, &mut self.ctx);
             let postings_writer: &mut dyn PostingsWriter =
                 self.per_field_postings_writers.get_for_field_mut(field);
-            term_buffer.clear_with_field_and_type(field_entry.field_type().value_type(), field);
-
+            term_buffer.set_field(field_entry.field_type().value_type(), field);
             match *field_entry.field_type() {
                 FieldType::Facet(_) => {
                     for value in values {
@@ -203,23 +197,35 @@ impl SegmentWriter {
                     }
                 }
                 FieldType::Str(_) => {
-                    let mut indexing_position = IndexingPosition::default();
+                    let mut token_streams: Vec<BoxTokenStream> = vec![];
+                    let mut offsets = vec![];
+                    let mut total_offset = 0;
+
                     for value in values {
-                        let mut token_stream = match value {
+                        match value {
                             Value::PreTokStr(tok_str) => {
-                                PreTokenizedStream::from(tok_str.clone()).into()
+                                offsets.push(total_offset);
+                                if let Some(last_token) = tok_str.tokens.last() {
+                                    total_offset += last_token.offset_to;
+                                }
+                                token_streams
+                                    .push(PreTokenizedStream::from(tok_str.clone()).into());
                             }
                             Value::Str(ref text) => {
                                 let text_analyzer =
                                     &self.per_field_text_analyzers[field.field_id() as usize];
-                                text_analyzer.token_stream(text)
+                                offsets.push(total_offset);
+                                total_offset += text.len();
+                                token_streams.push(text_analyzer.token_stream(text));
                             }
-                            _ => {
-                                continue;
-                            }
-                        };
-
-                        assert!(term_buffer.is_empty());
+                            _ => (),
+                        }
+                    }
+
+                    let mut indexing_position = IndexingPosition::default();
+
+                    for mut token_stream in token_streams {
+                        assert_eq!(term_buffer.as_slice().len(), 5);
                         postings_writer.index_text(
                             doc_id,
                             &mut *token_stream,
@@ -235,81 +241,52 @@ impl SegmentWriter {
                         }
                     }
                 }
                 FieldType::U64(_) => {
-                    let mut num_vals = 0;
                     for value in values {
-                        num_vals += 1;
                         let u64_val = value.as_u64().ok_or_else(make_schema_error)?;
                         term_buffer.set_u64(u64_val);
                         postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
                     }
-                    if field_entry.has_fieldnorms() {
-                        self.fieldnorms_writer.record(doc_id, field, num_vals);
-                    }
                 }
                 FieldType::Date(_) => {
-                    let mut num_vals = 0;
                     for value in values {
-                        num_vals += 1;
                         let date_val = value.as_date().ok_or_else(make_schema_error)?;
                         term_buffer.set_u64(date_val.truncate(DatePrecision::Seconds).to_u64());
                         postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
                     }
-                    if field_entry.has_fieldnorms() {
-                        self.fieldnorms_writer.record(doc_id, field, num_vals);
-                    }
                 }
                 FieldType::I64(_) => {
-                    let mut num_vals = 0;
                     for value in values {
-                        num_vals += 1;
                         let i64_val = value.as_i64().ok_or_else(make_schema_error)?;
                         term_buffer.set_i64(i64_val);
                         postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
                     }
-                    if field_entry.has_fieldnorms() {
-                        self.fieldnorms_writer.record(doc_id, field, num_vals);
-                    }
                 }
                 FieldType::F64(_) => {
-                    let mut num_vals = 0;
                     for value in values {
-                        num_vals += 1;
                         let f64_val = value.as_f64().ok_or_else(make_schema_error)?;
                         term_buffer.set_f64(f64_val);
                         postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
                     }
-                    if field_entry.has_fieldnorms() {
-                        self.fieldnorms_writer.record(doc_id, field, num_vals);
-                    }
                 }
                 FieldType::Bool(_) => {
-                    let mut num_vals = 0;
                     for value in values {
-                        num_vals += 1;
                         let bool_val = value.as_bool().ok_or_else(make_schema_error)?;
                         term_buffer.set_bool(bool_val);
                         postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
                     }
-                    if field_entry.has_fieldnorms() {
-                        self.fieldnorms_writer.record(doc_id, field, num_vals);
-                    }
                 }
                 FieldType::Bytes(_) => {
-                    let mut num_vals = 0;
                     for value in values {
-                        num_vals += 1;
                         let bytes = value.as_bytes().ok_or_else(make_schema_error)?;
                         term_buffer.set_bytes(bytes);
                         postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
                     }
-                    if field_entry.has_fieldnorms() {
-                        self.fieldnorms_writer.record(doc_id, field, num_vals);
-                    }
                 }
                 FieldType::JsonObject(_) => {
                     let text_analyzer = &self.per_field_text_analyzers[field.field_id() as usize];
-                    let json_values_it =
-                        values.map(|value| value.as_json().ok_or_else(make_schema_error));
+                    let json_values_it = values
+                        .iter()
+                        .map(|value| value.as_json().ok_or_else(make_schema_error));
                     index_json_values(
                         doc_id,
                         json_values_it,
@@ -319,18 +296,6 @@ impl SegmentWriter {
                         ctx,
                     )?;
                 }
-                FieldType::IpAddr(_) => {
-                    let mut num_vals = 0;
-                    for value in values {
-                        num_vals += 1;
-                        let ip_addr = value.as_ip_addr().ok_or_else(make_schema_error)?;
-                        term_buffer.set_ip_addr(ip_addr);
-                        postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
-                    }
-                    if field_entry.has_fieldnorms() {
-                        self.fieldnorms_writer.record(doc_id, field, num_vals);
-                    }
-                }
             }
         }
         Ok(())
@@ -342,10 +307,11 @@ impl SegmentWriter {
    pub fn add_document(&mut self, add_operation: AddOperation) -> crate::Result<()> {
         let doc = add_operation.document;
         self.doc_opstamps.push(add_operation.opstamp);
-        self.fast_field_writers.add_document(&doc)?;
+        self.fast_field_writers.add_document(&doc);
         self.index_document(&doc)?;
+        let prepared_doc = prepare_doc_for_store(doc, &self.schema);
         let doc_writer = self.segment_serializer.get_store_writer();
-        doc_writer.store(&doc, &self.schema)?;
+        doc_writer.store(&prepared_doc)?;
         self.max_doc += 1;
         Ok(())
     }
@@ -408,9 +374,9 @@ fn remap_and_write(
         doc_id_map,
     )?;
 
+    debug!("resort-docstore");
     // finalize temp docstore and create version, which reflects the doc_id_map
     if let Some(doc_id_map) = doc_id_map {
-        debug!("resort-docstore");
         let store_write = serializer
             .segment_mut()
             .open_write(SegmentComponent::Store)?;
@@ -427,8 +393,7 @@ fn remap_and_write(
             serializer
                 .segment()
                 .open_read(SegmentComponent::TempStore)?,
-            1, /* The docstore is configured to have one doc per block, and each doc is accessed
-                * only once: we don't need caching. */
+            50,
         )?;
         for old_doc_id in doc_id_map.iter_old_doc_ids() {
             let doc_bytes = store_read.get_document_bytes(old_doc_id)?;
@@ -442,24 +407,40 @@ fn remap_and_write(
     Ok(())
 }
 
+/// Prepares Document for being stored in the document store
+///
+/// Method transforms PreTokenizedString values into String
+/// values.
+pub fn prepare_doc_for_store(doc: Document, schema: &Schema) -> Document {
+    Document::from(
+        doc.into_iter()
+            .filter(|field_value| schema.get_field_entry(field_value.field()).is_stored())
+            .map(|field_value| match field_value {
+                FieldValue {
+                    field,
+                    value: Value::PreTokStr(pre_tokenized_text),
+                } => FieldValue {
+                    field,
+                    value: Value::Str(pre_tokenized_text.text),
+                },
+                field_value => field_value,
+            })
+            .collect::<Vec<_>>(),
+    )
+}
+
 #[cfg(test)]
 mod tests {
-    use std::path::Path;
 
     use super::compute_initial_table_size;
     use crate::collector::Count;
-    use crate::directory::RamDirectory;
     use crate::indexer::json_term_writer::JsonTermWriter;
     use crate::postings::TermInfo;
     use crate::query::PhraseQuery;
     use crate::schema::{IndexRecordOption, Schema, Type, STORED, STRING, TEXT};
-    use crate::store::{Compressor, StoreReader, StoreWriter};
     use crate::time::format_description::well_known::Rfc3339;
     use crate::time::OffsetDateTime;
     use crate::tokenizer::{PreTokenizedString, Token};
-    use crate::{
-        DateTime, Directory, DocAddress, DocSet, Document, Index, Postings, Term, TERMINATED,
-    };
+    use crate::{DateTime, DocAddress, DocSet, Document, Index, Postings, Term, TERMINATED};
 
     #[test]
     fn test_hashmap_size() {
@@ -489,29 +470,14 @@ mod tests {
 
         doc.add_pre_tokenized_text(text_field, pre_tokenized_text);
         doc.add_text(text_field, "title");
+        let prepared_doc = super::prepare_doc_for_store(doc, &schema);
 
-        let path = Path::new("store");
-        let directory = RamDirectory::create();
-        let store_wrt = directory.open_write(path).unwrap();
-
-        let mut store_writer = StoreWriter::new(store_wrt, Compressor::None, 0, false).unwrap();
-        store_writer.store(&doc, &schema).unwrap();
-        store_writer.close().unwrap();
-
-        let reader = StoreReader::open(directory.open_read(path).unwrap(), 0).unwrap();
-        let doc = reader.get(0).unwrap();
-
-        assert_eq!(doc.value_count(), 2);
-        let mut field_value_iter = doc.field_values();
+        assert_eq!(prepared_doc.field_values().len(), 2);
+        assert_eq!(prepared_doc.field_values()[0].value().as_text(), Some("A"));
         assert_eq!(
-            field_value_iter.next().unwrap().value().as_text(),
-            Some("A")
-        );
-        assert_eq!(
-            field_value_iter.next().unwrap().value().as_text(),
+            prepared_doc.field_values()[1].value().as_text(),
             Some("title")
         );
-        assert!(field_value_iter.next().is_none());
     }
 
     #[test]
@@ -561,7 +527,8 @@ mod tests {
         let inv_idx = segment_reader.inverted_index(json_field).unwrap();
         let term_dict = inv_idx.terms();
 
-        let mut term = Term::with_type_and_field(Type::Json, json_field);
+        let mut term = Term::new();
+        term.set_field(Type::Json, json_field);
         let mut term_stream = term_dict.stream().unwrap();
 
         let mut json_term_writer = JsonTermWriter::wrap(&mut term);
@@ -654,7 +621,8 @@ mod tests {
         let searcher = reader.searcher();
         let segment_reader = searcher.segment_reader(0u32);
         let inv_index = segment_reader.inverted_index(json_field).unwrap();
-        let mut term = Term::with_type_and_field(Type::Json, json_field);
+        let mut term = Term::new();
+        term.set_field(Type::Json, json_field);
         let mut json_term_writer = JsonTermWriter::wrap(&mut term);
         json_term_writer.push_path_segment("mykey");
         json_term_writer.set_str("token");
@@ -698,7 +666,8 @@ mod tests {
         let searcher = reader.searcher();
         let segment_reader = searcher.segment_reader(0u32);
         let inv_index = segment_reader.inverted_index(json_field).unwrap();
-        let mut term = Term::with_type_and_field(Type::Json, json_field);
+        let mut term = Term::new();
+        term.set_field(Type::Json, json_field);
         let mut json_term_writer = JsonTermWriter::wrap(&mut term);
         json_term_writer.push_path_segment("mykey");
         json_term_writer.set_str("two tokens");
@@ -743,7 +712,8 @@ mod tests {
         writer.commit().unwrap();
         let reader = index.reader().unwrap();
         let searcher = reader.searcher();
-        let mut term = Term::with_type_and_field(Type::Json, json_field);
+        let mut term = Term::new();
+        term.set_field(Type::Json, json_field);
         let mut json_term_writer = JsonTermWriter::wrap(&mut term);
         json_term_writer.push_path_segment("mykey");
         json_term_writer.push_path_segment("field");
@@ -758,124 +728,4 @@ mod tests {
         let phrase_query = PhraseQuery::new(vec![nothello_term, happy_term]);
         assert_eq!(searcher.search(&phrase_query, &Count).unwrap(), 0);
     }
-
-    #[test]
-    fn test_bug_regression_1629_position_when_array_with_a_field_value_that_does_not_contain_any_token(
-    ) {
-        // We experienced a bug where we would have a position underflow when computing position
-        // delta in an horrible corner case.
-        //
-        // See the commit with this unit test if you want the details.
-        let mut schema_builder = Schema::builder();
-        let text = schema_builder.add_text_field("text", TEXT);
-        let schema = schema_builder.build();
-        let doc = schema
-            .parse_document(r#"{"text": [ "bbb", "aaa", "", "aaa"]}"#)
-            .unwrap();
-        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
-        index_writer.add_document(doc).unwrap();
-        // On debug this did panic on the underflow
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
-        let searcher = reader.searcher();
-        let seg_reader = searcher.segment_reader(0);
-        let inv_index = seg_reader.inverted_index(text).unwrap();
-        let term = Term::from_field_text(text, "aaa");
-        let mut postings = inv_index
-            .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
-            .unwrap()
-            .unwrap();
-        assert_eq!(postings.doc(), 0u32);
-        let mut positions = Vec::new();
-        postings.positions(&mut positions);
-        // On release this was [2, 1]. (< note the decreasing values)
-        assert_eq!(positions, &[2, 5]);
-    }
-
-    #[test]
-    fn test_multiple_field_value_and_long_tokens() {
-        let mut schema_builder = Schema::builder();
-        let text = schema_builder.add_text_field("text", TEXT);
-        let schema = schema_builder.build();
-        let mut doc = Document::default();
-        // This is a bit of a contrived example.
-        let tokens = PreTokenizedString {
-            text: "roller-coaster".to_string(),
-            tokens: vec![Token {
-                offset_from: 0,
-                offset_to: 14,
-                position: 0,
-                text: "rollercoaster".to_string(),
-                position_length: 2,
-            }],
-        };
-        doc.add_pre_tokenized_text(text, tokens.clone());
-        doc.add_pre_tokenized_text(text, tokens);
-        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
-        index_writer.add_document(doc).unwrap();
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
-        let searcher = reader.searcher();
-        let seg_reader = searcher.segment_reader(0);
-        let inv_index = seg_reader.inverted_index(text).unwrap();
-        let term = Term::from_field_text(text, "rollercoaster");
-        let mut postings = inv_index
-            .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
-            .unwrap()
-            .unwrap();
-        assert_eq!(postings.doc(), 0u32);
-        let mut positions = Vec::new();
-        postings.positions(&mut positions);
-        assert_eq!(positions, &[0, 3]); //< as opposed to 0, 2 if we had a position length of 1.
-    }
-
-    #[test]
-    fn test_last_token_not_ending_last() {
-        let mut schema_builder = Schema::builder();
-        let text = schema_builder.add_text_field("text", TEXT);
-        let schema = schema_builder.build();
-        let mut doc = Document::default();
-        // This is a bit of a contrived example.
-        let tokens = PreTokenizedString {
-            text: "contrived-example".to_string(), //< I can't think of a use case where this corner case happens in real life.
-            tokens: vec![
-                Token {
-                    // Not the last token, yet ends after the last token.
-                    offset_from: 0,
-                    offset_to: 14,
-                    position: 0,
-                    text: "long_token".to_string(),
-                    position_length: 3,
-                },
-                Token {
-                    offset_from: 0,
-                    offset_to: 14,
-                    position: 1,
-                    text: "short".to_string(),
-                    position_length: 1,
-                },
-            ],
-        };
-        doc.add_pre_tokenized_text(text, tokens);
-        doc.add_text(text, "hello");
-        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
-        index_writer.add_document(doc).unwrap();
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
-        let searcher = reader.searcher();
-        let seg_reader = searcher.segment_reader(0);
-        let inv_index = seg_reader.inverted_index(text).unwrap();
-        let term = Term::from_field_text(text, "hello");
-        let mut postings = inv_index
-            .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
-            .unwrap()
-            .unwrap();
-        assert_eq!(postings.doc(), 0u32);
-        let mut positions = Vec::new();
-        postings.positions(&mut positions);
-        assert_eq!(positions, &[4]); //< as opposed to 3 if we had a position length of 1.
-    }
 }
```
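The reworked `FieldType::Str` arm above collects all token streams first and tracks a running byte offset per field value, so tokens coming from different values of the same field keep distinct positions. A sketch of just the offset bookkeeping, with tokenization elided; names are illustrative:

```rust
// For each value of a multi-valued text field, record where its tokens start
// relative to the concatenation of all values. The real code accumulates
// `BoxTokenStream`s alongside; here only the offsets are computed.
fn offsets_for_values(values: &[&str]) -> Vec<usize> {
    let mut offsets = Vec::with_capacity(values.len());
    let mut total_offset = 0;
    for value in values {
        offsets.push(total_offset); // where this value's tokens start
        total_offset += value.len();
    }
    offsets
}

fn main() {
    let values = ["bbb aaa", "", "aaa"];
    // The empty value consumes no offset range, so the next value starts
    // at the same byte offset.
    assert_eq!(offsets_for_values(&values), vec![0, 7, 7]);
}
```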
@@ -5,14 +5,14 @@ use itertools::Itertools;
 
 use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
 use crate::schema::Field;
-use crate::SegmentReader;
+use crate::{DocAddress, SegmentReader};
 
-pub(crate) struct RemappedDocIdColumn<'a> {
+pub(crate) struct SortedDocIdColumn<'a> {
     doc_id_mapping: &'a SegmentDocIdMapping,
     fast_field_readers: Vec<Arc<dyn Column<u64>>>,
     min_value: u64,
     max_value: u64,
-    num_vals: u32,
+    num_vals: u64,
 }
 
 fn compute_min_max_val(
@@ -32,12 +32,12 @@ fn compute_min_max_val(
         // we need to recompute the max / min
         segment_reader
             .doc_ids_alive()
-            .map(|doc_id| u64_reader.get_val(doc_id))
+            .map(|doc_id| u64_reader.get_val(doc_id as u64))
             .minmax()
             .into_option()
     }
 
-impl<'a> RemappedDocIdColumn<'a> {
+impl<'a> SortedDocIdColumn<'a> {
     pub(crate) fn new(
         readers: &'a [SegmentReader],
         doc_id_mapping: &'a SegmentDocIdMapping,
@@ -68,19 +68,23 @@ impl<'a> RemappedDocIdColumn<'a> {
             })
             .collect::<Vec<_>>();
 
-        RemappedDocIdColumn {
+        SortedDocIdColumn {
            doc_id_mapping,
            fast_field_readers,
            min_value,
            max_value,
-            num_vals: doc_id_mapping.len() as u32,
+            num_vals: doc_id_mapping.len() as u64,
        }
    }
 }
 
-impl<'a> Column for RemappedDocIdColumn<'a> {
-    fn get_val(&self, _doc: u32) -> u64 {
-        unimplemented!()
+impl<'a> Column for SortedDocIdColumn<'a> {
+    fn get_val(&self, doc: u64) -> u64 {
+        let DocAddress {
+            doc_id,
+            segment_ord,
+        } = self.doc_id_mapping.get_old_doc_addr(doc as u32);
+        self.fast_field_readers[segment_ord as usize].get_val(doc_id as u64)
     }
 
     fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
@@ -90,7 +94,7 @@ impl<'a> Column for RemappedDocIdColumn<'a> {
             .map(|old_doc_addr| {
                 let fast_field_reader =
                     &self.fast_field_readers[old_doc_addr.segment_ord as usize];
-                fast_field_reader.get_val(old_doc_addr.doc_id)
+                fast_field_reader.get_val(old_doc_addr.doc_id as u64)
             }),
         )
    }
@@ -102,7 +106,7 @@ impl<'a> Column for RemappedDocIdColumn<'a> {
         self.max_value
     }
 
-    fn num_vals(&self) -> u32 {
+    fn num_vals(&self) -> u64 {
         self.num_vals
     }
 }
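For readers skimming the hunk above: get_val translates a doc id in the merged order back to its old (segment_ord, doc_id) address, then reads the value from that segment's reader. A self-contained sketch of the same idea, with stand-in types (not tantivy's actual SegmentDocIdMapping or Column):

// Stand-in types; mapping plays the role of SegmentDocIdMapping and
// segments plays the role of the per-segment fast field readers.
struct OldDocAddr {
    segment_ord: usize,
    doc_id: u32,
}

struct RemappedColumn {
    mapping: Vec<OldDocAddr>, // new doc id -> old address
    segments: Vec<Vec<u64>>,  // per-segment values
}

impl RemappedColumn {
    fn get_val(&self, new_doc: usize) -> u64 {
        // Translate the merged doc id back, then read from the old segment.
        let old = &self.mapping[new_doc];
        self.segments[old.segment_ord][old.doc_id as usize]
    }
}

fn main() {
    let col = RemappedColumn {
        mapping: vec![
            OldDocAddr { segment_ord: 1, doc_id: 0 },
            OldDocAddr { segment_ord: 0, doc_id: 1 },
        ],
        segments: vec![vec![10, 20], vec![30]],
    };
    assert_eq!(col.get_val(0), 30); // segment 1, doc 0
    assert_eq!(col.get_val(1), 20); // segment 0, doc 1
}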
@@ -2,24 +2,26 @@ use std::cmp;
 
 use fastfield_codecs::Column;
 
-use super::flat_map_with_buffer::FlatMapWithBufferIter;
 use crate::fastfield::{MultiValueLength, MultiValuedFastFieldReader};
 use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
 use crate::schema::Field;
-use crate::{DocAddress, SegmentReader};
+use crate::{DocId, SegmentReader};
 
-pub(crate) struct RemappedDocIdMultiValueColumn<'a> {
+// We can now initialize our serializer, and push it the different values
+pub(crate) struct SortedDocIdMultiValueColumn<'a> {
     doc_id_mapping: &'a SegmentDocIdMapping,
     fast_field_readers: Vec<MultiValuedFastFieldReader<u64>>,
+    offsets: &'a [u64],
     min_value: u64,
     max_value: u64,
-    num_vals: u32,
+    num_vals: u64,
 }
 
-impl<'a> RemappedDocIdMultiValueColumn<'a> {
+impl<'a> SortedDocIdMultiValueColumn<'a> {
     pub(crate) fn new(
         readers: &'a [SegmentReader],
         doc_id_mapping: &'a SegmentDocIdMapping,
+        offsets: &'a [u64],
         field: Field,
     ) -> Self {
         // Our values are bitpacked and we need to know what should be
@@ -56,28 +58,52 @@ impl<'a> RemappedDocIdMultiValueColumn<'a> {
             min_value = 0;
             max_value = 0;
         }
-        RemappedDocIdMultiValueColumn {
+        SortedDocIdMultiValueColumn {
             doc_id_mapping,
             fast_field_readers,
+            offsets,
             min_value,
             max_value,
-            num_vals: num_vals as u32,
+            num_vals: num_vals as u64,
         }
     }
 }
 
-impl<'a> Column for RemappedDocIdMultiValueColumn<'a> {
-    fn get_val(&self, _pos: u32) -> u64 {
-        unimplemented!()
+impl<'a> Column for SortedDocIdMultiValueColumn<'a> {
+    fn get_val(&self, pos: u64) -> u64 {
+        // use the offsets index to find the doc_id which will contain the position.
+        // the offsets are strictly increasing so we can do a simple search on it.
+        let new_doc_id: DocId = self
+            .offsets
+            .iter()
+            .position(|&offset| offset > pos)
+            .expect("pos is out of bounds") as DocId
+            - 1u32;
+
+        // now we need to find the position of `pos` in the multivalued bucket
+        let num_pos_covered_until_now = self.offsets[new_doc_id as usize];
+        let pos_in_values = pos - num_pos_covered_until_now;
+
+        let old_doc_addr = self.doc_id_mapping.get_old_doc_addr(new_doc_id);
+        let num_vals =
+            self.fast_field_readers[old_doc_addr.segment_ord as usize].get_len(old_doc_addr.doc_id);
+        assert!(num_vals >= pos_in_values);
+        let mut vals = Vec::new();
+        self.fast_field_readers[old_doc_addr.segment_ord as usize]
+            .get_vals(old_doc_addr.doc_id, &mut vals);
+
+        vals[pos_in_values as usize]
     }
 
     fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
         Box::new(
             self.doc_id_mapping
                 .iter_old_doc_addrs()
-                .flat_map_with_buffer(|old_doc_addr: DocAddress, buffer| {
+                .flat_map(|old_doc_addr| {
                     let ff_reader = &self.fast_field_readers[old_doc_addr.segment_ord as usize];
-                    ff_reader.get_vals(old_doc_addr.doc_id, buffer);
+                    let mut vals = Vec::new();
+                    ff_reader.get_vals(old_doc_addr.doc_id, &mut vals);
+                    vals.into_iter()
                 }),
         )
    }
@@ -89,80 +115,7 @@ impl<'a> Column for RemappedDocIdMultiValueColumn<'a> {
         self.max_value
     }
 
-    fn num_vals(&self) -> u32 {
-        self.num_vals
-    }
-}
-
-pub(crate) struct RemappedDocIdMultiValueIndexColumn<'a, T: MultiValueLength> {
-    doc_id_mapping: &'a SegmentDocIdMapping,
-    multi_value_length_readers: Vec<&'a T>,
-    min_value: u64,
-    max_value: u64,
-    num_vals: u32,
-}
-
-impl<'a, T: MultiValueLength> RemappedDocIdMultiValueIndexColumn<'a, T> {
-    pub(crate) fn new(
-        segment_and_ff_readers: &'a [(&'a SegmentReader, T)],
-        doc_id_mapping: &'a SegmentDocIdMapping,
-    ) -> Self {
-        // We go through a complete first pass to compute the minimum and the
-        // maximum value and initialize our Column.
-        let mut num_vals = 0;
-        let min_value = 0;
-        let mut max_value = 0;
-        let mut multi_value_length_readers = Vec::with_capacity(segment_and_ff_readers.len());
-        for segment_and_ff_reader in segment_and_ff_readers {
-            let segment_reader = segment_and_ff_reader.0;
-            let multi_value_length_reader = &segment_and_ff_reader.1;
-            if !segment_reader.has_deletes() {
-                max_value += multi_value_length_reader.get_total_len();
-            } else {
-                for doc in segment_reader.doc_ids_alive() {
-                    max_value += multi_value_length_reader.get_len(doc);
-                }
-            }
-            num_vals += segment_reader.num_docs();
-            multi_value_length_readers.push(multi_value_length_reader);
-        }
-        Self {
-            doc_id_mapping,
-            multi_value_length_readers,
-            min_value,
-            max_value,
-            num_vals,
-        }
-    }
-}
-
-impl<'a, T: MultiValueLength + Send + Sync> Column for RemappedDocIdMultiValueIndexColumn<'a, T> {
-    fn get_val(&self, _pos: u32) -> u64 {
-        unimplemented!()
-    }
-
-    fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
-        let mut offset = 0;
-        Box::new(
-            std::iter::once(0).chain(self.doc_id_mapping.iter_old_doc_addrs().map(
-                move |old_doc_addr| {
-                    let ff_reader =
-                        &self.multi_value_length_readers[old_doc_addr.segment_ord as usize];
-                    offset += ff_reader.get_len(old_doc_addr.doc_id);
-                    offset
-                },
-            )),
-        )
-    }
-    fn min_value(&self) -> u64 {
-        self.min_value
-    }
-
-    fn max_value(&self) -> u64 {
-        self.max_value
-    }
-
-    fn num_vals(&self) -> u32 {
+    fn num_vals(&self) -> u64 {
         self.num_vals
     }
 }
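The offsets-index idea in the hunk above: offsets[d] holds how many values precede doc d, with a final entry equal to the total count, so the doc owning a flat value position is the last doc whose offset does not exceed it. A stand-alone sketch of that lookup with stand-in types; since the offsets are non-decreasing, a binary search (e.g. partition_point) would also work in place of the linear scan used in the hunk:

// offsets[d] = number of values in all docs before doc d; one trailing
// entry equals the total number of values across all docs.
fn doc_for_position(offsets: &[u64], pos: u64) -> (usize, u64) {
    // The first offset strictly greater than `pos` marks the next doc,
    // so the owning doc is the one just before it.
    let doc = offsets
        .iter()
        .position(|&offset| offset > pos)
        .expect("pos is out of bounds")
        - 1;
    // Second component: position of `pos` inside that doc's bucket.
    (doc, pos - offsets[doc])
}

fn main() {
    // Three docs with 2, 0 and 3 values respectively.
    let offsets = [0u64, 2, 2, 5];
    assert_eq!(doc_for_position(&offsets, 0), (0, 0));
    assert_eq!(doc_for_position(&offsets, 1), (0, 1));
    assert_eq!(doc_for_position(&offsets, 2), (2, 0)); // doc 1 is empty
    assert_eq!(doc_for_position(&offsets, 4), (2, 2));
}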
@@ -311,7 +311,7 @@ pub use crate::postings::Postings;
 pub use crate::schema::{DateOptions, DatePrecision, Document, Term};
 
 /// Index format version.
-const INDEX_FORMAT_VERSION: u32 = 5;
+const INDEX_FORMAT_VERSION: u32 = 4;
 
 /// Structure version for the index.
 #[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -819,7 +819,7 @@ pub mod tests {
     fn test_indexedfield_not_in_documents() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
-        let absent_field = schema_builder.add_text_field("absent_text", TEXT);
+        let absent_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         let mut index_writer = index.writer_for_tests()?;
@@ -1001,7 +1001,7 @@ pub mod tests {
         let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
         let fast_field_float = schema_builder.add_f64_field("float", FAST);
         let text_field = schema_builder.add_text_field("text", TEXT);
-        let stored_int_field = schema_builder.add_u64_field("stored_int", STORED);
+        let stored_int_field = schema_builder.add_u64_field("text", STORED);
         let schema = schema_builder.build();
 
         let index = Index::create_in_ram(schema);
@@ -106,7 +106,7 @@ impl BlockDecoder {
 
 pub trait VIntEncoder {
     /// Compresses an array of `u32` integers,
-    /// using [delta-encoding](https://en.wikipedia.org/wiki/Delta_encoding)
+    /// using [delta-encoding](https://en.wikipedia.org/wiki/Delta_ encoding)
     /// and variable bytes encoding.
     ///
     /// The method takes an array of ints to compress, and returns
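As a reference for the doc comment above: delta-encoding stores the gaps between sorted doc ids, and variable-byte encoding spends one byte per 7 value bits, so small gaps stay small on disk. An illustrative sketch only, not tantivy's actual codec (its byte layout and termination convention may differ):

// Variable-byte encoding: 7 value bits per byte, high bit set on the
// last byte of each integer (one possible convention).
fn vint_encode(mut v: u32, out: &mut Vec<u8>) {
    loop {
        let byte = (v & 0x7f) as u8;
        v >>= 7;
        if v == 0 {
            out.push(byte | 0x80);
            return;
        }
        out.push(byte);
    }
}

fn delta_vint_encode(sorted_doc_ids: &[u32]) -> Vec<u8> {
    let mut out = Vec::new();
    let mut prev = 0u32;
    for &doc in sorted_doc_ids {
        // Sorted doc ids yield small deltas, which vint keeps short.
        vint_encode(doc - prev, &mut out);
        prev = doc;
    }
    out
}

fn main() {
    // Deltas are 3, 2, 250: the first two fit in one byte each,
    // the last one takes two bytes.
    let bytes = delta_vint_encode(&[3, 5, 255]);
    assert_eq!(bytes.len(), 4);
}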
@@ -3,7 +3,7 @@ use std::io;
 use crate::fastfield::MultiValuedFastFieldWriter;
 use crate::indexer::doc_id_mapping::DocIdMapping;
 use crate::postings::postings_writer::SpecializedPostingsWriter;
-use crate::postings::recorder::{BufferLender, DocIdRecorder, Recorder};
+use crate::postings::recorder::{BufferLender, NothingRecorder, Recorder};
 use crate::postings::stacker::Addr;
 use crate::postings::{
     FieldSerializer, IndexingContext, IndexingPosition, PostingsWriter, UnorderedTermId,
@@ -16,7 +16,7 @@ use crate::{DocId, Term};
 #[derive(Default)]
 pub(crate) struct JsonPostingsWriter<Rec: Recorder> {
     str_posting_writer: SpecializedPostingsWriter<Rec>,
-    non_str_posting_writer: SpecializedPostingsWriter<DocIdRecorder>,
+    non_str_posting_writer: SpecializedPostingsWriter<NothingRecorder>,
 }
 
 impl<Rec: Recorder> From<JsonPostingsWriter<Rec>> for Box<dyn PostingsWriter> {
@@ -77,7 +77,7 @@ impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
                 serializer,
             )?;
         } else {
-            SpecializedPostingsWriter::<DocIdRecorder>::serialize_one_term(
+            SpecializedPostingsWriter::<NothingRecorder>::serialize_one_term(
                 term,
                 *addr,
                 doc_id_map,
@@ -31,7 +31,7 @@ pub use self::term_info::TermInfo;
 
 pub(crate) type UnorderedTermId = u64;
 
-#[allow(clippy::enum_variant_names)]
+#[cfg_attr(feature = "cargo-clippy", allow(clippy::enum_variant_names))]
 #[derive(Debug, PartialEq, Clone, Copy, Eq)]
 pub(crate) enum FreqReadingOption {
     NoFreq,
@@ -1,6 +1,6 @@
 use crate::postings::json_postings_writer::JsonPostingsWriter;
 use crate::postings::postings_writer::SpecializedPostingsWriter;
-use crate::postings::recorder::{DocIdRecorder, TermFrequencyRecorder, TfAndPositionRecorder};
+use crate::postings::recorder::{NothingRecorder, TermFrequencyRecorder, TfAndPositionRecorder};
 use crate::postings::PostingsWriter;
 use crate::schema::{Field, FieldEntry, FieldType, IndexRecordOption, Schema};
 
@@ -34,7 +34,7 @@ fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> Box<dyn Postings
             .get_indexing_options()
             .map(|indexing_options| match indexing_options.index_option() {
                 IndexRecordOption::Basic => {
-                    SpecializedPostingsWriter::<DocIdRecorder>::default().into()
+                    SpecializedPostingsWriter::<NothingRecorder>::default().into()
                 }
                 IndexRecordOption::WithFreqs => {
                     SpecializedPostingsWriter::<TermFrequencyRecorder>::default().into()
@@ -43,20 +43,19 @@ fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> Box<dyn Postings
                     SpecializedPostingsWriter::<TfAndPositionRecorder>::default().into()
                 }
             })
-            .unwrap_or_else(|| SpecializedPostingsWriter::<DocIdRecorder>::default().into()),
+            .unwrap_or_else(|| SpecializedPostingsWriter::<NothingRecorder>::default().into()),
         FieldType::U64(_)
         | FieldType::I64(_)
        | FieldType::F64(_)
        | FieldType::Bool(_)
        | FieldType::Date(_)
        | FieldType::Bytes(_)
-        | FieldType::IpAddr(_)
-        | FieldType::Facet(_) => Box::new(SpecializedPostingsWriter::<DocIdRecorder>::default()),
+        | FieldType::Facet(_) => Box::new(SpecializedPostingsWriter::<NothingRecorder>::default()),
        FieldType::JsonObject(ref json_object_options) => {
            if let Some(text_indexing_option) = json_object_options.get_text_indexing_options() {
                match text_indexing_option.index_option() {
                    IndexRecordOption::Basic => {
-                        JsonPostingsWriter::<DocIdRecorder>::default().into()
+                        JsonPostingsWriter::<NothingRecorder>::default().into()
                    }
                    IndexRecordOption::WithFreqs => {
                        JsonPostingsWriter::<TermFrequencyRecorder>::default().into()
@@ -66,7 +65,7 @@ fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> Box<dyn Postings
                 }
             }
         } else {
-            JsonPostingsWriter::<DocIdRecorder>::default().into()
+            JsonPostingsWriter::<NothingRecorder>::default().into()
         }
     }
 }
@@ -3,7 +3,7 @@ use std::io;
 use std::marker::PhantomData;
 use std::ops::Range;
 
-use rustc_hash::FxHashMap;
+use fnv::FnvHashMap;
 
 use super::stacker::Addr;
 use crate::fastfield::MultiValuedFastFieldWriter;
@@ -56,12 +56,12 @@ pub(crate) fn serialize_postings(
     doc_id_map: Option<&DocIdMapping>,
     schema: &Schema,
     serializer: &mut InvertedIndexSerializer,
-) -> crate::Result<HashMap<Field, FxHashMap<UnorderedTermId, TermOrdinal>>> {
+) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
     let mut term_offsets: Vec<(Term<&[u8]>, Addr, UnorderedTermId)> =
         Vec::with_capacity(ctx.term_index.len());
     term_offsets.extend(ctx.term_index.iter());
     term_offsets.sort_unstable_by_key(|(k, _, _)| k.clone());
-    let mut unordered_term_mappings: HashMap<Field, FxHashMap<UnorderedTermId, TermOrdinal>> =
+    let mut unordered_term_mappings: HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>> =
         HashMap::new();
 
     let field_offsets = make_field_partition(&term_offsets);
@@ -74,7 +74,7 @@ pub(crate) fn serialize_postings(
         let unordered_term_ids = term_offsets[byte_offsets.clone()]
             .iter()
             .map(|&(_, _, bucket)| bucket);
-        let mapping: FxHashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
+        let mapping: FnvHashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
             .enumerate()
             .map(|(term_ord, unord_term_id)| {
                 (unord_term_id as UnorderedTermId, term_ord as TermOrdinal)
@@ -89,7 +89,6 @@ pub(crate) fn serialize_postings(
             | FieldType::Bool(_) => {}
             FieldType::Bytes(_) => {}
             FieldType::JsonObject(_) => {}
-            FieldType::IpAddr(_) => {}
         }
 
         let postings_writer = per_field_postings_writers.get_for_field(field);
@@ -153,9 +152,9 @@ pub(crate) trait PostingsWriter: Send + Sync {
         indexing_position: &mut IndexingPosition,
         mut term_id_fast_field_writer_opt: Option<&mut MultiValuedFastFieldWriter>,
     ) {
-        let end_of_path_idx = term_buffer.len_bytes();
+        let end_of_path_idx = term_buffer.as_slice().len();
         let mut num_tokens = 0;
-        let mut end_position = indexing_position.end_position;
+        let mut end_position = 0;
         token_stream.process(&mut |token: &Token| {
             // We skip all tokens with a len greater than u16.
             if token.text.len() > MAX_TOKEN_LEN {
@@ -167,10 +166,10 @@ pub(crate) trait PostingsWriter: Send + Sync {
                 );
                 return;
             }
-            term_buffer.truncate_value_bytes(end_of_path_idx);
+            term_buffer.truncate(end_of_path_idx);
             term_buffer.append_bytes(token.text.as_bytes());
             let start_position = indexing_position.end_position + token.position as u32;
-            end_position = end_position.max(start_position + token.position_length as u32);
+            end_position = start_position + token.position_length as u32;
             let unordered_term_id = self.subscribe(doc_id, start_position, term_buffer, ctx);
             if let Some(term_id_fast_field_writer) = term_id_fast_field_writer_opt.as_mut() {
                 term_id_fast_field_writer.add_val(unordered_term_id);
@@ -181,7 +180,7 @@ pub(crate) trait PostingsWriter: Send + Sync {
 
         indexing_position.end_position = end_position + POSITION_GAP;
         indexing_position.num_tokens += num_tokens;
-        term_buffer.truncate_value_bytes(end_of_path_idx);
+        term_buffer.truncate(end_of_path_idx);
     }
 
     fn total_num_tokens(&self) -> u64;
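The "-" side of the hunk above folds position_length into end_position with a max(), so a token spanning several positions (for example a multi-word synonym) cannot be undercut by a later, shorter token. A stand-alone sketch with a stand-in Token type:

// Stand-in Token type; in tantivy the fields come from the tokenizer.
struct Token {
    position: u32,        // position relative to the start of the field value
    position_length: u32, // how many positions the token spans
}

fn field_end_position(tokens: &[Token], field_start: u32) -> u32 {
    let mut end_position = field_start;
    for token in tokens {
        let start_position = field_start + token.position;
        // max() keeps end_position correct even when a long token is
        // followed by one that ends earlier.
        end_position = end_position.max(start_position + token.position_length);
    }
    end_position
}

fn main() {
    // A synonym token spanning positions 0..3, then a regular token at 1.
    let tokens = [
        Token { position: 0, position_length: 3 },
        Token { position: 1, position_length: 1 },
    ];
    // Plain assignment would leave end_position at 2; max() keeps 3.
    assert_eq!(field_end_position(&tokens, 0), 3);
}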
@@ -47,11 +47,11 @@ impl<'a> Iterator for VInt32Reader<'a> {
     }
 }
 
-/// `Recorder` is in charge of recording relevant information about
+/// Recorder is in charge of recording relevant information about
 /// the presence of a term in a document.
 ///
-/// Depending on the [`TextOptions`](crate::schema::TextOptions) associated
-/// with the field, the recorder may record:
+/// Depending on the `TextIndexingOptions` associated to the
+/// field, the recorder may records
 /// * the document frequency
 /// * the document id
 /// * the term frequency
@@ -83,21 +83,21 @@ pub(crate) trait Recorder: Copy + Default + Send + Sync + 'static {
 
 /// Only records the doc ids
 #[derive(Clone, Copy)]
-pub struct DocIdRecorder {
+pub struct NothingRecorder {
     stack: ExpUnrolledLinkedList,
     current_doc: DocId,
 }
 
-impl Default for DocIdRecorder {
+impl Default for NothingRecorder {
     fn default() -> Self {
-        DocIdRecorder {
+        NothingRecorder {
             stack: ExpUnrolledLinkedList::new(),
             current_doc: u32::MAX,
         }
     }
 }
 
-impl Recorder for DocIdRecorder {
+impl Recorder for NothingRecorder {
     fn current_doc(&self) -> DocId {
         self.current_doc
     }
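A simplified model of the Recorder abstraction being renamed above, with a Vec standing in for the ExpUnrolledLinkedList backing store (a sketch only, not the crate's actual trait):

type DocId = u32;

trait Recorder: Default {
    fn current_doc(&self) -> DocId;
    fn record_doc(&mut self, doc: DocId);
}

struct DocIdOnlyRecorder {
    docs: Vec<DocId>, // stands in for ExpUnrolledLinkedList
    current_doc: DocId,
}

impl Default for DocIdOnlyRecorder {
    fn default() -> Self {
        DocIdOnlyRecorder {
            docs: Vec::new(),
            // u32::MAX mirrors the "no doc seen yet" sentinel in the hunk above.
            current_doc: u32::MAX,
        }
    }
}

impl Recorder for DocIdOnlyRecorder {
    fn current_doc(&self) -> DocId {
        self.current_doc
    }
    fn record_doc(&mut self, doc: DocId) {
        // Doc-id-only recording: no term frequency, no positions.
        self.current_doc = doc;
        self.docs.push(doc);
    }
}

fn main() {
    let mut rec = DocIdOnlyRecorder::default();
    rec.record_doc(0);
    rec.record_doc(2);
    assert_eq!(rec.current_doc(), 2);
    assert_eq!(rec.docs, vec![0, 2]);
}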
@@ -7,7 +7,7 @@ use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
 use crate::postings::{branchless_binary_search, BlockSegmentPostings, Postings};
 use crate::{DocId, TERMINATED};
 
-/// `SegmentPostings` represents the inverted list or postings associated with
+/// `SegmentPostings` represents the inverted list or postings associated to
 /// a term in a `Segment`.
 ///
 /// As we iterate through the `SegmentPostings`, the frequencies are optionally decoded.
@@ -216,7 +216,7 @@ impl HasLen for SegmentPostings {
 }
 
 impl Postings for SegmentPostings {
-    /// Returns the frequency associated with the current document.
+    /// Returns the frequency associated to the current document.
     /// If the schema is set up so that no frequency have been encoded,
     /// this method should always return 1.
     ///
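A usage sketch of the Postings API this doc comment describes, assembled from calls that appear elsewhere in this diff; exact signatures vary across tantivy versions, so treat it as an outline rather than a drop-in test (the field name and document text are placeholders):

use tantivy::schema::{IndexRecordOption, Schema, TEXT};
use tantivy::{doc, DocSet, Index, Postings, Term};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let text = schema_builder.add_text_field("text", TEXT);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(text => "hello happy tax payer"))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let seg_reader = searcher.segment_reader(0);
    let inv_index = seg_reader.inverted_index(text)?;
    let term = Term::from_field_text(text, "hello");
    let mut postings = inv_index
        .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
        .unwrap()
        .unwrap();
    assert_eq!(postings.doc(), 0u32);
    // With Basic indexing (no freqs encoded) this would always be 1.
    assert_eq!(postings.term_freq(), 1);
    Ok(())
}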
@@ -98,7 +98,7 @@ impl<'a> Iterator for Iter<'a> {
 /// # Panics if n == 0
 fn compute_previous_power_of_two(n: usize) -> usize {
     assert!(n > 0);
-    let msb = (63u32 - (n as u64).leading_zeros()) as u8;
+    let msb = (63u32 - n.leading_zeros()) as u8;
     1 << msb
 }
 
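On the compute_previous_power_of_two change above: leading_zeros() counts from the width of its operand, so 63 - n.leading_zeros() is only correct when n is 64 bits wide; casting to u64 first makes the constant 63 match the operand width on every target. A worked check of the intended behavior (sketch):

fn compute_previous_power_of_two(n: usize) -> usize {
    assert!(n > 0);
    // Index of the most significant set bit, computed on u64 so the
    // constant 63 is right regardless of the platform's usize width.
    let msb = 63 - (n as u64).leading_zeros();
    1 << msb
}

fn main() {
    assert_eq!(compute_previous_power_of_two(1), 1);
    assert_eq!(compute_previous_power_of_two(5), 4);
    assert_eq!(compute_previous_power_of_two(8), 8);
    assert_eq!(compute_previous_power_of_two(1023), 512);
}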
@@ -199,7 +199,7 @@ impl TermHashMap {
     /// `update` create a new entry for a given key if it does not exists
     /// or updates the existing entry.
     ///
-    /// The actual logic for this update is define in the `updater`
+    /// The actual logic for this update is define in the the `updater`
     /// argument.
     ///
     /// If the key is not present, `updater` will receive `None` and
@@ -4,7 +4,7 @@ use std::ops::Range;
 
 use common::{BinarySerializable, FixedSize};
 
-/// `TermInfo` wraps the metadata associated with a Term.
+/// `TermInfo` wraps the metadata associated to a Term.
 /// It is segment-local.
 #[derive(Debug, Default, Eq, PartialEq, Clone)]
 pub struct TermInfo {
@@ -17,7 +17,7 @@ impl Query for AllQuery {
     }
 }
 
-/// Weight associated with the `AllQuery` query.
+/// Weight associated to the `AllQuery` query.
 pub struct AllWeight;
 
 impl Weight for AllWeight {
@@ -37,7 +37,7 @@ impl Weight for AllWeight {
     }
 }
 
-/// Scorer associated with the `AllQuery` query.
+/// Scorer associated to the `AllQuery` query.
 pub struct AllScorer {
     doc: DocId,
     max_doc: DocId,
Some files were not shown because too many files have changed in this diff