Compare commits

...

1046 Commits

Author SHA1 Message Date
Paul Masurel
58cf4a382b Removing Deserializer trait 2022-08-27 20:57:06 +02:00
Pascal Seitz
fdd0f63787 merge traits 2022-08-27 17:01:41 +02:00
Pascal Seitz
fd60e6fe08 rename get_u64 to ge_val 2022-08-27 17:01:41 +02:00
Pascal Seitz
02c3252d1e split open_from_bytes to own trait 2022-08-27 17:01:39 +02:00
Pascal Seitz
4a6f36937c num_vals to FastFieldCodecReader 2022-08-27 17:00:55 +02:00
Paul Masurel
3a9727aa91 Pleasing Clippy 2022-08-27 11:33:03 +02:00
UEDA Akira
17093e8ffe Collapse overlapped highlighted ranges (#1473) 2022-08-26 14:37:08 +09:00
Paul Masurel
03e4630cd8 Mark the CI as successful regardless of whether uploading to Coverall fails. 2022-08-26 07:35:29 +02:00
Paul Masurel
4ae0317d68 Cargo fmt 2022-08-26 00:50:07 +02:00
Paul Masurel
107b19855f Fixing the fastfield codec benchmark (#1484) 2022-08-26 05:54:14 +09:00
Paul Masurel
d8f66ba07e Rename fastfield codecs (#1483) 2022-08-26 01:19:30 +09:00
Paul Masurel
f908549245 Argument missing in bench 2022-08-25 15:42:59 +02:00
Paul Masurel
3673a5df9b Homogeneous codec names. (#1481) 2022-08-25 05:51:37 +09:00
Paul Masurel
298b5dd726 GCD wrapper uses DividerU64 (#1478) 2022-08-25 02:29:13 +09:00
Paul Masurel
8bbb22e9bf Minor refactoring. Introducing a codec type enum. (#1477) 2022-08-25 02:21:41 +09:00
PSeitz
513f68209d Merge pull request #1476 from quickwit-oss/fix_interpol
add proptest to ff codecs
2022-08-24 08:01:36 -07:00
Pascal Seitz
91f2f7e722 add proptest to ff codecs 2022-08-24 16:42:40 +02:00
PSeitz
c476b530cf Merge pull request #1432 from quickwit-oss/gcd_encoding
add gcd test for DateTime
2022-08-24 06:50:34 -07:00
PSeitz
77dd202e19 Merge pull request #1475 from quickwit-oss/extend_ff_access
move fastfield stats to trait
2022-08-24 06:44:57 -07:00
Pascal Seitz
00ebff3c16 move fastfield stats to trait 2022-08-24 15:29:55 +02:00
Paul Masurel
9a6d37c42c Apply suggestions from code review 2022-08-24 21:20:17 +09:00
PSeitz
bb01e99e05 Fixes race condition in Searcher (#1464)
Fixes a race condition in Searcher by avoiding repeated calls to open_segment_readers and passing the segment readers as an argument instead.

Closes #1461
2022-08-24 21:17:37 +09:00
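
A sketch of the pattern that commit describes, with hypothetical names rather than tantivy's actual API: read the segment readers once and pass the snapshot along, instead of re-opening them in each helper.

    struct SegmentReader;

    // Stands in for the call that reads shared, mutable index state.
    fn open_segment_readers() -> Vec<SegmentReader> {
        vec![SegmentReader, SegmentReader]
    }

    // Racy variant: every helper re-opens the readers, so two helpers used
    // during a single search may observe different index generations.
    fn doc_count_racy() -> usize {
        open_segment_readers().len()
    }

    // Fixed variant: open once, then pass the same snapshot everywhere.
    fn doc_count(readers: &[SegmentReader]) -> usize {
        readers.len()
    }

    fn main() {
        let readers = open_segment_readers();
        assert_eq!(doc_count(&readers), 2);
        assert_eq!(doc_count_racy(), 2);
    }
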
PSeitz
535f1a5d83 Merge pull request #1471 from adamreichold/ci-no-nightly-no-cry
Split test into check and test CI jobs
2022-08-24 04:41:42 -07:00
Pascal Seitz
625f9174a7 check for size 2022-08-24 10:32:45 +02:00
Adam Reichold
11a4d97cf5 Use a job matrix to further split and deduplicate the test CI job. 2022-08-24 10:27:57 +02:00
Adam Reichold
1c3d39677a Split checking and testing to a bit more parallelism in the CI. 2022-08-24 10:27:57 +02:00
Pascal Seitz
6f65995cfd remove gcd from api 2022-08-24 10:24:09 +02:00
Pascal Seitz
e2e4190571 add gcd test for DateTime 2022-08-24 10:24:09 +02:00
PSeitz
82209c58aa reuse get_calculated_value (#1472) 2022-08-24 17:16:25 +09:00
Paul Masurel
21519788ea Build fix (#1470) 2022-08-24 07:16:38 +09:00
Shikhar Bhushan
4c6c6e4a9c ConstScoreQuery (#1463) 2022-08-24 06:37:34 +09:00
Adam Reichold
df0ac9e901 Extend facet deserialization to handle owned in addition to borrowed strings. (#1466) 2022-08-24 06:37:13 +09:00
Adam Reichold
71ab482720 RFC: Use a more general but still object-safe signature for Query::query_terms. (#1468)
* Use a more general but still object-safe signature for Query::query_terms.

* Further constrain the generalized Query::query_terms signature to allow extracting references to terms.
2022-08-24 06:34:07 +09:00
Adam Reichold
2ae383e452 Cache dependencies in CI to speed up build times. (#1469)
* Cache dependencies in CI to speed up build times.

* Give cargo-nextest a try.
2022-08-24 06:27:29 +09:00
PSeitz
8b3a6f6231 Merge pull request #1439 from quickwit-oss/fix_value_range
fix get calculated value
2022-08-23 10:15:13 -07:00
PSeitz
11edd6bd59 fix for api change (#1467) 2022-08-24 01:10:12 +09:00
Pascal Seitz
193a3c21f4 fix neg slope calculated value 2022-08-23 13:42:09 +02:00
PSeitz
998b1263f6 Merge pull request #1460 from quickwit-oss/merge_ff_access_iterator
move iter to FastFieldDataAccess
2022-08-23 02:58:10 -07:00
Pascal Seitz
72272bdf81 fix variable name 2022-08-23 11:38:27 +02:00
Pascal Seitz
c39c2d79da move iter to FastFieldDataAccess 2022-08-23 11:26:47 +02:00
Paul Masurel
67d94f5bd2 Getting rid of the gcd dependency and using NonZeroU64 in gcd. (#1459) 2022-08-23 07:25:26 +09:00
Paul Masurel
abbd934ac9 Embeds OwnedBytes into the FastFieldCodecReader. (#1458) 2022-08-23 00:02:31 +09:00
Paul Masurel
7f9ba0ee50 Minor readability refactoring in the SegmentDocIdMapping (#1451) 2022-08-22 22:44:36 +09:00
PSeitz
8edcd6f958 Merge pull request #1428 from izihawa/feature/dismax
[feat] Implement `DisjunctionMaxQuery` and refactor `ScoreCombiner`
2022-08-22 06:15:30 -07:00
Pasha Podolsky
f50700835d [fix] Fn -> FnOnce 2022-08-22 15:57:30 +03:00
PSeitz
494e92ca59 fix issue in composite (#1456)
The file offsets were recorded incorrectly in some cases, e.g. when the recording looked like this: [(Field 1, Index 0, Offset 0), (Field 1, Index 1, Offset 14), (Field 0, Index 0, Offset 14)]. The last entry covers offset 14 to the end of the file for Field 0. But the data was converted to a vec and sorted, which changed the last entry to Field 1.
2022-08-22 17:52:12 +09:00
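
A small sketch of the failure mode described in that message, using plain tuples in place of the real types: sorting the recorded (field, index, offset) entries reorders them, so "offset to end of file" no longer belongs to the entry that was actually written last.

    fn main() {
        // (field, index, start_offset), recorded in write order.
        // The last entry covers offset 14 up to the end of the file.
        let recorded = vec![(1u32, 0u32, 0u64), (1, 1, 14), (0, 0, 14)];
        assert_eq!(recorded.last().unwrap().0, 0); // Field 0 was written last

        // Sorting (as the buggy code did) reorders the entries, so the
        // trailing "offset 14 to end of file" range is now attributed to
        // a Field 1 entry instead of Field 0.
        let mut sorted = recorded.clone();
        sorted.sort();
        assert_eq!(sorted.last().unwrap().0, 1);
    }
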
Paul Masurel
4a3169011d clippy (#1452) 2022-08-20 20:01:33 +09:00
Pascal Seitz
050fc5dde9 add comment for diff dance 2022-08-20 08:56:03 +02:00
Paul Masurel
ce45889add Minor codestyle change in is_prefix_of (#1450)
* Minor code style change in the Facet::is_prefix_of.

* bugfix
2022-08-19 21:20:33 +09:00
dependabot[bot]
4875174d16 Update prettytable-rs requirement from 0.8.0 to 0.9.0 (#1446)
Updates the requirements on [prettytable-rs](https://github.com/phsym/prettytable-rs) to permit the latest version.
- [Release notes](https://github.com/phsym/prettytable-rs/releases)
- [Commits](https://github.com/phsym/prettytable-rs/compare/v0.8.0...v0.9.0)

---
updated-dependencies:
- dependency-name: prettytable-rs
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-08-19 18:09:59 +09:00
Kanji Yomoda
0c634c5bc6 Add missing seek to RequiredOptionalScorer (#1442) 2022-08-19 18:08:52 +09:00
Paul Masurel
e25ab5d537 Minor code style change in the Facet::is_prefix_of. (#1449) 2022-08-19 18:05:11 +09:00
Adam Reichold
27400c9ad3 Check for the special case of the root facet as prefix of other facets. (#1448) 2022-08-19 17:45:14 +09:00
PSeitz
19074e1d5e Merge pull request #1445 from kianmeng/fix-typos-and-markdowns
Fix typos and markdowns
2022-08-18 00:03:37 -07:00
Kian-Meng Ang
014b1adc3e cargo +nightly fmt 2022-08-17 22:33:44 +08:00
Kian-Meng Ang
84295d5b35 cargo fmt 2022-08-15 21:07:01 +08:00
Kian-Meng Ang
625bcb4877 Fix typos and markdowns
Found via these commands:

    codespell -L crate,ser,panting,beauti,hart,ue,atleast,childs,ond,pris,hel,mot
    markdownlint *.md doc/src/*.md --disable MD013 MD025 MD033 MD001 MD024 MD036 MD041 MD003
2022-08-13 18:25:47 +08:00
Pascal Seitz
f01cb7d3aa remove cast 2022-08-12 19:50:06 +02:00
PSeitz
8e773ade77 Merge pull request #1444 from quickwit-oss/add-async-doc-freq
Support for SnippetGenerator in async context
2022-08-12 05:46:13 -07:00
Evance Soumaoro
fad3faefe2 added InvertedIndexReader::doc_freq_async and SnippetGenerator::new methods 2022-08-12 06:39:10 +00:00
Pascal Seitz
9811d15657 improve slope calculation by delaying f64 cast 2022-08-11 13:32:10 +02:00
Pascal Seitz
31ba5a3c16 fix get calculated value
fix get calculated value by delaying cast
2022-08-11 09:44:20 +02:00
PSeitz
f4d7621370 Merge pull request #1436 from boraarslan/bora--warmup-fieldnorms
Expose inner file slice for fieldnorms
2022-08-09 02:45:45 -07:00
boraarslan
d4b2b7de8b Expose inner file slice 2022-08-04 18:13:17 +03:00
PSeitz
d5ee4edf25 Merge pull request #1426 from k-yomo/support-custom-key-in-range-aggregation
Add support for custom key param for range aggregation
2022-08-03 04:31:02 -07:00
PSeitz
fcc7bd7024 Merge pull request #1418 from quickwit-oss/gcd_encoding
apply gcd on fastfield as preprocessing
2022-07-29 02:00:14 -07:00
Pascal Seitz
ce8d6b259a early return 2022-07-29 10:05:30 +02:00
k-yomo
099e626156 Refactor InternalRangeAggregationRange initialization with From trait 2022-07-29 05:41:29 +09:00
Pasha Podolsky
71041b2314 [fix] Fix bench 2022-07-28 21:36:28 +03:00
Pasha Podolsky
09aae134e6 [feat] Implement DisjunctionMaxQuery and refactor ScoreCombiner 2022-07-28 20:47:20 +03:00
Pascal Seitz
6a9d09cf7a handle gcd like a composable codec 2022-07-28 09:54:35 +02:00
k-yomo
704d0a8d8b Refactor range aggregation tests 2022-07-28 06:31:25 +09:00
k-yomo
195309a557 Add support for custom key param for range aggregation 2022-07-28 06:21:39 +09:00
PSeitz
da0f78e06c Merge pull request #1424 from k-yomo/support-keyed-parameter-in-aggregation
Add support for keyed parameter in range and histogram aggregations
2022-07-27 06:22:29 -07:00
k-yomo
9b6b60cc2b Remove unnecessary keyed parameter setting 2022-07-27 18:43:52 +09:00
k-yomo
6444516a82 Use serde default for the keyed params 2022-07-27 01:12:56 +09:00
k-yomo
a9b0d1a0ab Fix aggregation examples 2022-07-26 18:54:27 +09:00
k-yomo
2b333ca635 Fix keyed param type in the comment 2022-07-26 18:35:01 +09:00
k-yomo
80a1418284 Use FnvHashMap for keyed bucket entries 2022-07-26 18:24:54 +09:00
k-yomo
5ab5f070ed Fix to use bool directly for the keyed parameter 2022-07-26 18:18:38 +09:00
k-yomo
d122f2c74e Add tests for keyed buckets 2022-07-26 04:28:21 +09:00
k-yomo
5b564916f0 Add support for keyed parameter in range and histogram aggregations 2022-07-26 04:28:21 +09:00
Pascal Seitz
06fd8684b7 use filter to filter zero 2022-07-25 10:26:35 +02:00
Kanji Yomoda
931bab8010 Fix failing nanosec truncation check on mac OS (#1423) 2022-07-25 09:32:15 +09:00
Pascal Seitz
8dac30e6d1 fix benchmark 2022-07-22 17:44:06 +02:00
Pascal Seitz
2e0a7d072f use single pass for gcd 2022-07-22 16:04:32 +02:00
Kanji Yomoda
af84e74284 Replace deprecated std package's constants on floats and integers (#1420) 2022-07-22 08:05:08 +09:00
Pascal Seitz
fff1a03842 replace generic with impl T 2022-07-21 14:26:45 +02:00
Pascal Seitz
90e296f2d0 fix var name 2022-07-21 14:26:45 +02:00
PSeitz
5f966d747b Apply suggestions from code review
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-07-21 14:25:35 +02:00
PSeitz
d24f31f965 Merge pull request #1419 from quickwit-oss/expose-final-bucket-result
Re(Expose) IntermediateAggregationResults method
2022-07-21 04:40:23 -07:00
Evance Soumaoro
f26b686a1c expose IntermediateAggregationResults->into_final_bucket_result 2022-07-21 11:19:23 +00:00
Pier-Olivier Thibault
775e936f7d FileHandle: Change from boxed to Arc. (#1415)
* FileHandle: Change from boxed to Arc.

Changing from a Box<dyn FileHandle> to an Arc<dyn FileHandle> allows
a user of tantivy to manage file handles outside of tantivy
and to control their life cycle.

* Fix: Rust linter
2022-07-21 16:19:18 +09:00
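
A sketch of why the switch matters, using a hypothetical FileHandle trait rather than tantivy's real one: an Arc<dyn FileHandle> can be cloned, so the application can keep its own reference and decide when the handle is really released.

    use std::sync::Arc;

    // Hypothetical stand-in for the FileHandle trait mentioned above.
    trait FileHandle: Send + Sync {
        fn len(&self) -> usize;
    }

    struct InMemoryFile(Vec<u8>);

    impl FileHandle for InMemoryFile {
        fn len(&self) -> usize {
            self.0.len()
        }
    }

    fn main() {
        let handle: Arc<dyn FileHandle> = Arc::new(InMemoryFile(vec![0u8; 1024]));

        // Unlike a Box, the Arc can be cloned: the caller keeps a reference
        // alive for as long as it wants, independently of the library.
        let callers_copy = Arc::clone(&handle);
        drop(handle);
        assert_eq!(callers_copy.len(), 1024);
    }
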
Pascal Seitz
7e032a9efd apply gcd on fastfield as preprocessing 2022-07-20 16:19:47 +02:00
PSeitz
23fe73a6c0 remove searcher pool and make Searcher cloneable (#1411)
* remove searcher pool and make Searcher cloneable

closes #1410

* use SearcherInner in InnerIndexReader
2022-07-12 18:07:48 +09:00
Evance Soumaoro
a4be239d38 Updated DateTime to hold timestamp in microseconds, while making date field precision configurable (#1396) 2022-07-12 10:04:28 +09:00
PSeitz
2406d9278b allow set doc store cache size on IndexReaderBuilder (#1407) 2022-07-06 14:40:35 +09:00
PSeitz
6c2d9737f1 Merge pull request #1405 from quickwit-oss/fix_action
fix workflow action
2022-07-04 23:05:28 -07:00
PSeitz
a5688572a5 Merge pull request #1406 from quickwit-oss/edition_2021
edition 2021 for subcrates
2022-07-04 19:42:24 -07:00
Pascal Seitz
431b5a091e remove test trigger 2022-07-05 10:32:33 +08:00
PSeitz
2c17271cd9 Merge pull request #1403 from quickwit-oss/docstore_cache_size
expose doc store cache size
2022-07-04 19:28:51 -07:00
Pascal Seitz
5750224d4c set docstore cache size at construction 2022-07-04 14:27:55 +08:00
Pascal Seitz
02691f2445 edition 2021 for subcrates 2022-07-04 14:19:32 +08:00
Pascal Seitz
e31e78f39f fix workflow action 2022-07-04 14:04:49 +08:00
Pascal Seitz
9db2f0e82b expose doc store cache size
expose lru doc store cache size
optimize doc store cache size
2022-07-04 13:54:41 +08:00
PSeitz
2ed5cc873d Merge pull request #1404 from quickwit-oss/total_cmp
use total_cmp
2022-07-03 22:51:00 -07:00
Pascal Seitz
d278417300 move build step down 2022-07-04 13:22:04 +08:00
Pascal Seitz
d89a8dd118 set rust version 2022-07-04 13:15:32 +08:00
Pascal Seitz
1bd44a5f61 use total_cmp 2022-07-04 12:48:23 +08:00
Ryan Russell
d750ced813 chore(collector): src/collector readability (#1399)
* chore(collector): `src/collector` readability

Signed-off-by: Ryan Russell <git@ryanrussell.org>

* Update src/collector/tests.rs
2022-07-04 12:12:53 +09:00
dependabot[bot]
fbc469e5df Update pprof requirement from 0.9.0 to 0.10.0 (#1400)
Updates the requirements on [pprof](https://github.com/tikv/pprof-rs) to permit the latest version.
- [Release notes](https://github.com/tikv/pprof-rs/releases)
- [Changelog](https://github.com/tikv/pprof-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tikv/pprof-rs/compare/v0.9.1...v0.10.0)

---
updated-dependencies:
- dependency-name: pprof
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-07-04 11:29:33 +09:00
PSeitz
c1273670e4 Merge pull request #1402 from PSeitz/cloneable_error
make errors cloneable
2022-06-30 20:09:37 +08:00
Pascal Seitz
7eb267341e make errors cloneable 2022-06-30 19:42:23 +08:00
PSeitz
db1836691e fix visibility (#1398) 2022-06-28 16:21:39 +09:00
Antoine G
437cd350a2 Add support for phrase slop in query language (#1393)
Closes #1390
2022-06-28 13:55:47 +09:00
PSeitz
8024ecf013 Merge pull request #1389 from quickwit-oss/doc_writer_thread
use separate thread to compress block store
2022-06-23 16:17:41 +08:00
PSeitz
9baefbe2ab Update src/store/writer.rs
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-06-23 15:34:21 +08:00
PSeitz
ad76d11008 Update src/store/writer.rs
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-06-23 15:34:21 +08:00
PSeitz
c3220bece0 Update src/store/writer.rs
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-06-23 15:34:21 +08:00
PSeitz
2b713f0977 Update src/store/writer.rs
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-06-23 15:34:21 +08:00
Pascal Seitz
0bc6b4a117 renames and refactoring 2022-06-23 15:34:21 +08:00
PSeitz
79e42d4a6d Update src/store/writer.rs
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-06-23 15:34:21 +08:00
PSeitz
0135fbc4c8 Update src/store/writer.rs
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-06-23 15:34:21 +08:00
PSeitz
449594f67a Update src/store/writer.rs
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-06-23 15:34:21 +08:00
Pascal Seitz
8b6647e908 move writer to compressor thread 2022-06-23 15:34:21 +08:00
PSeitz
efabcbcdf5 Update src/store/writer.rs
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-06-23 15:34:21 +08:00
Pascal Seitz
7bf5962554 merge match, explicit type 2022-06-23 15:34:21 +08:00
Pascal Seitz
4c7dedef29 use separate thread to compress block store
Use a separate thread to compress the block store for increased indexing performance. This makes it possible to use slower compressors with a higher compression ratio, with little or no performance impact (given enough cores).

A separate thread is spawned to compress the docstore; it handles single blocks and stacking from other docstores.
The spawned compressor thread does not write; instead it sends back the compressed data. This is done to avoid multithreaded writes to the same file.
2022-06-23 15:34:21 +08:00
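
A minimal sketch of the design described above, with made-up names and a placeholder "compressor": a dedicated thread compresses blocks and sends the compressed bytes back over a channel, so only one thread ever writes to the store.

    use std::io::Write;
    use std::sync::mpsc;
    use std::thread;

    fn main() -> std::io::Result<()> {
        let (block_tx, block_rx) = mpsc::channel::<Vec<u8>>();
        let (compressed_tx, compressed_rx) = mpsc::channel::<Vec<u8>>();

        // The compressor thread never touches the file: it only transforms
        // blocks and sends the result back to the single writer.
        let compressor = thread::spawn(move || {
            for block in block_rx {
                // Placeholder "compression" standing in for lz4/zstd.
                let compressed: Vec<u8> = block.iter().rev().copied().collect();
                compressed_tx.send(compressed).unwrap();
            }
        });

        // Writer side: hand blocks off, then write whatever comes back.
        let mut out: Vec<u8> = Vec::new(); // stands in for the store file
        block_tx.send(vec![1, 2, 3]).unwrap();
        block_tx.send(vec![4, 5, 6]).unwrap();
        drop(block_tx); // signal "no more blocks"

        for compressed in compressed_rx {
            out.write_all(&compressed)?;
        }
        compressor.join().unwrap();
        assert_eq!(out, vec![3, 2, 1, 6, 5, 4]);
        Ok(())
    }
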
PSeitz
93f356a7a7 Extend FAQ (#1388)
* Extend FAQ

Co-authored-by: Maxim Kraynyuchenko <100854040+maximkpa@users.noreply.github.com>
2022-06-23 11:53:20 +09:00
PSeitz
6ca5f77466 Merge pull request #1363 from quickwit-oss/refactor_aggregation
Add aggregation bucket limit
2022-06-23 10:27:57 +08:00
Paul Masurel
2e2822f89d Apply suggestions from code review 2022-06-23 09:48:28 +09:00
PSeitz
de178a1901 Merge pull request #1395 from PSeitz/fix_clippy
fix clippy
2022-06-21 16:30:59 +08:00
Antoine G
11e4225f23 doc fix (#1391)
Documentation fix.
2022-06-21 15:53:33 +09:00
Paul Masurel
f21b73d1f6 Apply suggestions from code review 2022-06-21 15:52:43 +09:00
Pascal Seitz
1440f3243b fix clippy 2022-06-21 14:47:01 +08:00
Kanji Yomoda
83d0c13fb0 Fix outdated variable naming and comments to alive bitset (#1387)
* Fix outdated variables and comments for alive bitset

* Fix expired link to delete bitset
2022-06-14 15:59:15 +09:00
PSeitz
88054aa333 Merge pull request #1382 from boraarslan/bool-fields
Add boolean fields
2022-06-13 13:20:05 +08:00
boraarslan
635c39ba48 cargo fmt 2022-06-10 19:54:44 +03:00
boraarslan
eab2257637 Change var name 2022-06-10 19:36:25 +03:00
PSeitz
328bd96c24 Merge pull request #1378 from quickwit-oss/test_compression
enable setting compression level
2022-06-10 11:10:07 +08:00
dependabot[bot]
fc24842a43 Update more-asserts requirement from 0.2.1 to 0.3.0 (#1384)
Updates the requirements on [more-asserts](https://github.com/thomcc/rust-more-asserts) to permit the latest version.
- [Release notes](https://github.com/thomcc/rust-more-asserts/releases)
- [Commits](https://github.com/thomcc/rust-more-asserts/compare/v0.2.2...v0.3.0)

---
updated-dependencies:
- dependency-name: more-asserts
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-10 10:38:28 +09:00
boraarslan
2d6f1d43ff Add bool and explicit types for merger 2022-06-07 10:10:33 +03:00
boraarslan
ca0973ec78 Fix tests 2022-06-07 10:10:33 +03:00
boraarslan
38ee60d792 Edit Test 2022-06-07 10:10:33 +03:00
boraarslan
f68be28284 Add bool 2022-06-07 10:09:37 +03:00
boraarslan
fc43ab9280 Add tests 2022-06-07 10:09:37 +03:00
boraarslan
38c2ea6a5d Remove unnecessary line 2022-06-07 10:09:37 +03:00
boraarslan
26a0fd1fbe cargo fmt 2022-06-07 10:09:37 +03:00
boraarslan
811b91ecb3 Edit and add tests 2022-06-07 10:09:37 +03:00
boraarslan
25c00ce856 Fix indexing for bool 2022-06-07 10:09:37 +03:00
boraarslan
e5debb97a7 Edit test 2022-06-07 10:09:37 +03:00
boraarslan
bc4cd9ffaa typo fix 2022-06-07 10:09:37 +03:00
boraarslan
9a13d8709b Explicitly write types 2022-06-07 10:09:37 +03:00
boraarslan
e6eadf1a2f Add tests 2022-06-07 10:09:37 +03:00
boraarslan
7cca7e6a47 Fix of last commit 2022-06-07 10:09:37 +03:00
boraarslan
ef2492dba6 Broken commit 2022-06-07 10:09:37 +03:00
boraarslan
2981e6c1df First commit 2022-06-07 10:09:37 +03:00
Ryan Russell
b33b4c0092 Fix various occurrence var names and references (#1385)
Thank you Ryan!

Signed-off-by: Ryan Russell <git@ryanrussell.org>
2022-06-07 11:08:19 +09:00
Pascal Seitz
4d9d2b6db0 split into compressor/decompressor
use custom de/serializer for compressor
accept parameters like zstd(compression_level=5) as compressor
2022-06-02 23:29:24 +08:00
Pascal Seitz
ed868f93a3 enable setting compression level 2022-06-02 16:47:29 +08:00
PSeitz
5e599d96d7 Merge pull request #1372 from quickwit-oss/doc_store_api
refactor doc store
2022-06-02 15:19:57 +08:00
Pascal Seitz
314ae43a45 fix fmt 2022-06-02 14:54:23 +08:00
Pascal Seitz
fce91b2f3a vec without capacity 2022-06-02 13:50:18 +08:00
Pascal Seitz
9bcd2b8104 fix read_block_async 2022-06-02 13:37:52 +08:00
Pascal Seitz
0c9c257150 move cache handling into single function 2022-06-02 13:25:29 +08:00
Pascal Seitz
1af85a2956 accept usize instead &usize 2022-06-02 11:23:36 +08:00
Pascal Seitz
bc4c3d0c6b add peek_lru test 2022-06-02 11:13:17 +08:00
Pascal Seitz
6937c75f05 hide advanced doc store api 2022-06-02 11:13:17 +08:00
Pascal Seitz
e54429e827 expose doc store functions
expose doc store functions for advanced usage
refactor cache
expose cache statistics
remove unnecessary arc
unduplicate code
2022-06-02 11:13:17 +08:00
Ryan Russell
ca836b6414 Improve Docs Readability (#1380)
Signed-off-by: Ryan Russell <git@ryanrussell.org>
2022-06-02 09:32:57 +09:00
Paul Masurel
f0a2b1cc44 Bumped tantivy and subcrate versions. 2022-05-25 22:50:33 +09:00
Paul Masurel
fcfdc44c61 Bumped tantivy-grammar version 2022-05-25 21:52:46 +09:00
Paul Masurel
3171f0b9ba Added ZSTD support in CHANGELOG 2022-05-25 21:51:46 +09:00
PSeitz
89e19f14b5 Merge pull request #1374 from kryesh/main
Add Zstd compression support, Make block size configurable via IndexSettings
2022-05-25 07:39:46 +02:00
PSeitz
1a6a1396cd Merge pull request #1376 from saroh/json-example
Add examples to explain default field handling in the json example
2022-05-24 07:09:37 +02:00
saroh
e766375700 remove useless example 2022-05-23 19:49:31 +02:00
PSeitz
496b4a4fdb Update examples/json_field.rs 2022-05-23 12:24:36 +02:00
PSeitz
93cc8498b3 Update examples/json_field.rs 2022-05-23 11:59:42 +02:00
PSeitz
0aa3d63a9f Update examples/json_field.rs 2022-05-23 11:39:45 +02:00
PSeitz
4e2a053b69 Update examples/json_field.rs 2022-05-23 11:27:05 +02:00
Paul Masurel
71c4393ec4 Clippy 2022-05-23 10:20:37 +09:00
saroh
b2e97e266a more examples to explain default field handling 2022-05-21 17:36:39 +02:00
Antoine G
9ee4772140 Fix deps for unicode regex compiling (#1373)
* lint doc warning

* fix regex build
2022-05-20 10:18:44 +09:00
Kryesh
c95013b11e Add zstd-compression feature to github workflow tests 2022-05-19 22:15:18 +10:00
Pascal Seitz
71f75071d2 cache and return error in aggregations 2022-05-19 16:58:56 +08:00
Pascal Seitz
b114e553cd Revert "return result from segment collector"
This reverts commit a99e5459e3.
2022-05-19 16:57:55 +08:00
Pascal Seitz
17dcc99e43 Revert "introduce optional collect_block in segmentcollector"
This reverts commit c5c2e59b2b.
2022-05-19 16:25:21 +08:00
Pascal Seitz
c5c2e59b2b introduce optional collect_block in segmentcollector
add collect_block in segment_collector to handle groups of documents as performance optimization
add collect_block for MultiCollector
2022-05-19 16:23:25 +08:00
Kryesh
fc045e6bf9 Cleanup imports, remove unneeded error mapping 2022-05-19 10:34:02 +10:00
Kryesh
6837a4d468 Fix bench 2022-05-18 20:35:29 +10:00
Kryesh
0759bf9448 Cleanup zstd structure and serialise to u32 in line with lz4 2022-05-18 20:31:22 +10:00
Kryesh
152e8238d7 Fix silly errors from running tests without feature flag 2022-05-18 19:49:10 +10:00
Kryesh
d4e5b48437 Apply feedback - standardise on u64 and fix correct compression bounds 2022-05-18 19:37:28 +10:00
Kryesh
03040ed81d Add Zstd compression support 2022-05-18 14:04:43 +10:00
Kryesh
aaa22ad225 Make block size configurable to allow for better compression ratios on large documents 2022-05-18 11:13:15 +10:00
Pascal Seitz
44ea7313ca set max bucket size as parameter 2022-05-13 13:21:52 +08:00
Antoine G
3223bdf254 Refactorize PhraseScorer::compute_phrase_match (#1364)
* Refactorize PhraseScorer::compute_phrase_match
* implem optim for slop
2022-05-13 09:57:21 +09:00
Pascal Seitz
11ac451250 abort aggregation when too many buckets are created
Validation happens at different phases depending on the aggregation:
Term: during segment collection
Histogram: at the end, when converting into intermediate buckets (we preallocate empty buckets for the range); revisit after #1370
Range: when validating the request

update CHANGELOG
2022-05-12 12:26:43 +08:00
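
A tiny sketch of the kind of guard that commit describes; the limit value, function name, and error type here are made up, not tantivy's.

    // Hypothetical bucket limit; tantivy exposes its own configurable value.
    const MAX_BUCKETS: usize = 65_000;

    fn validate_bucket_count(num_buckets: usize) -> Result<(), String> {
        if num_buckets > MAX_BUCKETS {
            return Err(format!(
                "aggregation would create {} buckets, exceeding the limit of {}",
                num_buckets, MAX_BUCKETS
            ));
        }
        Ok(())
    }

    fn main() {
        assert!(validate_bucket_count(10).is_ok());
        assert!(validate_bucket_count(1_000_000).is_err());
    }
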
Pascal Seitz
6a4632211a forward error in aggregation collect 2022-05-12 12:26:43 +08:00
Pascal Seitz
a99e5459e3 return result from segment collector 2022-05-12 12:26:43 +08:00
Pascal Seitz
3f88718f38 refactor aggregations 2022-05-12 12:26:43 +08:00
dependabot[bot]
cbd06ab189 Update pprof requirement from 0.8.0 to 0.9.0 (#1365)
Updates the requirements on [pprof](https://github.com/tikv/pprof-rs) to permit the latest version.
- [Release notes](https://github.com/tikv/pprof-rs/releases)
- [Changelog](https://github.com/tikv/pprof-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tikv/pprof-rs/commits)

---
updated-dependencies:
- dependency-name: pprof
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-11 11:42:04 +09:00
Paul Masurel
749395bbb8 Added rustdoc for MultiFruit extract function (#1369) 2022-05-11 11:41:39 +09:00
Paul Masurel
617ba1f0c0 Bugfix in the document deserialization. (#1368)
Deserializing a json field does not expect the
end of the document anymore.

This behavior is well documented in serde_json.
https://docs.serde.rs/serde_json/fn.from_reader.html

Closes #1366
2022-05-11 11:38:10 +09:00
Paul Masurel
2f1cd7e7f0 Bugfix in the document deserialization. (#1367)
Deserializing a json field does not expect the
end of the document anymore.

This behavior is well documented in serde_json.
https://docs.serde.rs/serde_json/fn.from_reader.html

Closes #1366
2022-05-11 11:27:04 +09:00
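
The serde_json behavior both of these commits rely on, sketched outside tantivy: driving a serde_json::Deserializer manually reads one value and leaves the rest of the input untouched, whereas from_str (and from_reader) insists that nothing follows the value.

    use serde::Deserialize;
    use serde_json::Value;

    fn main() {
        // A JSON value followed by more data, as in a larger document stream.
        let input = r#"{"title": "tantivy"} trailing bytes"#;

        // from_str fails here because of the trailing data.
        assert!(serde_json::from_str::<Value>(input).is_err());

        // Driving the Deserializer directly reads exactly one value and does
        // not expect the end of the input afterwards.
        let mut de = serde_json::Deserializer::from_str(input);
        let value = Value::deserialize(&mut de).unwrap();
        assert_eq!(value["title"], "tantivy");
    }
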
PSeitz
58c0cb5fc4 Merge pull request #1357 from saroh/1302-json-term-writer-API
Expose helpers to generate json field writer terms
2022-05-10 11:02:05 +08:00
PSeitz
7f45a6ac96 allow setting tokenizer manager on index (#1362)
handle json in tokenizer_for_field
2022-05-09 18:15:45 +09:00
saroh
0ade871126 rename constructor to be more explicit 2022-05-06 13:29:07 +02:00
PSeitz
aab65490c9 Merge pull request #1358 from quickwit-oss/fix_docs
add alias shard_size to split_size for quickwit
2022-05-06 18:41:34 +08:00
Pascal Seitz
d77e8de36a flip alias variable name 2022-05-06 17:52:36 +08:00
Pascal Seitz
d11a8cce26 minor docs fix 2022-05-06 17:52:36 +08:00
Pascal Seitz
bc607a921b add alias shard_size to split_size for quickwit
improve some docs
2022-05-06 17:52:36 +08:00
Paul Masurel
1273f33338 Fixed comment. 2022-05-06 18:35:25 +09:00
Paul Masurel
e30449743c Shortens blocks' last_key in the SSTable block index. (#1361)
Right now we store the last key in the blocks of the SSTable index.
This PR replaces the last key with a shorter string that is greater than
or equal to it and still less than the next key.
This property is sufficient to ensure the block index
works properly.

Related to quickwit#1366
2022-05-06 16:29:06 +08:00
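
One way (purely illustrative, not necessarily tantivy's algorithm) to build such a separator: take the shortest prefix of the next key that is still greater than or equal to the last key; any proper prefix is strictly less than the full next key.

    /// Returns a key k with last <= k < next, preferring a short one.
    /// Assumes last < next, as consecutive keys in an SSTable are.
    fn shorter_separator(last: &[u8], next: &[u8]) -> Vec<u8> {
        for len in 1..next.len() {
            let prefix = &next[..len];
            if prefix >= last {
                return prefix.to_vec();
            }
        }
        // Fall back to the last key itself; it trivially satisfies the bound.
        last.to_vec()
    }

    fn main() {
        assert_eq!(shorter_separator(b"handle", b"hologram"), b"ho".to_vec());
        assert_eq!(shorter_separator(b"abc", b"abcdef"), b"abc".to_vec());
    }
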
Paul Masurel
ed26552296 Minor changes in query parsing for quickwit#1334. (#1356)
Quickwit still relies heavily on generating field names
containing a '.' for nested objects, yet allows
user-defined field names to contain a dot.

In order to reuse the tantivy query parser, we will end up
using quickwit field names directly in tantivy.
Only '.' will be escaped.

This PR makes minor changes in how the tantivy query parser parses
a field name and resolves it to a field.
Some of the new edge case behavior is hacky.

Closes #1355
2022-05-06 13:20:10 +09:00
Saroh
65d129afbd better function names 2022-05-05 10:12:28 +02:00
Antoine G
386ffab76c Fix documentation regression (#1359)
This breaks the doc on docs.rs, as the type seems to shadow the struct https://docs.rs/tantivy/latest/tantivy/termdict/type.TermDictionary.html
introduced by #1293, which may not have been up to date with what was done in #1242.
2022-05-05 14:59:25 +09:00
Pasha Podolsky
57a8d0359c Make FruitHandle and MultiFruit public (#1360)
* Make `FruitHandle` and `MultiFruit` public

* Add docs for `MultiFruit` and `FruitHandle`
2022-05-05 14:58:33 +09:00
Saroh
14cb66ee00 move helper to indexer module 2022-05-04 18:01:57 +02:00
Saroh
9e38343352 expose helpers for json field writer manipulation
closes #1302
2022-05-04 18:01:45 +02:00
PSeitz
944302ae2f Merge pull request #1350 from quickwit-oss/update_edition
update edition
2022-05-04 11:02:52 +02:00
Paul Masurel
be70804d17 Removed AtomicUsize. 2022-05-04 16:45:24 +09:00
PSeitz
a1afc80600 Update src/core/executor.rs
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-05-04 08:39:44 +02:00
Paul Masurel
02e24fda52 Clippy fix 2022-05-04 12:24:07 +09:00
PSeitz
7e3c0c5392 Merge pull request #1353 from quickwit-oss/fix_docs
minor docs fixes
2022-05-02 07:48:25 +02:00
Pascal Seitz
fdb2524f9e minor docs fixes 2022-05-02 12:26:12 +08:00
Pascal Seitz
4db655ae82 update dependencies, update edition 2022-04-28 22:50:55 +08:00
Pascal Seitz
bb44cc84c4 update dependencies 2022-04-28 20:55:36 +08:00
PSeitz
8c1e1cf1ad Merge pull request #1349 from quickwit-oss/fix_error_message
print whole query on syntax error
2022-04-28 09:31:45 +02:00
Pascal Seitz
b5b16948b0 print whole query on syntax error 2022-04-27 12:48:30 +08:00
PSeitz
c305d3a2a2 Merge pull request #1346 from quickwit-oss/term_agg
term agg
2022-04-26 07:08:07 +02:00
PSeitz
038d234ff1 Merge pull request #1347 from quickwit-oss/query_parser_error
fix query parser error field not found
2022-04-26 07:01:48 +02:00
Pascal Seitz
c45eb9a9fa improve readability, add json test 2022-04-26 11:22:34 +08:00
Pascal Seitz
824d6f96fe return query on parse error 2022-04-22 16:11:36 +08:00
Pascal Seitz
7cf821bac0 fix query parser error field not found 2022-04-22 12:40:00 +08:00
PSeitz
ae83fc8298 bump uuid to 1.0 (#1345) 2022-04-22 10:02:24 +09:00
dependabot[bot]
a7bc361145 Update pprof requirement from 0.7 to 0.8 (#1343)
Updates the requirements on [pprof](https://github.com/tikv/pprof-rs) to permit the latest version.
- [Release notes](https://github.com/tikv/pprof-rs/releases)
- [Changelog](https://github.com/tikv/pprof-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tikv/pprof-rs/commits)

---
updated-dependencies:
- dependency-name: pprof
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-04-21 09:35:13 +09:00
Pascal Seitz
2805291400 minor fixes 2022-04-20 14:22:44 +08:00
Pascal Seitz
6614a2cba0 fix is_fast for bytes field 2022-04-20 12:02:38 +08:00
Pascal Seitz
6f4d203d1b return error on missing sub aggregation 2022-04-20 11:19:36 +08:00
Pascal Seitz
1be6c6111c support order property on term aggregations
support order property on term aggregations
order can be by doc_count, key, or a metric sub_aggregation
2022-04-20 00:34:38 +08:00
PSeitz
c7c3eab256 Merge pull request #1340 from PSeitz/term_agg
fix collecting term_dict field names
2022-04-18 08:21:27 +02:00
Pascal Seitz
ec69875d15 fix collecting term_dict field names
fix collecting term_dict field names for sub_aggregations, minor refactoring
2022-04-15 17:49:20 +08:00
PSeitz
d832cfcfd8 Merge pull request #1329 from quickwit-oss/term_agg
add term aggregation
2022-04-14 14:45:21 +08:00
Pascal Seitz
ab6b532cc4 add comments 2022-04-14 12:06:36 +08:00
Pascal Seitz
4b6047f7d7 return Option from as_ methods 2022-04-14 10:48:36 +08:00
Pascal Seitz
5ca04beb94 add min_doc_count test 2022-04-13 19:51:18 +08:00
Pascal Seitz
902d05ebec refactor getffreader function 2022-04-13 19:51:18 +08:00
Pascal Seitz
f1b298642a remove unnecessary benchmarks 2022-04-13 19:51:18 +08:00
Pascal Seitz
dd13dedaeb forward errors, remove unwrap 2022-04-13 19:51:18 +08:00
Pascal Seitz
46724b4a05 add segment_size, add get term dict fields, add tests 2022-04-13 19:51:18 +08:00
Pascal Seitz
24432bf523 add term aggregation 2022-04-13 19:51:18 +08:00
PSeitz
31d3bcfff2 Merge pull request #1334 from PSeitz/minor_fixes
fix DateTime naming, fix docs, cleanup
2022-04-13 13:13:57 +08:00
Pascal Seitz
706fbd6886 fix DateTime naming, fix docs, cleanup 2022-04-13 13:01:00 +08:00
PSeitz
8a8a048015 fix coverage (#1335) 2022-04-13 13:47:47 +09:00
dependabot[bot]
c72549cb9a Bump codecov/codecov-action from 2 to 3 (#1328)
Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 2 to 3.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md)
- [Commits](https://github.com/codecov/codecov-action/compare/v2...v3)

---
updated-dependencies:
- dependency-name: codecov/codecov-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-04-11 21:26:52 +09:00
PSeitz
d6f803212c Merge pull request #1325 from quickwit-oss/term_agg
fast field on string
2022-04-04 15:34:31 +08:00
Pascal Seitz
dac73537d2 update changelog 2022-04-04 14:15:40 +08:00
Pascal Seitz
bb5254de12 always serialize, use enum as param 2022-04-04 13:50:23 +08:00
Maxim Kraynyuchenko
be5218c2f6 Company Logos were not visible in Dark Theme. (#1326) 2022-04-04 11:53:31 +09:00
Pascal Seitz
ec9478830a add text test
move get multiple values to test code
remove sorting term ids per doc id for non-facets
2022-03-30 11:31:33 +08:00
Pascal Seitz
8807bfd13d fast field on string
enables FAST on string fields, which creates a fastfield containing the term ordinals
2022-03-29 12:40:10 +08:00
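
A rough sketch of the idea above, with illustrative types only (not tantivy's internals): each string is replaced by its term ordinal, i.e. its rank in the sorted term dictionary, and that column of u64 ordinals is what the fast field encodes.

    use std::collections::BTreeMap;

    fn main() {
        let docs = vec!["berlin", "amsterdam", "berlin", "cairo"];

        // Sorted, deduplicated term dictionary -> ordinal per distinct term.
        let mut terms = docs.clone();
        terms.sort();
        terms.dedup();
        let dict: BTreeMap<&str, u64> = terms
            .into_iter()
            .enumerate()
            .map(|(ord, term)| (term, ord as u64))
            .collect();

        // The per-document column of ordinals is what gets fast-field encoded.
        let ordinals: Vec<u64> = docs.iter().map(|term| dict[term]).collect();
        assert_eq!(ordinals, vec![1, 0, 1, 2]);
    }
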
Maxim Kraynyuchenko
447811c111 Update README following sections: features, benchmark illustration & FAQ. (#1318)
* Updated features, benchmark illustration & FAQ.
* Updated README: Feat,Graph,Non-Feat,Companies,FAQ
2022-03-23 10:02:09 +09:00
PSeitz
f29acf5d8c fix clippy (#1321) 2022-03-22 12:48:23 +09:00
Uwe Klotz
125707dbe0 Replace chrono with time (#1307)
For date values `chrono` has been replaced with `time` 
- The `time` crate is re-exported as `tantivy::time` instead of `tantivy::chrono`.
- The type alias `tantivy::DateTime` has been removed.
- `Value::Date` wraps `time::PrimitiveDateTime` without time zone information.
- Internally date/time values are stored as seconds since UNIX epoch in UTC.
- Converting a `time::OffsetDateTime` to `Value::Date` implicitly converts the value into UTC.
If this is not desired, do the time zone conversion yourself and use `time::PrimitiveDateTime`
directly instead.

Closes #1304
2022-03-21 10:50:19 +09:00
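
For reference, a sketch of the conversion described above using the time crate directly (not tantivy's wrapper types): moving an OffsetDateTime between offsets never changes the instant, and the stored representation is seconds since the UNIX epoch.

    use time::{OffsetDateTime, UtcOffset};

    fn main() {
        let now_utc = OffsetDateTime::now_utc();

        // The same instant, viewed at a +09:00 offset.
        let plus_nine = now_utc.to_offset(UtcOffset::from_hms(9, 0, 0).unwrap());

        // Converting between offsets only changes the representation, so the
        // seconds-since-UNIX-epoch value that would get stored is identical.
        assert_eq!(plus_nine.unix_timestamp(), now_utc.unix_timestamp());
    }
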
Paul Masurel
46d5de920d Removes all usage of block_on, and use a oneshot channel instead. (#1315)
* Removes all usage of block_on, and use a oneshot channel instead.

Calling `block_on` panics in certain contexts.
For instance, it panics when it is called in the context of another
call to block.

Using it in tantivy is unnecessary. We replace it with a thin wrapper
around a oneshot channel that supports both async/sync.

* Removing needless uses of async in the API.

Co-authored-by: PSeitz <PSeitz@users.noreply.github.com>
2022-03-18 16:54:58 +09:00
PSeitz
d2a7bcf217 fix fmt (#1317) 2022-03-18 15:53:27 +09:00
PSeitz
141b9aa245 Merge pull request #1306 from PSeitz/histogram
add Histogram aggregation
2022-03-18 05:03:46 +01:00
PSeitz
c5a6282fa8 Update src/aggregation/bucket/histogram/histogram.rs
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-03-18 04:55:31 +01:00
PSeitz
c0f524e1a3 Update src/aggregation/bucket/histogram/histogram.rs
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-03-18 04:55:25 +01:00
Paul Masurel
958b2bee08 Clippy comments (#1316) 2022-03-17 18:57:55 +09:00
Pascal Seitz
f619658e2c rename 2022-03-17 16:37:57 +08:00
Pascal Seitz
aa391bf843 refactor parameters 2022-03-17 16:28:37 +08:00
Pascal Seitz
47dcbdbeae handle empty results, empty indices, add tests 2022-03-17 10:24:34 +08:00
Pascal Seitz
691245bf20 make code more concise 2022-03-16 14:21:58 +08:00
Pascal Seitz
90798d4b39 address comments, add single bucket test 2022-03-16 13:58:13 +08:00
Pascal Seitz
0b6d9f90cf improve docs 2022-03-16 12:39:26 +08:00
PSeitz
8a5a12d961 add setter to json object options (#1311) 2022-03-16 10:36:30 +09:00
Pascal Seitz
e73542e2e8 Elasticsearch behaviour on hard/extended_bounds 2022-03-15 16:46:45 +08:00
Pascal Seitz
0262e44bbd merge_fruits pass by value 2022-03-15 12:59:22 +08:00
Pascal Seitz
613aad7a8a vec optional, improve performance 2022-03-14 21:29:07 +08:00
Pascal Seitz
1aa88b0c51 improve performance 2022-03-14 20:28:08 +08:00
Pascal Seitz
564fa38085 move sub_aggregations to own vec, use itertools minmax 2022-03-14 16:20:26 +08:00
dependabot[bot]
59ec21479f Update pprof requirement from 0.6 to 0.7 (#1305)
Updates the requirements on [pprof](https://github.com/tikv/pprof-rs) to permit the latest version.
- [Release notes](https://github.com/tikv/pprof-rs/releases)
- [Changelog](https://github.com/tikv/pprof-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tikv/pprof-rs/commits)

---
updated-dependencies:
- dependency-name: pprof
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-14 13:57:22 +09:00
PSeitz
42283f9e91 fix error message UnknownTokenizer (#1308)
closes #1303
2022-03-14 13:54:47 +09:00
PSeitz
b105bf72e1 use defaults in meta.json (#1310)
This change allows fields to be unset in meta.json and fall back to their defaults.
Currently it is required to explicitly set e.g. fieldnorms: false
2022-03-14 13:54:06 +09:00
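
A sketch of the serde pattern that makes this possible (the struct and field names are illustrative, not tantivy's actual meta.json schema): #[serde(default)] lets an absent field fall back to its default instead of failing deserialization.

    use serde::Deserialize;

    // Hypothetical options struct, only meant to show the #[serde(default)] idea.
    #[derive(Debug, Deserialize)]
    struct TextOptions {
        #[serde(default)] // absent in the JSON -> false
        fieldnorms: bool,
        #[serde(default = "default_tokenizer")]
        tokenizer: String,
    }

    fn default_tokenizer() -> String {
        "default".to_string()
    }

    fn main() {
        // Neither field is present, yet deserialization succeeds with defaults.
        let opts: TextOptions = serde_json::from_str("{}").unwrap();
        assert!(!opts.fieldnorms);
        assert_eq!(opts.tokenizer, "default");
    }
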
Pascal Seitz
226f577803 Add Histogram aggregation 2022-03-11 21:52:07 +08:00
Paul Masurel
2e255c4bef Preparing for release 2022-03-09 09:59:08 +09:00
Paul Masurel
387592809f Updated CHANGELOG 2022-03-07 15:31:35 +09:00
Halvor Fladsrud Bø
cedced5bb0 Slop support for phrase queries (#1241)
Closes #1068
2022-03-07 15:29:18 +09:00
dependabot[bot]
d31f045872 Bump actions/checkout from 2 to 3 (#1300)
Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v2...v3)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-07 11:54:26 +09:00
PSeitz
6656a70d1b Merge pull request #1301 from saroh/1232-doc-fastfield
update fastfield doc
2022-03-04 08:18:21 +01:00
saroh
d36e0a9549 fix fastfield doc 2022-03-03 17:43:18 +01:00
Antoine G
8771b2673f Update src/fastfield/writer.rs
Co-authored-by: PSeitz <PSeitz@users.noreply.github.com>
2022-03-03 11:25:24 +01:00
Antoine G
a41d3d51a4 Update fastfield_codecs/src/lib.rs 2022-03-03 11:25:06 +01:00
Saroh
cae34ffe47 update fastfield doc 2022-03-02 16:04:15 +01:00
PSeitz
4b62f7907d Merge pull request #1297 from PSeitz/fix_clippy
fix clippy issues
2022-03-02 10:11:56 +01:00
Pascal Seitz
7fa6a0b665 cargo fmt 2022-03-02 09:24:14 +01:00
PSeitz
458ed29a31 Merge pull request #1299 from saroh/1232-doc-lint
doc lint for errors and aggregations
2022-03-02 09:22:07 +01:00
Antoine G
e37775fe21 iff->if or if and only if (#1298)
* has_xxx is_xxx -> if, these functions usually define equivalence
xxx returns bool -> specify equivalence when appropriate

* fix doc
2022-03-02 11:00:00 +09:00
Saroh
1cd2434a32 fix(aggregations) Readme 2022-03-01 20:37:48 +01:00
Saroh
de2cba6d1e error definitions 2022-03-01 20:13:59 +01:00
Paul Masurel
c0b1a58d27 Apply suggestions from code review 2022-03-01 18:41:58 +09:00
Paul Masurel
848b795b9f Apply suggestions from code review 2022-03-01 18:37:51 +09:00
Pascal Seitz
091b668624 fix clippy issues 2022-03-01 08:58:51 +01:00
Paul Masurel
5004290daa Return an error on certain type of corruption. (#1296) 2022-03-01 11:35:56 +09:00
StyMaar
5d2c2b804c Fix link to RamDirectory and MMapDirectory in Directory's documentation (#1295) 2022-03-01 09:46:53 +09:00
PSeitz
1a92b588e0 Merge pull request #1294 from PSeitz/aggregation
fix intermediate result de/serialization
2022-02-28 08:39:23 +01:00
Pascal Seitz
010e92c118 fix intermediate result de/serialization
return None for empty average/stats metric
add test for de/serialization of intermediate result
add test for metric on empty result
2022-02-25 16:39:57 +01:00
Paul Masurel
2ead010c83 Tantivy quickwit (#1293)
* Added sstable and enabling it by default, and parallel boolean query.
* Added async API for FileSlice.
* Added async get_doc
* Reduce blocksize to 32_000
* Added debug logs

Quickwit-specific features are hidden behind the quickwit feature flag.
2022-02-25 17:32:49 +09:00
PSeitz
c4f66eb185 improve validation in aggregation, extend invalid field test (#1292)
* improve validation in aggregation, extend invalid field test

improve validation in aggregation
extend invalid field test
Fixes #1291

* collect fast field names on request structure

* fix visibility of AggregationSegmentCollector
2022-02-25 15:21:19 +09:00
Paul Masurel
d7b46d2137 Added JSON Type (#1270)
- Removed useless copy when ingesting JSON.
- Bugfix in phrase query with a missing field norms.
- Disabled range query on default fields

Closes #1251
2022-02-24 16:25:22 +09:00
PSeitz
d042ce74c7 Merge pull request #1289 from PSeitz/numeric_options
rename IntOptions to NumericOptions
2022-02-23 14:04:40 +01:00
PSeitz
7ba9e662b8 Merge pull request #1290 from PSeitz/improve_docs
improve aggregation docs
2022-02-23 14:04:20 +01:00
Pascal Seitz
fdd5ef85e5 improve aggregation docs 2022-02-22 10:37:54 +01:00
Pascal Seitz
704498a1ac rename IntOptions to NumericOptions
keep IntOptions with deprecation warning
Fixes #1286
2022-02-21 22:20:07 +01:00
PSeitz
1232af7928 fix docs (#1288) 2022-02-21 23:15:58 +09:00
Paul Masurel
d37633e034 Minor changes in indexing. (#1285) 2022-02-21 17:16:52 +09:00
Paul Masurel
9815067171 Minor changes 2022-02-21 13:55:01 +09:00
PSeitz
972cb6c26d Aggregation (#1276)
Added support for aggregation compatible with Elasticsearch's API.
2022-02-21 09:59:11 +09:00
Paul Masurel
4dc80cfa25 Removes TokenStream chain. (#1283)
This change is mostly motivated by the introduction of json object.

We need to be able to inject a position object to make the position
shift.
2022-02-21 09:51:27 +09:00
PSeitz
cef145790c Fix opening bytes index with dynamic codec (#1279)
* Fix opening bytes index with dynamic codec

Fix #1278

* extend proptest to cover bytes field codec bug
2022-02-18 20:44:21 +09:00
Paul Masurel
e05e2a0c51 Added profiling to indexing bench (#1282) 2022-02-18 20:43:28 +09:00
Paul Masurel
e028515caf Simplified expull code. (#1281) 2022-02-18 18:57:10 +09:00
Paul Masurel
850b9eaea4 added a bench to measure the perf of indexing logs (#1275) 2022-02-18 16:48:29 +09:00
Shikhar Bhushan
505e6a440c Remove test assertion sensitive to background segment merging (#1274) 2022-02-17 10:59:46 +09:00
Koichi Akabe
fcd651f6a9 Add Vaporetto tokenizer to README (#1271)
* Add Vaporetto tokenizer to README

* Update README.md
2022-02-14 18:19:57 +09:00
Paul Masurel
e6653228a9 Renamed github workflows (#1269) 2022-02-04 15:10:24 +09:00
Paul Masurel
bdedefe07d Adding an IndexingContext object (#1268) 2022-02-04 15:08:01 +09:00
Paul Masurel
13a4473faa Removing obsolete clippy allow thingy. 2022-02-01 11:54:01 +09:00
Paul Masurel
2069e3e52b Fixing clippy comments 2022-02-01 10:24:05 +09:00
Paul Masurel
0d8263cba1 Using nightly to format 2022-01-31 16:10:11 +09:00
Paul Masurel
65b365b81c Fixing all-features build. 2022-01-31 14:41:14 +09:00
dependabot[bot]
4c1366da87 Update fastdivide requirement from 0.3 to 0.4 (#1265)
Updates the requirements on fastdivide to permit the latest version.

---
updated-dependencies:
- dependency-name: fastdivide
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-01-31 11:26:50 +09:00
Paul Masurel
eca6628b3c Minor refactoring (#1266) 2022-01-28 15:55:55 +09:00
Paul Masurel
9679c5f306 Rename quickwit-inc -> quickwit-oss 2022-01-27 15:37:09 +09:00
Shikhar Bhushan
5a2497b6fd Avoid exposing TrackedObject from Warmer API (#1264) 2022-01-25 10:04:08 +09:00
Shikhar Bhushan
99d4b1a177 Searcher Warming API (#1261)
Adds an API to register Warmers in the IndexReader.

Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-01-20 23:40:25 +09:00
Paul Masurel
732f6847c0 Field type with codes (#1255)
* Term are now typed.

This change is backward compatible:
While the Term has a byte representation that is modified, a Term itself
is a transient object that is not serialized as is in the index.

Its .field() and .value_bytes() on the other hand are unchanged.
This change offers better Debug information for terms.

While not necessary, it will also help in supporting JSON types.

* Renamed Hierarchical Facet -> Facet
2022-01-07 20:49:00 +09:00
Paul Masurel
1c6d9bdc6a Comparison of Value based on serialization. (#1250) 2022-01-07 20:31:26 +09:00
Paul Masurel
3ea6800ac5 Pleasing clippy (#1253) 2022-01-06 16:41:24 +09:00
Antoine G
395303b644 Collector + directory doc fixes (#1247)
* doc(collector)

* doc(directory)

* doc(misc)

* wording
2022-01-04 09:22:58 +09:00
Daniel Müller
2c200b46cb Use test-log instead of test-env-log (#1248)
The test-env-log crate has been renamed to test-log to better reflect
its intent of not only catering to env_logger specific initialization
but also tracing (and potentially others in the future).
This change updates the crate to use test-log instead of the now
deprecated test-env-log.
2022-01-04 09:20:30 +09:00
Liam Warfield
17e00df112 Change Snippet.fragments -> Snippet.fragment (#1243)
* Change Snippet.fragments -> Snippet.fragment
* Apply suggestions from code review

Co-authored-by: Liam Warfield <lwarfield@arista.com>
2022-01-03 22:23:51 +09:00
Antoine G
3129d86743 doc(termdict) expose structs (#1242)
* doc(termdict) expose structs
also add merger doc + lint
refs #1232
2022-01-03 22:20:31 +09:00
Shikhar Bhushan
e5e252cbc0 LogMergePolicy knob del_docs_percentage_before_merge (#1238)
Add a knob to LogMergePolicy to always merge segments that exceed a threshold of deleted docs

Closes #115
2021-12-20 13:14:56 +09:00
Paul Masurel
b2da82f151 Making MergeCandidate public in order to allow the usage of custom merge (#1237)
policies.

Closes #1235
2021-12-13 09:54:21 +09:00
Paul Masurel
c81b3030fa Issue/922b (#1233)
* Add a NORMED option on fields

Make fieldnorm indexing optional:

* for all types except text => added a NORMED option
* for text fields
** if STRING, the field has no fieldnorm retained
** if TEXT, the field has its fieldnorm computed

* Finalize making fieldnorm optional for all field types.

- Using Option for fieldnorm readers.
2021-12-10 21:12:29 +09:00
Paul Masurel
9e66c75fc6 Using stable in CI as rustc nightly seems broken 2021-12-10 18:45:23 +09:00
Paul Masurel
ebdbb6bd2e Fixing compilation warnings & clippy comments. 2021-12-10 16:47:59 +09:00
Antoine G
c980b19dd9 canonicalize path when opening MmapDirectory (#1231)
* canonicalize path when opening `MmapDirectory`
fixes #1229
2021-12-09 10:19:52 +09:00
Paul Masurel
098eea843a Reducing the number of call to fsync on the directory. (#1228)
This works by introducing a new API method in the Directory
trait. The user needs to explicitly call this method
(in particular, once before a commit).

Closes #1225
2021-12-03 03:10:52 +00:00
Paul Masurel
466dc8233c Cargo fmt 2021-12-02 18:46:28 +09:00
Paul Masurel
03c2f6ece2 We are missing 4 bytes in the LZ4 compression buffer. (#1226)
Closes #831
2021-12-02 16:00:29 +09:00
Paul Masurel
1d4e9a29db Cargo fmt 2021-12-02 15:51:44 +09:00
Paul Masurel
f378d9a57b Pleasing clippy 2021-12-02 14:48:33 +09:00
Paul Masurel
dde49ac8e2 Closes #1195 (#1222)
Removes the indexed option for facets.
Facets are now always indexed.

Closes #1195
2021-12-02 14:37:19 +09:00
Paul Masurel
c3cc93406d Bugfix: adds missing fdatasync on atomic_write.
In addition this PR:
- removes unnecessary flushes and fsyncs on files.
- replaces all fsync calls with fdatasync. The latter triggers
a metadata sync if metadata required to read the file
has changed. It is therefore sufficient for us.

Closes #1224
2021-12-02 13:42:44 +09:00
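
A sketch of the file-durability pattern these two commits lean on, using std only (this is not tantivy's Directory code): File::sync_data maps to fdatasync and flushes the contents plus the metadata needed to read them back, which is what an atomic temp-file-then-rename write needs.

    use std::fs::File;
    use std::io::Write;
    use std::path::Path;

    fn atomic_write(path: &Path, data: &[u8]) -> std::io::Result<()> {
        let tmp = path.with_extension("tmp");
        let mut file = File::create(&tmp)?;
        file.write_all(data)?;
        // fdatasync: cheaper than fsync, but enough to make the bytes durable.
        file.sync_data()?;
        // Publish atomically. For full durability the containing directory
        // also needs to be synced afterwards, which is the job of the new
        // Directory-level sync method mentioned above.
        std::fs::rename(&tmp, path)
    }

    fn main() -> std::io::Result<()> {
        let dir = std::env::temp_dir();
        atomic_write(&dir.join("meta.json"), b"{}")
    }
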
Kanji Yomoda
bd0f9211da Remove unused sort for segment meta list (#1218)
* Remove unused sort for segment meta list
* Fix segment meta order dependent test
2021-12-01 11:18:17 +09:00
PSeitz
c503c6e4fa Switch to non-strict schema (#1216)
Fixes #1211
2021-11-29 10:38:59 +09:00
PSeitz
02174d26af Merge pull request #1209 from quickwit-inc/lz4_flex_version
fix lz4_flex version
2021-11-16 14:12:45 +08:00
PSeitz
cf92be3bd6 fix lz4_flex version 2021-11-16 06:03:04 +00:00
Shikhar Bhushan
72cef12db1 Add none compression (#1208) 2021-11-16 10:50:42 +09:00
Paul Masurel
bbc0a2e233 Fixing the build 2021-11-16 09:37:25 +09:00
François Massot
4fd1a6c84b Merge pull request #1207 from quickwit-inc/fix-chat-links
Remove Patreon link and change gitter links to discord links.
2021-11-15 19:23:21 +01:00
François Massot
c83d99c414 Remove Patreon link and change gitter links to discord links. 2021-11-15 19:17:35 +01:00
Paul Masurel
eacf510175 Exchange gitter link for discord 2021-11-15 16:44:13 +09:00
Paul Masurel
8802d125f8 Prepare commit is public again (#1202)
- Simplified some of the prepare commit & segment updater code using
async.
- Made PrepareCommit public again.
2021-11-12 23:25:39 +09:00
dependabot[bot]
33301a3eb4 Update fail requirement from 0.4 to 0.5 (#1197)
Updates the requirements on [fail](https://github.com/tikv/fail-rs) to permit the latest version.
- [Release notes](https://github.com/tikv/fail-rs/releases)
- [Changelog](https://github.com/tikv/fail-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tikv/fail-rs/compare/v0.4.0...v0.5.0)

---
updated-dependencies:
- dependency-name: fail
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-11-12 23:21:16 +09:00
Paul Masurel
7234bef0eb Issue/1198 (#1201)
* Unit test reproducing #1198
* Fixing unit test to handle the error from add_document.
* Bump project version
2021-11-11 16:42:19 +09:00
azerowall
fcff91559b Fix the deserialization error of FieldEntry when the 'options' field appears before the 'type' field (#1199)
Co-authored-by: quel <azerowall>
2021-11-10 18:39:58 +09:00
Paul Masurel
b75d4e59d1 Remove the broken panic on drop unit test. (#1200) 2021-11-10 18:39:37 +09:00
Paul Masurel
c6b5ab1dbe Replacing the panic check in the RAM Directory on lack of flush. 2021-11-09 11:04:31 +09:00
PSeitz
c12e07f0ce Merge pull request #1196 from quickwit-inc/dependabot/cargo/measure_time-0.8.0
Update measure_time requirement from 0.7.0 to 0.8.0
2021-11-05 08:47:51 +08:00
dependabot[bot]
8b877a4c26 Update measure_time requirement from 0.7.0 to 0.8.0
Updates the requirements on [measure_time](https://github.com/PSeitz/rust_measure_time) to permit the latest version.
- [Release notes](https://github.com/PSeitz/rust_measure_time/releases)
- [Commits](https://github.com/PSeitz/rust_measure_time/commits)

---
updated-dependencies:
- dependency-name: measure_time
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2021-11-04 20:27:16 +00:00
PSeitz
7dc0dc1c9b extend proptests with adding case (#1191)
This extends the proptest to cover a case where up to 100 documents are added to an index.
2021-11-01 09:27:10 +09:00
François Massot
0462754673 Optimize block wand for one and several TermScorer. (#1190)
* Added optimisation using block wand for single TermScorer.

A proptest was also added.

* Fix block wand algorithm by taking the last doc id of scores until the pivot scorer (included).
* In block wand, when block max score is lower than the threshold, advance the scorer with best score.
* Fix wrong condition in block_wand_single_scorer and add debug_assert to have an equality check on doc to break the loop.
2021-11-01 09:18:05 +09:00
PSeitz
5916ceda73 Merge pull request #1188 from PSeitz/sort_issue
fix incorrect padding in bitset for multiple of 64
2021-10-29 17:06:38 +08:00
Pascal Seitz
70283dc6c8 fix incorrect padding in bitset for multiple of 64 2021-10-29 16:49:22 +08:00
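A sketch of the bug class named in that commit (the exact tantivy code differs): sizing and padding a bitset in 64-bit words goes wrong precisely when the number of bits is a multiple of 64, unless the arithmetic rounds correctly.

    // Number of u64 words needed to hold `num_bits` bits.
    fn num_words(num_bits: usize) -> usize {
        (num_bits + 63) / 64
    }

    // Unused padding bits in the last word. The naive `64 - num_bits % 64`
    // wrongly yields 64 (a whole extra word of padding) at exact multiples.
    fn padding_bits(num_bits: usize) -> usize {
        (64 - num_bits % 64) % 64
    }

    fn main() {
        assert_eq!((num_words(63), padding_bits(63)), (1, 1));
        assert_eq!((num_words(64), padding_bits(64)), (1, 0)); // the tricky case
        assert_eq!((num_words(65), padding_bits(65)), (2, 63));
    }
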
PSeitz
dbaf4f3623 Merge pull request #1187 from PSeitz/sort_issue
check searcher num docs in proptest
2021-10-29 16:19:24 +08:00
Pascal Seitz
4808648322 check searcher num docs in proptest 2021-10-29 14:38:30 +08:00
Paul Masurel
54afb9b34a Made PrepareCommit private 2021-10-29 14:13:14 +09:00
Paul Masurel
d336c8b938 Fixed logo 2021-10-27 08:54:16 +09:00
Paul Masurel
980d1b2796 Removing Patreon link 2021-10-27 08:53:45 +09:00
Dan Cecile
6317982876 Make indexer::prepared_commit public (#1184)
* Make indexer::prepared_commit public

* Add PreparedCommit to lib
2021-10-26 12:21:24 +09:00
PSeitz
e2fbbc08ca Merge pull request #1182 from PSeitz/remove_directory_generic
use Box<dyn Directory> as parameter to open/create an Index
2021-10-25 12:49:55 +08:00
Pascal Seitz
99cd25beae use <T: Into<Box<dyn Directory>>> as parameter to open/create an Index
This is done in order to support Box<dyn Directory> in addition to generic implementations of the Directory trait.
Remove boxing in ManagedDirectory.
2021-10-25 12:34:40 +08:00
Kanji Yomoda
737ecc7015 Fix outdated comment for IndexWriter::new (#1183) 2021-10-25 10:59:18 +09:00
Kanji Yomoda
09668459c8 Update codecov-action to v2 and make it possible to keep it up-to-date with dependabot (#1181)
* Update codecov-action to v2

* Add github-actions to dependabot
2021-10-25 10:58:16 +09:00
Evance Soumaoro
e5fd30f438 Fixed links (#1177) 2021-10-25 10:56:04 +09:00
Tom Parker-Shemilt
c412a46105 Remove travis config (#1180) 2021-10-24 15:40:43 +09:00
PSeitz
3a78402496 update links (#1176) 2021-10-18 20:45:40 +09:00
Paul Masurel
d18ac136c0 Search simplified (#1175) 2021-10-18 12:52:43 +09:00
Paul Masurel
b5b1244857 More functionality in the ownedbytes crate (#1172) 2021-10-07 18:14:49 +09:00
Paul Masurel
27acfa4dea Removing dead file (#1170) 2021-10-07 14:15:21 +09:00
Paul Masurel
02cffa4dea Code simplification. (#1169)
Code simplification and Clippy
2021-10-07 14:11:44 +09:00
Paul Masurel
b52abbc771 Bugfix transposition_cost_one in FuzzyQuery (#1167) 2021-10-07 09:38:39 +09:00
Paul Masurel
894c61867f Fix test compilation (#1168) 2021-10-06 17:50:10 +09:00
PSeitz
352e0cc58d Added demux operation (#1150)
* add merge for DeleteBitSet, allow custom DeleteBitSet on merge
* forward delete bitsets on merge, add tests
* add demux operation and tests
2021-10-06 16:05:16 +09:00
Paul Masurel
ffe4446d90 Minor lint comments (#1166) 2021-10-06 11:27:48 +09:00
dependabot[bot]
4d05b26e7a Update lru requirement from 0.6.5 to 0.7.0 (#1165)
Updates the requirements on [lru](https://github.com/jeromefroe/lru-rs) to permit the latest version.
- [Release notes](https://github.com/jeromefroe/lru-rs/releases)
- [Changelog](https://github.com/jeromefroe/lru-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/jeromefroe/lru-rs/compare/0.6.5...0.7.0)

---
updated-dependencies:
- dependency-name: lru
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-10-06 05:50:24 +09:00
Paul Masurel
0855649986 Leaning more on the alive (vs delete) semantics. (#1164) 2021-10-05 18:53:29 +09:00
PSeitz
d828e58903 Merge pull request #1163 from PSeitz/reduce_mem_usage
reduce mem usage
2021-10-01 08:03:41 +02:00
Pascal Seitz
aa0396fe27 fix variable names 2021-10-01 13:48:51 +08:00
Pascal Seitz
8d8315f8d0 prealloc vec in postinglist 2021-09-29 09:02:38 +08:00
Pascal Seitz
078c0a2e2e reserve vec 2021-09-29 08:45:04 +08:00
Pascal Seitz
f21e8dd875 use only segment ordinal in docidmapping 2021-09-29 08:44:56 +08:00
Tomoko Uchida
74e36c7e97 Add unit tests for tokenizers and filters (#1156)
* add unit test for SimpleTokenizer
* add unit tests for tokenizers and filters.
2021-09-27 10:22:01 +09:00
PSeitz
f27ae04282 fix slope calculation in multilinear interpol (#1161)
add test to check for compression
2021-09-27 10:14:03 +09:00
PSeitz
0ce49c9dd4 use lz4_flex 0.9.0 (#1160) 2021-09-27 10:12:20 +09:00
PSeitz
fe8e58e078 Merge pull request #1154 from PSeitz/delete_bitset
add DeleteBitSet iterator
2021-09-24 09:37:39 +02:00
Pascal Seitz
efc0d8341b fix comment 2021-09-24 15:09:21 +08:00
Pascal Seitz
22bcc83d10 fix padding in initialization 2021-09-24 14:43:04 +08:00
Pascal Seitz
5ee5037934 create and use ReadSerializedBitSet 2021-09-24 12:53:33 +08:00
Pascal Seitz
c217bfed1e cargo fmt 2021-09-23 21:02:19 +08:00
Pascal Seitz
c27ccd3e24 improve naming 2021-09-23 21:02:09 +08:00
Paul Masurel
367f5da782 Fixed comment to the index accessor 2021-09-23 21:53:48 +09:00
Mestery
b256df6599 add index accessor for index writer (#1159)
* add index accessor for index writer

* Update src/indexer/index_writer.rs

Co-authored-by: Paul Masurel <paul@quickwit.io>
2021-09-23 21:49:20 +09:00
Pascal Seitz
d7a6a409a1 renames 2021-09-23 20:33:11 +08:00
Pascal Seitz
a1f5cead96 AliveBitSet instead of DeleteBitSet 2021-09-23 20:03:57 +08:00
dependabot[bot]
37c5fe3c86 Update memmap2 requirement from 0.4 to 0.5 (#1157)
Updates the requirements on [memmap2](https://github.com/RazrFalcon/memmap2-rs) to permit the latest version.
- [Release notes](https://github.com/RazrFalcon/memmap2-rs/releases)
- [Changelog](https://github.com/RazrFalcon/memmap2-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/RazrFalcon/memmap2-rs/compare/v0.4.0...v0.5.0)

---
updated-dependencies:
- dependency-name: memmap2
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-09-23 20:18:27 +09:00
Pascal Seitz
4583fa270b fixes 2021-09-23 10:39:53 +08:00
Pascal Seitz
beb3a5bd73 fix len 2021-09-18 17:58:15 +08:00
Pascal Seitz
93cbd52bf0 move code to bitset, add inline, add benchmark 2021-09-18 17:35:22 +08:00
Pascal Seitz
c22177a005 add iterator 2021-09-17 15:29:27 +08:00
Pascal Seitz
4da71273e1 add de/serialization for bitset
remove len footgun
2021-09-17 10:28:12 +08:00
dependabot[bot]
2c78b31aab Update memmap2 requirement from 0.3 to 0.4 (#1155)
Updates the requirements on [memmap2](https://github.com/RazrFalcon/memmap2-rs) to permit the latest version.
- [Release notes](https://github.com/RazrFalcon/memmap2-rs/releases)
- [Changelog](https://github.com/RazrFalcon/memmap2-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/RazrFalcon/memmap2-rs/compare/v.0.3.0...v0.4.0)
2021-09-17 08:52:52 +09:00
Pascal Seitz
4ae1d87632 add DeleteBitSet iterator 2021-09-15 23:10:04 +08:00
Paul Masurel
46b86a7976 Bumped version and edited changelog 2021-09-10 23:05:09 +09:00
PSeitz
3bc177e69d fix #1151 (#1152)
* fix #1151

Fixes an off-by-one error in the stats for the index fast field of the multi-value fast field.
When retrieving the data range for a doc id, `get(doc_id)..get(doc_id + 1)` is requested. On creation,
the num_vals statistic was set to doc_id instead of doc_id + 1. In the multi-value linear-interpolation fast
field, the last value was therefore not serialized (and would return 0 instead in most cases).
So the last document's range get(last_doc)..get(last_doc + 1) would return the invalid range `value..0`.

This PR adds a proptest to cover this scenario: a combination of a large number of values (multilinear
interpolation is only active for more than 5_000 values) and a merge is required. A sketch of the offset-index access pattern follows this commit.
2021-09-10 23:00:37 +09:00
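The commit above hinges on the offset-index layout of multi-valued fast fields. Below is a minimal, hypothetical Rust sketch (not tantivy's actual types) of that access pattern, illustrating why the offset index needs num_docs + 1 entries:

```rust
/// Hypothetical offset index for a multi-valued column: the values of doc `d`
/// live in `values[offsets[d]..offsets[d + 1]]`, so `offsets` must hold
/// `num_docs + 1` entries. Dropping the last entry is the off-by-one
/// described in the commit above.
struct MultiValuedColumn {
    offsets: Vec<u64>, // length = num_docs + 1
    values: Vec<u64>,
}

impl MultiValuedColumn {
    fn values_for_doc(&self, doc_id: u32) -> &[u64] {
        let start = self.offsets[doc_id as usize] as usize;
        let end = self.offsets[doc_id as usize + 1] as usize;
        &self.values[start..end]
    }
}

fn main() {
    // doc 0 -> [10, 11], doc 1 -> no values, doc 2 -> [12]
    let col = MultiValuedColumn {
        offsets: vec![0, 2, 2, 3],
        values: vec![10, 11, 12],
    };
    assert_eq!(col.values_for_doc(0), &[10, 11]);
    assert!(col.values_for_doc(1).is_empty());
    assert_eq!(col.values_for_doc(2), &[12]);
}
```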
PSeitz
319609e9c1 test cargo-llvm-cov (#1149) 2021-09-03 22:00:43 +09:00
Kanji Yomoda
9d87b89718 Fix incorrect comment for Index::create_in_dir (#1148)
* Fix incorrect comment for Index::create_in_dir
2021-09-03 10:37:16 +09:00
Tomoko Uchida
dd81e38e53 Add WhitespaceTokenizer (#1147)
* Add WhitespaceTokenizer.
2021-08-29 18:20:49 +09:00
Paul Masurel
9f32b22602 Preparing for release. 2021-08-26 09:07:08 +09:00
sigaloid
096ce7488e Resolve some clippys, format (#1144)
* cargo +nightly clippy --fix -Z unstable-options
2021-08-26 08:46:00 +09:00
PSeitz
a1782dd172 Update index_sorting.md 2021-08-25 07:55:50 +01:00
PSeitz
000d76b11a Update index_sorting.md 2021-08-24 19:28:06 +01:00
PSeitz
abd29f6646 Update index_sorting.md 2021-08-24 19:26:19 +01:00
PSeitz
b4ecf0ab2f Merge pull request #1146 from tantivy-search/sorting_doc
add sorting to book
2021-08-23 17:37:54 +01:00
Pascal Seitz
798f7dbf67 add sorting to book 2021-08-23 17:36:41 +01:00
PSeitz
06a2e47c8d Merge pull request #1145 from tantivy-search/blub2
cargo fmt
2021-08-21 18:52:50 +01:00
Pascal Seitz
e0b83eb291 cargo fmt 2021-08-21 18:52:10 +01:00
PSeitz
13401f46ea add wildcard mention 2021-08-21 18:10:33 +01:00
PSeitz
1a45b030dc Merge pull request #1141 from tantivy-search/tantivy_common
dissolve common module
2021-08-20 08:03:37 +01:00
Pascal Seitz
62052bcc2d add missing test function
closes #1139
2021-08-20 07:26:22 +01:00
Pascal Seitz
3265f7bec3 dissolve common module 2021-08-19 23:26:34 +01:00
Pascal Seitz
ee0881712a move bitset to common crate, move composite file to directory 2021-08-19 17:45:09 +01:00
PSeitz
483e0336b6 Merge pull request #1140 from tantivy-search/tantivy_common
rename common to tantivy-common
2021-08-19 13:02:54 +01:00
Pascal Seitz
3e8f267e33 rename common to tantivy-common 2021-08-19 10:27:20 +01:00
Paul Masurel
3b247fd968 Version bump 2021-08-19 10:12:30 +09:00
Paul Masurel
750f6e6479 Removed obsolete unit test (#1138) 2021-08-19 10:07:49 +09:00
Evance Soumaoro
5b475e6603 Checksum validation using active files (#1130)
* validate checksum now uses segment files, not managed files
2021-08-19 10:03:20 +09:00
PSeitz
0ca7f73dc5 add docs badge, fix build badge 2021-08-13 19:40:33 +01:00
PSeitz
47ed18845e Merge pull request #1136 from tantivy-search/minor_fixes
more docs detail
2021-08-13 18:11:47 +01:00
Pascal Seitz
dc141cdb29 more docs detail
remove duplicate code
2021-08-13 17:40:13 +01:00
PSeitz
f6cf6e889b Merge pull request #1133 from tantivy-search/merge_overflow
test doc_freq and term_freq in sorted index
2021-08-05 07:53:46 +01:00
Pascal Seitz
f379a80233 test doc_freq and term_freq in sorted index 2021-08-03 11:38:05 +01:00
PSeitz
4a320fd1ff fix delta position in merge and index sorting (#1132)
fixes #1125
2021-08-03 18:06:36 +09:00
PSeitz
85d23e8e3b Merge pull request #1129 from tantivy-search/merge_overflow
add long running test in ci
2021-08-02 15:54:31 +01:00
Pascal Seitz
022ab9d298 don't run as pr 2021-08-02 15:44:00 +01:00
Pascal Seitz
605e8603dc add positions to long running test 2021-08-02 15:29:49 +01:00
Pascal Seitz
70f160b329 add long running test in ci 2021-08-02 11:35:39 +01:00
PSeitz
6d265e6bed fix gh action name 2021-08-02 10:38:01 +01:00
PSeitz
fdc512391b Merge pull request #1128 from tantivy-search/merge_overflow
add sort to functional test, add env for iterations
2021-08-02 10:29:16 +01:00
Pascal Seitz
108714c934 add sort to functional test, add env for iterations 2021-08-02 10:11:17 +01:00
Paul Masurel
44e8cf98a5 Cargo fmt 2021-07-30 15:30:01 +09:00
Paul Masurel
f0ee69d9e9 Remove the complicated block search logic for a simpler branchless (#1124)
binary search

The code is simpler and faster.

Before
test postings::bench::bench_segment_intersection ... bench: 2,093,697 ns/iter (+/- 115,509)
test postings::bench::bench_skip_next_p01 ... bench: 58,585 ns/iter (+/- 796)
test postings::bench::bench_skip_next_p1 ... bench: 160,872 ns/iter (+/- 5,164)
test postings::bench::bench_skip_next_p10 ... bench: 615,229 ns/iter (+/- 25,108)
test postings::bench::bench_skip_next_p90 ... bench: 1,120,509 ns/iter (+/- 22,271)

After
test postings::bench::bench_segment_intersection ... bench: 1,747,726 ns/iter (+/- 52,867)
test postings::bench::bench_skip_next_p01 ... bench: 55,205 ns/iter (+/- 714)
test postings::bench::bench_skip_next_p1 ... bench: 131,433 ns/iter (+/- 2,814)
test postings::bench::bench_skip_next_p10 ... bench: 478,830 ns/iter (+/- 12,794)
test postings::bench::bench_skip_next_p90 ... bench: 931,082 ns/iter (+/- 31,468)
2021-07-30 14:38:42 +09:00
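For reference, a branchless lower-bound search of the kind the commit above switches to can be sketched as follows. This is a generic illustration under the assumption of a sorted `u32` slice, not the actual tantivy code: the comparison result is folded into an offset so the hot loop has no unpredictable branch.

```rust
/// Index of the first element >= target in a sorted slice (branchless sketch).
fn branchless_lower_bound(arr: &[u32], target: u32) -> usize {
    let mut base = 0usize;
    let mut len = arr.len();
    while len > 1 {
        let half = len / 2;
        // The comparison is turned into a 0/1 offset instead of a branch,
        // which avoids branch mispredictions in the hot loop.
        base += (arr[base + half - 1] < target) as usize * half;
        len -= half;
    }
    base + arr.get(base).map_or(false, |&v| v < target) as usize
}

fn main() {
    let doc_ids = [3u32, 7, 7, 12, 19, 25];
    assert_eq!(branchless_lower_bound(&doc_ids, 12), 3);
    assert_eq!(branchless_lower_bound(&doc_ids, 13), 4);
    assert_eq!(branchless_lower_bound(&doc_ids, 30), 6);
}
```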
Evance Soumaoro
b8a10c8406 switched to memmap2-rs (#1120) 2021-07-27 18:40:41 +09:00
PSeitz
ff4813529e add comments on compression (#1119) 2021-07-26 22:54:22 +09:00
PSeitz
470bc18e9b Merge pull request #1118 from tantivy-search/remove_rand
move rand to optional dependencies
2021-07-21 18:01:22 +01:00
Pascal Seitz
0b1add0ec6 move rand to optional dependencies
closes #1117
2021-07-21 17:49:24 +01:00
François Massot
1db76dd9cf Merge pull request #1113 from shikhar/patch-1
stale comments in segment_reader.rs
2021-07-20 23:02:20 +02:00
François Massot
467a9517db Merge pull request #1114 from shikhar/patch-2
FilterCollector doc fix
2021-07-20 21:02:28 +02:00
Shikhar Bhushan
b361315a67 FilterCollector doc fix
Other types supported since https://github.com/tantivy-search/tantivy/pull/953/files
2021-07-15 22:55:47 -04:00
Shikhar Bhushan
4e3771bffc stale comments in segment_reader.rs 2021-07-15 22:47:32 -04:00
PSeitz
8176b0335a Merge pull request #1108 from PSeitz/pwnedbytes
move ownedbytes to own crate
2021-07-05 16:07:56 +02:00
Pascal Seitz
811ac98f36 more inlines 2021-07-05 15:49:42 +02:00
François Massot
f4b2e71800 Handle field names with any characters with a known set of special (#1109)
* Handle field names with any characters, using a known set of special characters and an escape character

* Update field name validation rule to check only if it has at least one character and does not start with `-`

Closes #1087.
2021-07-05 22:31:36 +09:00
PSeitz
c431cfcf12 extend proptests, fix race condition (#1107)
* extend proptests, fix race condition
* cargo fmt
2021-07-05 18:28:56 +09:00
PSeitz
92f20bc5a2 use nightly image in coverage 2021-07-03 09:38:44 +02:00
PSeitz
57f931da3c Create coverage.yml 2021-07-03 09:35:07 +02:00
Pascal Seitz
9b662e6d03 move ownedbytes to own crate
fixes #1106
2021-07-02 16:51:59 +02:00
PSeitz
18377d949c Merge pull request #1105 from PSeitz/clippy
Fix clippy warnings
2021-07-02 10:01:19 +02:00
Pascal Seitz
e6427b2588 cleanup 2021-07-02 09:21:22 +02:00
Pascal Seitz
0062fe705d cargo fmt 2021-07-01 18:17:08 +02:00
Pascal Seitz
9b3e508753 fix clippy 2021-07-01 18:06:09 +02:00
Pascal Seitz
a1ac63ee1c fix clippy 2021-07-01 18:06:03 +02:00
Pascal Seitz
e496ae0470 clippy fixes 2021-07-01 17:43:50 +02:00
Pascal Seitz
1e4df54ab3 fix clippy 2021-07-01 17:41:53 +02:00
Pascal Seitz
2de249af74 clippy fixes 2021-07-01 17:37:37 +02:00
Pascal Seitz
10f056fbb4 apply clippy fixes 2021-07-01 17:08:44 +02:00
PSeitz
074b09d0c0 Merge pull request #1102 from PSeitz/proptests
extend proptests for sorting and merge
2021-07-01 16:23:53 +02:00
Pascal Seitz
86d0727659 add facet test
closes #1100
2021-07-01 15:36:17 +02:00
Pascal Seitz
be3e1b8718 cargo fmt 2021-07-01 14:02:09 +02:00
Pascal Seitz
8fdf59bdac add search test for proptest 2021-07-01 14:01:30 +02:00
Pascal Seitz
ebebce2102 cargo fmt 2021-07-01 10:47:20 +02:00
Pascal Seitz
8044ec38da test docstore in proptest 2021-07-01 10:15:42 +02:00
Pascal Seitz
7413f87265 use set instead of vec in proptest 2021-07-01 08:28:51 +02:00
PSeitz
aea2e77665 Merge pull request #1097 from PSeitz/multifastfield
Use dynamic fastfield codecs for multivalues, fixes #1093
2021-06-30 14:38:26 +02:00
Pascal Seitz
a15845f9fd add merge case to proptest, test multivalue fastfields
#1100
2021-06-30 13:13:33 +02:00
Pascal Seitz
94ac44df4f proptest with optional sorting 2021-06-30 12:06:03 +02:00
Pascal Seitz
f80d804a57 add random commits in proptest 2021-06-30 11:18:07 +02:00
Pascal Seitz
3b5c1d7817 use measure_time 0.7 2021-06-30 11:08:02 +02:00
Pascal Seitz
24274edf81 remove trait impl fpr &Vec 2021-06-30 09:50:47 +02:00
Paul Masurel
d58497529b Fixed CHANGELOG to include 0.15.2. 2021-06-30 16:34:47 +09:00
Pascal Seitz
130495abab cleanup 2021-06-30 08:57:55 +02:00
Pascal Seitz
9b743d60fb make docid mapping non optional
make docid mapping non optional
add trivial flag for docid mapping
add time measurements
2021-06-30 08:57:55 +02:00
Pascal Seitz
5c9e2ef036 wrap docidmapping in struct 2021-06-30 08:57:55 +02:00
Pascal Seitz
8526434b63 add dynamic fastfield case
add dynamic fastfield for single fast field unsorted
fix scary documentation bug
add num_len instead of len
2021-06-30 08:57:55 +02:00
Pascal Seitz
6ba302c481 Use dynamic fastfield codecs for multivalues, fixes #1093
Use dynamic fastfield codecs for multivalues (only the sorting case is covered)
Rename get to get_val due to conflict with Vec
use u64 precision instead of u32 for get_range, to allow use of the existing fast field interface (actually not sure if it would be better to have a different interface)
2021-06-30 08:57:55 +02:00
Paul Masurel
de92f094aa Closes #1101 fix delete documents with sort by field
Closes #1101

* fix delete documents with sort by field

Co-authored-by: Andre-Philippe Paquet <appaquet@gmail.com>
2021-06-30 15:51:32 +09:00
Evance Soumaoro
c82cee66de exposing min/max value interface on MultiValuedFastField Reader (#1096) 2021-06-23 17:38:50 +09:00
Paul Masurel
6eed05b1ce Revert "Exposing min/max value interface on MultiValuedFastField Reader (#1094)" (#1095)
This reverts commit bb488305c9.
2021-06-23 10:25:11 +09:00
Evance Soumaoro
bb488305c9 Exposing min/max value interface on MultiValuedFastField Reader (#1094)
Exposing min/max value interface on MultiValuedFastField Reader
2021-06-23 08:51:36 +09:00
PSeitz
f05e84f964 add FieldEntry constructor, closes #1086 (#1090) 2021-06-17 10:15:48 +09:00
PSeitz
65546ed22b Merge pull request #1088 from appaquet/fix/store-reader-iterator-take2
Fix corruption in store reader iterator, take 2
2021-06-16 14:44:00 +02:00
Andre-Philippe Paquet
57ae5b27dc fix store reader iterator, take 2 2021-06-16 07:51:39 -04:00
PSeitz
f9531ec3c9 Merge pull request #1085 from PSeitz/fastfieldcompression
use concrete return type, fixes #1084
2021-06-16 13:07:12 +02:00
Pascal Seitz
5b54a32563 use concrete return type, fixes #1084 2021-06-16 12:03:11 +02:00
PSeitz
cd049e28bc Merge pull request #1082 from PSeitz/fastfieldcompression
use dynamic fast field codec for offset index
2021-06-16 11:59:00 +02:00
PSeitz
646e41bec4 Merge pull request #1083 from PSeitz/termdict_block_layout
Move counting writer to common
2021-06-16 08:57:55 +02:00
Pascal Seitz
36528c5e83 move counting writer to common
move counting writer to common
reuse counting writer in fastfield codec
2021-06-16 08:14:04 +02:00
Pascal Seitz
cd169dee23 use dynamic fast field codec for offset index 2021-06-15 13:34:42 +02:00
PSeitz
b5cc60f80b Merge pull request #1080 from PSeitz/more_tests
test all features in github actions
2021-06-15 10:51:57 +02:00
Pascal Seitz
060b83159a use nightly for tests 2021-06-15 10:08:49 +02:00
Pascal Seitz
a40ff35453 test all features 2021-06-15 09:31:39 +02:00
PSeitz
268e6bfe6e update fast field codec readme 2021-06-15 09:19:39 +02:00
PSeitz
f902440b8b Merge pull request #1072 from PSeitz/fastfieldcompression
Enable support for multiple fastfield codecs, add linear interpolation
2021-06-15 09:07:17 +02:00
Pascal Seitz
77a0902605 replace unwrap, use vec in bench 2021-06-14 17:01:46 +02:00
Pascal Seitz
c889ae10e4 add is_applicable to fast field codecs 2021-06-14 16:16:25 +02:00
Pascal Seitz
0a534c6ee0 rename create to serialize 2021-06-14 15:40:07 +02:00
Pascal Seitz
167d88b449 fix tests behind unstable feature flag 2021-06-14 15:31:12 +02:00
Pascal Seitz
1071ed84f2 fix cond compilation 2021-06-14 14:05:04 +02:00
Pascal Seitz
abb5624af2 add contributing guidelines, add codec comparer binary
add contributing guidelines
add codec comparer binary to test codec compressions with different test data sets
2021-06-14 13:56:40 +02:00
Pascal Seitz
1d41b96d32 rename, add codec_tester 2021-06-14 13:56:40 +02:00
Pascal Seitz
ef4665945f rename file 2021-06-14 13:56:40 +02:00
Pascal Seitz
294cd5fd0b streamline traits and tests 2021-06-14 13:56:40 +02:00
Pascal Seitz
f4d271177c add inline, add readme 2021-06-14 13:56:40 +02:00
Pascal Seitz
451538fecf add serialize for bool 2021-06-14 13:56:40 +02:00
Pascal Seitz
e78e0fec59 add multilinearinterpolation
add multilinearinterpolation, which compresses blocks of size 512
add checks for linear interpolation
2021-06-14 13:56:40 +02:00
Pascal Seitz
2e639cebf8 fix bitpacker bug, reset internal value 2021-06-14 13:56:40 +02:00
Pascal Seitz
e296da7ade add debug and failsafes 2021-06-14 13:56:40 +02:00
Pascal Seitz
3b3e26c4b8 use f64 precision for slope calculation 2021-06-14 13:56:40 +02:00
Pascal Seitz
6a4883ac69 use uniform distribution sampling 2021-06-14 13:56:40 +02:00
Pascal Seitz
0ba05df545 add f32::MAX to disable a compressor 2021-06-14 13:56:40 +02:00
Pascal Seitz
aa3c4d4029 use f32 precision, add inline 2021-06-14 13:56:40 +02:00
Pascal Seitz
60df629725 cargo.toml license desc and author 2021-06-14 13:56:40 +02:00
Pascal Seitz
2570b005ac fix estimation test 2021-06-14 13:56:40 +02:00
Pascal Seitz
d5212cd19d fix clippy 2021-06-14 13:56:40 +02:00
Pascal Seitz
2193d85622 fix clippy and common crate tests 2021-06-14 13:56:40 +02:00
Pascal Seitz
dfdbfe9eff add benchmark for fast field codecs
test tests::bench_fastfield_bitpack_create        ... bench:      57,628 ns/iter (+/- 23,486)
test tests::bench_fastfield_bitpack_get           ... bench:      43,323 ns/iter (+/- 4,286)
test tests::bench_fastfield_linearinterpol_create ... bench:     223,625 ns/iter (+/- 33,563)
test tests::bench_fastfield_linearinterpol_get    ... bench:      82,839 ns/iter (+/- 9,575)
2021-06-14 13:56:40 +02:00
Pascal Seitz
b999e836b2 replace BitpackedFastFieldReader, delete FastFieldSerializer trait 2021-06-14 13:56:40 +02:00
Pascal Seitz
be2dd41e69 add interface to create and read codecs
add CodecReader as common interface in fastfield codec crate
add LinearInterpolation to DynamicFastFieldReader
calc estimation and choose best codec
cleanup
2021-06-14 13:56:40 +02:00
Pascal Seitz
483fdb79cc add linear interpolation estimation
add estimation tests
add codec test data in tests
2021-06-14 13:56:40 +02:00
Pascal Seitz
aefd0fc907 refactor, add fastfield metadata to footer
change api to fastfield reader in codec crate
add fastfield metadata to footer
remove old code
merge codec files
2021-06-14 13:56:40 +02:00
Pascal Seitz
3298d6cb71 move common to common crate, create fastfield_codecs crate
move common to common crate
create fastfield_codecs crate
add bitpacker to fast field codecs
add linear interpolation to fast field codecs
add tests
2021-06-14 13:56:40 +02:00
Pascal Seitz
c02c78ea73 implement linear interpol serializer 2021-06-14 13:56:40 +02:00
Pascal Seitz
6bf4fee1ba support multiple codecs
support multiple codecs
prepend codec id to all fast fields
add new api to create fastfields with access to all data
use new fastfield creation api in initial creation and merge
remove unused collect of data in doc_id_mapping
2021-06-14 13:56:40 +02:00
PSeitz
5209238c1b use github actions for tests 2021-06-14 12:51:46 +02:00
Paul Masurel
7ef25ec400 Bump to 0.15.1 to publish bugfix 2021-06-14 18:45:38 +09:00
PSeitz
221e7cbb55 Merge pull request #1076 from appaquet/fix/store-reader-iterator
Fix panic in store reader raw document iterator during segment merge
2021-06-14 11:22:58 +02:00
Pascal Seitz
873ac1a3ac cleanup import 2021-06-14 10:31:45 +02:00
Pascal Seitz
ebe55a7ae1 refactor test, fixes #1077
replace test with smaller test in doc_store
2021-06-14 10:10:05 +02:00
Bernard Swart
9f32d40b27 Misspelling of misspelled was fixed (#1078) 2021-06-14 16:29:12 +09:00
Andre-Philippe Paquet
8ae10a930a fix formatting 2021-06-13 17:23:40 -04:00
Andre-Philippe Paquet
473a346814 remove debugging 2021-06-13 16:49:44 -04:00
Andre-Philippe Paquet
3a8a0fe79a add fuzzy merge test 2021-06-13 16:42:24 -04:00
Andre-Philippe Paquet
511dc8f87f fix store reader iterator 2021-06-13 16:00:13 -04:00
Paul Masurel
3901295329 Bumped query-grammar version 2021-06-07 10:00:14 +09:00
Paul Masurel
f5918c6c74 Completed bitpacker README 2021-06-07 09:57:17 +09:00
Paul Masurel
abe6b4baec Bumped tantivy version to 0.15 2021-06-07 09:52:48 +09:00
Paul Masurel
6e4b61154f Issue/1070 (#1071)
Add a boolean flag in the Query::query_terms informing on whether
position information is required.

Closes #1070
2021-06-03 22:33:20 +09:00
PSeitz
2aad0ced77 add inline to bitpacker (#1064) 2021-05-31 23:15:41 +09:00
Stéphane Campinas
41ea14840d add benchmark of term streams merge (#1024)
* add benchmark of term streams merge
* use union based on FST for merging the term dictionaries
* Rename TermMerger benchmark
2021-05-31 23:15:01 +09:00
PSeitz
dff0ffd38a prepare for multiple fastfield codecs (#1063)
* prepare for multiple fastfield codecs

 prepare for multiple fastfield codecs by wrapping the codecs in an enum #1042

* add FastFieldSerializer trait, add DynamicFastFieldSerializer

add FastFieldSerializer trait
add DynamicFastFieldSerializer enum to wrap all implementors of the FastFieldSerializer trait

* add estimation for fastfield bitpacker
2021-05-31 23:14:14 +09:00
PSeitz
8d32c3ba3a Change Footer version handling, Make compression dynamic (#1060)
Change Footer version handling, Make compression dynamic

Change Footer version handling
Simplify version handling by switching to JSON instead of binary serialization.
fixes #1058

Make compression dynamic
Instead of choosing the compression at compile time via a feature flag, you can now have multiple compression algorithms enabled and decide at runtime which one to use via IndexSettings. Changing the compression algorithm on an index is also supported. The information about which algorithm was used in the doc store is stored in the DocStoreFooter. The default is the lz4 block format (see the sketch below).
fixes #904

Handle merging of different compressors
Fix feature flag names
Add doc store test for all compressors
2021-05-28 14:57:20 +09:00
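A hedged sketch of what runtime-selected doc store compression can look like. The enum, method names, and placeholder bodies below are illustrative only (not tantivy's exact API); the point is that the compressor is a runtime value carried by the index settings / doc store footer rather than a compile-time feature flag.

```rust
use std::io;

#[derive(Clone, Copy, Debug, PartialEq)]
enum Compressor {
    Lz4Block,
    Snappy,
    Brotli,
}

impl Compressor {
    fn compress(&self, bytes: &[u8]) -> io::Result<Vec<u8>> {
        // Placeholder bodies: a real implementation would dispatch to
        // lz4_flex, snap or brotli here.
        match self {
            Compressor::Lz4Block => Ok(bytes.to_vec()),
            Compressor::Snappy => Ok(bytes.to_vec()),
            Compressor::Brotli => Ok(bytes.to_vec()),
        }
    }
}

fn main() -> io::Result<()> {
    // The chosen algorithm travels with the index, e.g. serialized in a JSON footer.
    let compressor = Compressor::Lz4Block;
    let block = compressor.compress(b"some serialized documents")?;
    println!("{:?} produced a {}-byte block", compressor, block.len());
    Ok(())
}
```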
Moriyoshi Koizumi
4afba005f9 Provide a means to deal with malformed facet text representation for the query parser (#1056)
* Provide a means to deal with malformed facet text representation for the query parser.
* Specific error enum for the facet parse error.
2021-05-27 12:16:49 +09:00
PSeitz
85fb0cc20a cache field norm reader in merge (#1061) 2021-05-25 21:48:02 +09:00
PSeitz
5ef2d56ec2 Avoid docstore stacking for small segments, fixes #1053 (#1055) 2021-05-24 15:38:49 +09:00
Paul Masurel
fd8e5bdf57 Rename more like this 2021-05-21 16:32:39 +09:00
PSeitz
4f8481a1e4 Detect if segments are stackable with sorting, fixes #1038 (#1054)
* Detect if segments are stackable with sorting, fixes #1038

Detect if segments are stackable when their data ranges on the sort property are disjoint (see the sketch below).
Presort segments by their min value on merge, to enable easier stacking.

* move code to function
2021-05-21 15:23:17 +09:00
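A hedged sketch of the stackability check described above, with hypothetical names (not tantivy's code): sorted segments can be stacked rather than merged when their value ranges on the sort field are disjoint once the segments are ordered by minimum value.

```rust
use std::ops::RangeInclusive;

/// Returns true if the per-segment [min, max] ranges on the sort field do not
/// overlap after sorting the segments by their minimum value.
fn are_stackable(mut ranges: Vec<RangeInclusive<u64>>) -> bool {
    ranges.sort_by_key(|r| *r.start());
    ranges.windows(2).all(|w| w[0].end() < w[1].start())
}

fn main() {
    assert!(are_stackable(vec![10..=20, 30..=40, 21..=29]));
    assert!(!are_stackable(vec![10..=25, 20..=40]));
}
```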
PSeitz
bcd72e5c14 fix and refactor log merge policy, fixes #1035 (#1043)
* fix and refactor log merge policy, fixes #1035

fixes a bug in log merge policy where an index was wrongly referenced by its index

* cleanup

* fix sort order, improve method names

* use itertools groupby, fix serialization test

* minor improvements

* update names
2021-05-19 10:48:46 +09:00
PSeitz
249bc6cf72 upgrade lz4_flex to 0.8 (#1049)
* upgrade lz4_flex to 0.8

* fix set_len
2021-05-19 10:46:01 +09:00
PSeitz
1c0af5765d fix doc store iter error handling, fixes #1047 (#1051) 2021-05-18 21:43:57 +09:00
Paul Masurel
7ba771ed1b Replaced RawDocument by OwnedBytes (#1046) 2021-05-18 14:33:36 +09:00
PSeitz
a4002622f8 add iterator over documents in docstore (#1044)
* add iterator over documents in docstore

When profiling, I saw that around 8% of the time in a merge was spent in look-ups into the skip index. Since the documents in the merge case are read continuously, we can replace the random access with an iterator over the documents.

Merge Time on Sorted Index Before/After:
24s / 19s

Merge Time on Unsorted Index Before/After:
15s / 13.5s

So we can expect 10-20% faster merges.
This iterator is also important if we add sorting based on a field in the documents.

* Update reader.rs

Co-authored-by: Paul Masurel <paul@quickwit.io>
2021-05-18 10:29:02 +09:00
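The commit above replaces per-document random access into the doc store with sequential iteration during merges. A minimal, hypothetical sketch of the idea (types and names are illustrative, not tantivy's API): each block is decoded once and its documents are yielded in order, instead of locating a block for every `get(doc_id)` call.

```rust
/// Illustrative stand-ins for a decoded block and the store that owns it.
struct Block {
    docs: Vec<String>,
}

struct DocStore {
    blocks: Vec<Block>,
}

impl DocStore {
    /// Iterate all documents in storage order, walking each block exactly once.
    fn iter_docs(&self) -> impl Iterator<Item = &String> + '_ {
        self.blocks.iter().flat_map(|block| block.docs.iter())
    }
}

fn main() {
    let store = DocStore {
        blocks: vec![
            Block { docs: vec!["doc0".into(), "doc1".into()] },
            Block { docs: vec!["doc2".into()] },
        ],
    };
    assert_eq!(store.iter_docs().count(), 3);
}
```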
Kornel
8e21087ad7 Don't use overly-minimal dependencies (#1037) 2021-05-17 22:30:04 +09:00
PSeitz
d523543dc7 Sort Index/Docids By Field (#1026)
* sort index by field

add sort info to IndexSettings
generate docid mapping for sorted field (only fastfield)
remap singlevalue fastfield

* support docid mapping in multivalue fastfield

move docid mapping to serialization step (less intermediate data for mapping)
add support for docid mapping in multivalue fastfield

* handle docid map in bytes fastfield

* forward docid mapping, remap postings

* fix merge conflicts

* move test to index_sorter

* add docid index mapping old->new

add docid mapping for both directions old->new (used in postings) and new->old (used in fast field)
handle mapping in postings recorder
warn instead of info for MAX_TOKEN_LEN

* remap docid in fieldnorm

* resort docids in recorder, more extensive tests

* handle index sorting in docstore

handle index sort in docstore, by saving all the docs in a temp docstore file (SegmentComponent::TempStore). On serialization the docid mapping is used to create a docstore in the correct order by reading the old docstore.

add docstore sort tests
refactor tests

* refactor

rename docid doc_id
rename docid_map doc_id_map
rename DocidMapping DocIdMapping
fix typo

* u32 to DocId

* better doc_id_map creation

remove unstable sort

* add non mut method to FastFieldWriters

add _mut prefix to &mut methods

* remove sort_index

* fix clippy issues

* fix SegmentComponent iterator

use std::mem::replace

* fix test

* fmt

* handle indexsettings deserialize

* add reading, writing bytes to doc store

get bytes of document in doc store
add store_bytes method doc writer to accept serialized document
add serialization index settings test

* rename index_sorter to doc_id_mapping

use bufferlender in recorder

* fix compile issue, make sort_by_field optional

* fix test compile

* validate index settings on merge

validate index settings on merge
forward merge info to SegmentSerializer (for TempStore)

* fix doctest

* add itertools, use kmerge

add itertools, use kmerge
push because rustfmt fails

* implement/test merge for fastfield

implement/test merge for fastfield
rename len to num_deleted in DeleteBitSet

* Use precalculated docid mapping in merger

Use precalculated docid mapping in merger for sorted indices instead of on the fly calculation 
Add index creation macro benchmark, but commented out for now, since it is not really usable due to long runtimes, and extreme fluctuations. May be better suited in criterion or an external bench bin

* fix fast field reader docs

fix fast field reader docs, Error instead of None returned
add u64s_lenient to fastreader
add create docid mapping benchmark

* add test for multifast field merge

refactor test 
add test for multifast field merge

* add num_bytes to BytesFastFieldReader

equivalent to num_vals in MultiValuedFastFieldReader

* add MultiValueLength trait

add MultiValueLength trait in order to unify index creation for BytesFastFieldReader and MultiValuedFastFieldReader in merger

* Add ReaderWithOrdinal, fix 

Add ReaderWithOrdinal to associate data to a reader in merger
Fix bytes offset index creation in merger

* add test for merging bytes with sorted docids

* Merge fieldnorm for sorted index

* handle posting list in merge in sorted index

handle posting list in merge in sorted index by using doc id mapping for sorting
reuse SegmentOrdinal type

* handle doc store order in merge in sorted index

* fix typo, cleanup

* make IndexSetting non-optional

* fix type, rename test file

fix type
rename test file
add  type

* remove SegmentReaderWithOrdinal accessors

* cargo fmt

* add index sort & merge test to include deletes

* Fix posting list merge issue

Fix posting list merge issue - ensure serializer always gets monotonically increasing doc ids
handle sorting and merging for facets field

* performance: cache field readers, use bytes for doc store merge

* change facet merge test to cover index sorting

* add RawDocument abstraction to access bytes in doc store

* fix deserialization, update changelog

fix deserialization
update changelog
forward error on merge failed

* cache store readers to utilize lru cache (4x performance)

cache store readers, to utilize lru cache (4x faster performance, due to less decompress calls on the block)

* add include_temp_doc_store flag in InnerSegmentMeta

unset flag on deserialization and after finalize of a segment
set flag when creating new instances
2021-05-17 22:20:57 +09:00
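The PR above revolves around a doc id mapping derived from the sort field. A hedged sketch of the two mappings it mentions, with hypothetical names and plain Vecs standing in for tantivy's types: new->old (used on the fast field side) is obtained by sorting doc ids by their fast field value, and old->new (used to remap postings) is its inverse.

```rust
/// Builds (new_to_old, old_to_new) doc id mappings from a per-doc sort value.
fn doc_id_mappings(fast_field: &[u64]) -> (Vec<u32>, Vec<u32>) {
    let mut new_to_old: Vec<u32> = (0..fast_field.len() as u32).collect();
    // Stable sort keeps the original order for equal values.
    new_to_old.sort_by_key(|&old_doc| fast_field[old_doc as usize]);

    let mut old_to_new = vec![0u32; fast_field.len()];
    for (new_doc, &old_doc) in new_to_old.iter().enumerate() {
        old_to_new[old_doc as usize] = new_doc as u32;
    }
    (new_to_old, old_to_new)
}

fn main() {
    // Sort field values for docs 0..4.
    let values = [30u64, 10, 20, 10];
    let (new_to_old, old_to_new) = doc_id_mappings(&values);
    assert_eq!(new_to_old, vec![1, 3, 2, 0]); // sorted by value, stable on ties
    assert_eq!(old_to_new, vec![3, 0, 2, 1]);
}
```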
Abderrahmen Hanafi
6ca27b6dd4 link collector header in introduction section (#1036) 2021-05-17 22:15:48 +09:00
Evance Soumaoro
8d51e9cc91 Capping IndexWriter Num thread (#1033)
* capping num threads of index writer to MAX_NUM_THREAD = 8

* fixed formatting

* run ci

* fix bug from max to min
2021-05-06 20:44:39 +09:00
Paul Masurel
2aced2d958 Merge pull request #1028 from tantivy-search/issue-more-like-this-query
Support MoreLikeThisQuery
2021-05-04 22:15:43 +09:00
Paul Masurel
3fcba00a1f Merge pull request #1029 from tantivy-search/dependabot/add-v2-config-file
Upgrade to GitHub-native Dependabot
2021-05-03 21:11:06 +09:00
Evance Souamoro
372d12766a fix cargo fmt 2021-05-03 10:26:56 +00:00
Evance Soumaoro
dfed8896b9 Merge branch 'main' into issue-more-like-this-query 2021-05-03 10:08:38 +00:00
Evance Souamoro
d71aa57077 reusing idf from bm25 module as it was the same logic 2021-05-03 10:05:40 +00:00
Paul Masurel
3e85fe57ac Merge pull request #1031 from PSeitz/bitpack_writer
update CHANGELOG
2021-05-03 16:29:19 +09:00
Pascal Seitz
537021e12d update CHANGELOG 2021-05-03 09:09:42 +02:00
Paul Masurel
ec4834cd73 Merge pull request #1030 from PSeitz/bitpack_writer
add BlockedBitpacker
2021-05-03 14:19:17 +09:00
Evance Souamoro
712c01aa93 fixed term sorting & moved it to a better place 2021-05-01 05:40:59 +00:00
Evance Souamoro
cde324d4b4 fixed issues based on comment, still need to check BM25 suggestion 2021-04-30 21:14:19 +00:00
Pascal Seitz
478571ebb4 move minmax to bitpacker
move minmax to bitpacker
use minmax in blocked bitpacker
2021-04-30 17:07:30 +02:00
Pascal Seitz
fde9d27482 refactor 2021-04-30 16:29:02 +02:00
Pascal Seitz
f38daab7f7 add base value to blocked bitpacker 2021-04-30 14:47:58 +02:00
Pascal Seitz
25b9429929 calc mem_usage of more structs
calc mem_usage of more structs in index creation
add some comments
2021-04-30 14:16:39 +02:00
Pascal Seitz
83cf638a2e use 64bit encoded metadata
fix memory_usage calculation
2021-04-30 07:23:44 +02:00
Pascal Seitz
a04e0bdaf1 use flushfree blocked bitpacker (10% slower) 2021-04-29 19:57:17 +02:00
Pascal Seitz
c200d59d1e add blocked bitpacker, add benches 2021-04-29 19:53:54 +02:00
dependabot-preview[bot]
bbeac5888c Upgrade to GitHub-native Dependabot 2021-04-29 15:02:36 +00:00
Pascal Seitz
daa53522b5 move tantivy bitpacker to crate, refactor bitpacker
remove byteorder dependency
2021-04-29 16:40:11 +02:00
Evance Souamoro
2c0f6e3319 add builder to the public for documentation 2021-04-29 12:38:16 +00:00
Evance Souamoro
27f587aa13 applied cargo fmt 2021-04-29 12:15:34 +00:00
Evance Souamoro
cfc27c9665 add support for more like this query 2021-04-29 11:49:27 +00:00
Paul Masurel
88a1a90c3c Merge pull request #1025 from tamuhey/patch-1
Typo in README.md
2021-04-28 15:31:53 +09:00
Yohei Tamura
6d8581baae Update README.md
typo
2021-04-28 15:10:59 +09:00
Paul Masurel
2b4b16ae90 Merge pull request #1021 from PSeitz/indexmeta
add Index::builder, add index_settings to IndexMeta
2021-04-27 16:13:48 +09:00
Paul Masurel
075c23eb8c Disabling fetching fieldnorm in phrasequery if scoring is disabled. 2021-04-27 14:06:41 +09:00
Pascal Seitz
cbf805c3e6 fix build, skip serialize None 2021-04-26 13:30:34 +02:00
Pascal Seitz
46beb2a989 index_settings should be optional 2021-04-26 11:34:19 +02:00
Pascal Seitz
c01c175744 rename fix 2021-04-26 09:45:12 +02:00
Paul Masurel
eca496ee24 Merge branch 'main' into indexmeta 2021-04-26 14:34:58 +09:00
Paul Masurel
083bb3ec3f Merge pull request #1023 from tantivy-search/issue/simpler-positions
Issue/simpler positions

Closes #1022
2021-04-26 14:02:11 +09:00
Paul Masurel
2dc5403e7b Closes #1022 2021-04-26 14:01:14 +09:00
Paul Masurel
aead5d4068 First stab 2021-04-26 12:46:06 +09:00
Paul Masurel
6fb3622abb Cargo fmt 2021-04-26 12:45:49 +09:00
Paul Masurel
39dd8cfe24 Cargo clippy. Acronym should not be full uppercase apparently. 2021-04-26 11:49:18 +09:00
Pascal Seitz
b9b9e9e518 move Index::create to IndexBuilder 2021-04-23 15:14:15 +02:00
Pascal Seitz
e2c91aff33 add open/create methods to index builder
add indexbuilder error
rename create_from_metas to open_from_metas
remove from_directory
2021-04-23 14:02:21 +02:00
Pascal Seitz
96098fce20 fmt 2021-04-23 12:09:55 +02:00
Paul Masurel
18bfe131fe Removed all inline(always) like a baboon. 2021-04-23 10:47:13 +09:00
Pascal Seitz
8dc3e7704c add IndexSettings to Index, use Indexbuilder in Index 2021-04-22 21:07:39 +02:00
Paul Masurel
1ebfc71721 Merge pull request #1019 from tantivy-search/fix-merge-policy-bug
fixed merge policy bug
2021-04-22 11:25:48 +09:00
Paul Masurel
afd3dc7e81 Merge pull request #1016 from rihardsk/date-range-exclusivity-test
Change to an exclusive bound in the date range test
2021-04-22 11:21:01 +09:00
Paul Masurel
cbaecb1ea4 Merge pull request #1020 from tantivy-search/fix-architecture-doc
fixes small typos in the doc
2021-04-22 11:20:00 +09:00
Evance Souamoro
8883e32dd8 applied cargo fmt 2021-04-21 21:25:31 +00:00
Pascal Seitz
4243780e0a add Index::builder, add index_settings to IndexMeta 2021-04-21 19:32:19 +02:00
Evance Souamoro
5f3fd08509 re-run build 2021-04-21 17:17:13 +00:00
Evance Souamoro
98a225acd1 re-run build 2021-04-21 16:54:54 +00:00
Evance Souamoro
d8555cc8a1 fixes small typos 2021-04-21 15:28:15 +00:00
Evance Souamoro
3ccd93ac67 fixed merge policy bug 2021-04-21 15:20:05 +00:00
Paul Masurel
336428df8b Merge pull request #1017 from tantivy-search/fix-typo
Fixed potential typo
2021-04-20 17:21:51 +09:00
Evance Souamoro
d69aace9ec fixed potential typo 2021-04-20 07:21:47 +00:00
Rihards Krišlauks
777debf5d7 Format 2021-04-19 21:57:25 +03:00
Rihards Krišlauks
7c20771d20 Change to an exclusive bound in the date range test
So that both inclusive and exclusive bounds are tested for date ranges
2021-04-19 20:43:37 +03:00
Paul Masurel
fef428a9c6 Updated CHANGELOG 2021-04-19 21:58:52 +09:00
Paul Masurel
cc9972ad6c Merge pull request #1010 from PSeitz/main
add lz4 block format compressor as default docstore compressor
2021-04-19 21:46:19 +09:00
Pascal Seitz
c2fdc60569 fix snap version, fix naming 2021-04-19 10:55:44 +02:00
Paul Masurel
39320f953c Update CHANGELOG with date range queries 2021-04-19 10:03:41 +09:00
Paul Masurel
be7c9cc9b8 Merge pull request #1012 from rihardsk/date-ranges
Add support for date range queries
2021-04-19 09:34:40 +09:00
Rihards Krišlauks
f58345f0f0 Add a date range query example to QueryParser documentation 2021-04-18 22:13:02 +03:00
Rihards Krišlauks
f518012656 Test flexible bounds in date range queries 2021-04-17 19:30:09 +03:00
Rihards Krišlauks
12fb9a95cb Clean up leftover debug comments 2021-04-17 18:52:44 +03:00
Rihards Krišlauks
55e79e34af Verified that the change in datetime range test was correct
The value that was previously there was 3 and it made the test fail when I
enabled it. Verified that it, indeed, should have been 2 instead (the testing
code previously contained an error).
2021-04-17 18:16:52 +03:00
Rihards Krišlauks
1649f31258 Make time zone parsing more strict to match rfc3339 2021-04-17 17:57:46 +03:00
Rihards Krišlauks
7849736d80 Move all of the datetime parsing code into a single function
For readability
2021-04-17 17:23:47 +03:00
Pascal Seitz
b7159dd48e forward original error 2021-04-16 17:20:25 +02:00
Pascal Seitz
38992251c5 update appveyor to include new ff 2021-04-16 15:49:24 +02:00
Pascal Seitz
a00049b879 add lz4 block format compressor as default docstore compressor
add lz4 block compressor using lz4_flex, add lz4-block-compression feature flag
add snappy-compression feature flag for snap compressor, make snap crate optional
set lz4-block-compression as default feature flag
2021-04-16 15:24:35 +02:00
Paul Masurel
ba4bc6d7c3 Cargo check 2021-04-15 09:59:35 +09:00
Paul Masurel
868f4fd174 Removing TermMerger::next().
Closing #933
2021-04-14 12:06:04 +09:00
Rihards Krišlauks
e58401be78 Implement date range support in the query parser
Tests pass but needs cleanup
2021-04-13 23:32:22 +03:00
Paul Masurel
5c1ce5b0e1 Edited CHANGELOG 2021-04-12 12:02:25 +09:00
Paul Masurel
9af3aa0de0 Merge pull request #1007 from hardikpnsp/facet-topk-lexicographic-tie-break
Facet topk lexicographic tie break
2021-04-12 11:50:16 +09:00
Hardik Prajapati
71309c5528 Simplified chain orderings 2021-04-08 07:41:35 +05:30
Hardik Prajapati
54decc60bb Fixed formatting using cargo fmt 2021-04-07 23:59:04 +05:30
Hardik Prajapati
50eea4376b Implementation of Ord trait changed for Hit
- This will result in lexicographical ordering of facets in BinaryHeap in case of a tie
2021-04-07 23:14:38 +05:30
Hardik Prajapati
443aa17329 Added failing test for tie scenario in topk 2021-04-07 23:10:48 +05:30
Paul Masurel
be1d9e0db7 Marks list_all_segment_metas() as crate private
Closes #1004
2021-04-07 23:39:28 +09:00
Paul Masurel
5743b46457 Merge pull request #1006 from tantivy-search/feat-merge-splits
Implements merging several index into a brand new index.

Closes #1005
2021-04-07 23:38:14 +09:00
Paul Masurel
e67e5ebd46 Minor syntax changes, and passing a tantivy Directory as argument
Closes #1005.
2021-04-07 23:35:03 +09:00
Evance Souamoro
a550c85369 fixed issues & added test on merge_segments feat 2021-04-06 16:15:09 +00:00
Evance Souamoro
b185df2b22 added a scratch implementation but still need to craft one detail and write a test to validate 2021-04-06 11:48:51 +00:00
Evance Souamoro
f82922b354 added a scratch implementation but still need to craft one detail and write a test to validate 2021-04-06 11:46:17 +00:00
Paul Masurel
86b30d9d7f Cargo fmt 2021-03-31 12:20:31 +09:00
Paul Masurel
f1499d5b3e Cargo fmt 2021-03-31 11:44:03 +09:00
Paul Masurel
30b6828d71 Update actions.md 2021-03-31 10:36:13 +09:00
Paul Masurel
e6b7b7da0a Create actions.md 2021-03-31 10:34:33 +09:00
Paul Masurel
38a20ae269 Renamed SegmentLocalId to SegmentOrdinal for more homogeneity and edited
changelog
2021-03-29 09:25:42 +09:00
Stéphane Campinas
a0ec6e1e9d Expand the DocAddress struct with named fields 2021-03-28 19:00:23 +02:00
Paul Masurel
114fbe2512 Removed redundant check
Closes #997
2021-03-25 14:17:06 +09:00
Paul Masurel
155729044b Merge pull request #996 from lpouget/facet-optional-storage-and-index
Make facet indexation and storage optional
2021-03-25 09:08:36 +09:00
Laurent Pouget
4b34231f28 Make facet indexation and storage optional
Added a FacetOptions for HierarchicalFacet which adds indexed and stored flags to it.
Propagate the change and update tests accordingly.
Added a test to ensure that a non-indexed flag is taken care of.
Added the `path()` function on the Value implementation to return the stored facet.
2021-03-24 14:56:27 +01:00
Paul Masurel
8e7fe068e9 Fixed Histogram collector comment. 2021-03-23 18:43:49 +09:00
Paul Masurel
4c384272dc Added debug to FileHandle 2021-03-23 00:10:46 +09:00
Paul Masurel
5de9961cf2 Cargo fmt 2021-03-22 10:38:48 +09:00
Paul Masurel
eab36b5c6a Using std::iter::once. 2021-03-21 16:55:05 +09:00
Paul Masurel
96e5de2eb9 Merge pull request #995 from bstrie/main
Replace deprecated collections::Bound with ops::Bound
2021-03-19 09:06:09 +09:00
bstrie
5f740d9ab4 Replace deprecated collections::Bound with ops::Bound 2021-03-18 17:20:36 -04:00
Paul Masurel
4f32126e35 Allow for non static predicate in the FilterCollector 2021-03-18 21:58:35 +09:00
Paul Masurel
d2d0873fdb Added support for Option<TCollector>. 2021-03-18 17:28:09 +09:00
Paul Masurel
761298ff00 Added an histogram collector.
Closes #994
2021-03-18 16:54:42 +09:00
Paul Masurel
52b1eb2c37 Clippy fix 2021-03-10 14:35:51 +09:00
Paul Masurel
2ab25d994f Updated Changelog. Closing #991 2021-03-10 14:14:21 +09:00
Paul Masurel
5fac119aa0 Merge pull request #992 from tantivy-search/issue/991
Replacing (start, end) by Range
2021-03-10 14:12:53 +09:00
Paul Masurel
31137beea6 Replacing (start, end) by Range 2021-03-10 14:06:21 +09:00
Paul Masurel
316d65d7c6 removed deprecated compare_and_swap 2021-03-09 10:30:02 +09:00
Paul Masurel
82d7553c63 Merge pull request #988 from lengyijun/patch-9
Update file_slice.rs
2021-03-08 16:17:32 +09:00
lyj
bc0eb813ff Update file_slice.rs
typo fix
2021-03-08 14:12:33 +08:00
Paul Masurel
a259023fd9 Merge pull request #985 from tantivy-search/dependabot/cargo/proptest-1.0
Update proptest requirement from 0.10 to 1.0
2021-02-23 08:48:28 +09:00
dependabot-preview[bot]
25105448e8 Update proptest requirement from 0.10 to 1.0
Updates the requirements on [proptest](https://github.com/altsysrq/proptest) to permit the latest version.
- [Release notes](https://github.com/altsysrq/proptest/releases)
- [Changelog](https://github.com/AltSysrq/proptest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/altsysrq/proptest/compare/0.10.0...1.0.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2021-02-22 20:13:03 +00:00
Paul Masurel
fe3faf5b3f Cargo fmt 2021-02-22 14:29:03 +09:00
Paul Masurel
f19dd896cf Re-added u64_lenient as a public API 2021-02-22 14:07:48 +09:00
Paul Masurel
9fe26c4fdd Added 'static to FastValue. 2021-02-22 11:02:04 +09:00
Paul Masurel
a369a72cae Cargo fmt 2021-02-09 15:00:14 +09:00
Paul Masurel
a707967453 Merge pull request #984 from vishalsodani/main
Fixed spelling
2021-02-09 09:08:13 +09:00
Vishal Sodani
b2f2097239 Fixed spelling 2021-02-08 20:29:10 +05:30
Vishal Sodani
6ae96038c2 Fixed spelling 2021-02-08 20:18:45 +05:30
Paul Masurel
2c6a0d0a19 Merge pull request #983 from vishalsodani/main
Fixed grammar
2021-02-08 23:36:18 +09:00
Vishal Sodani
4bcdca8545 Fixed spelling 2021-02-08 19:51:36 +05:30
Vishal Sodani
67f8e91395 Fixed grammar 2021-02-08 18:26:24 +05:30
Paul Masurel
b209763a55 Added ARCHITECTURE.md 2021-02-08 16:40:20 +09:00
Paul Masurel
5ef96795dc Added minor comment on DocId 2021-02-08 16:14:05 +09:00
Paul Masurel
784717749f Removing unused imports. 2021-02-05 23:04:17 +09:00
Paul Masurel
945bcc5bd3 Bump tantivy-grammar version 2021-02-05 22:58:21 +09:00
Paul Masurel
51aa9c319e Bumped version to 0.14 2021-02-05 22:55:26 +09:00
Paul Masurel
74d8d2946b Merge pull request #980 from lengyijun/patch-8
Update segment_postings.rs
2021-02-05 22:52:29 +09:00
lyj
0a160cc16e Update segment_postings.rs 2021-02-05 21:32:25 +08:00
Paul Masurel
f099f97daa Merge pull request #979 from slckl/main
FacetCounts are now pub use in tantivy::collector (Closes #978)
2021-02-05 17:05:20 +09:00
alif
769e9ba14d added simple docs to FacetCounts now-public API 2021-02-05 09:18:20 +02:00
alif
a482c0e966 pub use FacetCounts in tantivy::collector module 2021-02-05 09:00:48 +02:00
Paul Masurel
86d92a72e7 Renaming MultiValueIntFastField* to MultiValuedIntFastField* 2021-01-21 22:47:00 +09:00
Paul Masurel
ef618a5999 Made fast field reader clonable. 2021-01-21 22:15:24 +09:00
Paul Masurel
94d3d7a89a Rename FastFieldReaders::load_all 2021-01-21 18:38:48 +09:00
Paul Masurel
aa9e79f957 Clippy warnings. 2021-01-21 18:23:20 +09:00
Paul Masurel
84a2f534db Merge pull request #976 from tantivy-search/issue/fastfield_no_load
Fast fields are not loaded on the opening of a segment.
2021-01-21 18:14:55 +09:00
Paul Masurel
1b4be24dca Fast fields are not loaded on the opening of a segment.
They are instead loaded lazily when they are requested.
2021-01-21 18:13:08 +09:00
Paul Masurel
824ccc37ae Merge pull request #975 from jamescorbett/patch-1
Change from serde::export to std::marker
2021-01-12 10:04:23 +09:00
Paul Masurel
5231651020 Closes #974 2021-01-12 10:03:37 +09:00
James Corbett
fa2c6f80c7 Change from serde::export to std::marker
For some reason, under a docker build only, I get a build error saying that `serde::export` is private. This fixes it for me.

```
error[E0603]: module `export` is private
   --> /usr/local/cargo/registry/src/github.com-1ecc6299db9ec823/tantivy-0.13.2/src/collector/top_collector.rs:5:12
    |
5   | use serde::export::PhantomData;
    |            ^^^^^^ private module
    |
note: the module `export` is defined here
   --> /usr/local/cargo/registry/src/github.com-1ecc6299db9ec823/serde-1.0.119/src/lib.rs:275:5
    |
275 | use self::__private as export;
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^
```
2021-01-12 00:25:54 +00:00
Paul Masurel
43c7b3bfec Bugfix in the RAMDirectory.
There was a state where the meta.json was empty.
2021-01-11 14:11:42 +09:00
Paul Masurel
b17a10546a Minor change in unit test. 2021-01-11 11:33:59 +09:00
Paul Masurel
bf6e6e8a7c Merge pull request #972 from tantivy-search/issue/969
Issue/969
2021-01-07 22:49:31 +09:00
Paul Masurel
203b0256a3 Minor renaming 2021-01-07 22:47:57 +09:00
Paul Masurel
caf2a38b7e Closes #969.
The segment stacking optimization is not updating "first_doc_in_block".
2021-01-07 22:43:56 +09:00
Paul Masurel
96f24b078e Added failing unit test. 2021-01-07 22:43:28 +09:00
Paul Masurel
332b50a4eb Merge pull request #970 from tantivy-search/functional-test-store
Added a functional long running test to test store merging.
2021-01-07 14:27:08 +09:00
Paul Masurel
8ca0954b3b Added a functional long running test to test store merging. 2021-01-07 14:07:15 +09:00
Paul Masurel
36343e2de8 Merge pull request #968 from tantivy-search/add-bench-analyzer
added a simple bench for the default analyzer
2021-01-06 21:33:39 +09:00
Paul Masurel
2f14a892ca added a simple bench for the default analyzer 2021-01-06 19:11:26 +09:00
Paul Masurel
9c3cabce40 Updated version of the rand crate. 2021-01-06 18:09:00 +09:00
Paul Masurel
f8d71c2b10 Merge pull request #964 from mosuka/deserializable
Make NamedFieldDocument deserializable
2021-01-06 17:43:53 +09:00
Paul Masurel
394dfb24f1 Merge pull request #965 from lewisdiamond/patch-1
Fix spelling
2021-01-06 13:38:31 +09:00
Lewis Diamond
b0549a229d Fix spelling 2021-01-05 22:34:56 -05:00
Minoru Osuka
670b6eaff6 Make NamedFieldDocument deserializable 2020-12-21 16:51:31 +09:00
Paul Masurel
a4f33d3823 Added a comment to the f64-to-u64 conversion.
- Added proptest
- Added a comment referencing the Lemire blog post.
2020-12-15 13:40:31 +09:00
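For context on the commit above, the standard order-preserving f64-to-u64 mapping (of the kind discussed around the referenced Lemire blog post) can be sketched as below. This is a generic illustration for finite values, not necessarily tantivy's exact code.

```rust
/// Order-preserving f64 -> u64: positive floats get the sign bit set,
/// negative floats are bitwise negated, so u64 comparison matches f64 order.
fn f64_to_u64_monotonic(value: f64) -> u64 {
    let bits = value.to_bits();
    if bits & (1u64 << 63) == 0 {
        bits | (1u64 << 63) // positive (or +0.0): set the sign bit
    } else {
        !bits // negative: flip all bits to reverse the order
    }
}

fn main() {
    let mut values = [-3.5f64, -0.0, 0.0, 1.25, 1e9];
    values.sort_by(|a, b| a.partial_cmp(b).unwrap());
    let encoded: Vec<u64> = values.iter().map(|&v| f64_to_u64_monotonic(v)).collect();
    // The encoded values preserve the original order.
    assert!(encoded.windows(2).all(|w| w[0] <= w[1]));
}
```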
Paul Masurel
c7841e3da5 Merge pull request #953 from barrotsteindev/filter-collector-tpredicatevalue
Generic filter collector
2020-12-14 10:35:46 +09:00
barrotsteindev
e7b4a12bba cargo fmt 2020-12-10 14:10:55 +02:00
barrotsteindev
0aaa929d6e Merge branch 'main' into filter-collector-tpredicatevalue 2020-12-10 11:27:19 +02:00
barrotsteindev
1112797c18 added a line to CHANGELOG.md 2020-12-10 11:25:08 +02:00
barrotsteindev
920481e1c1 change unit test 2020-12-10 11:24:53 +02:00
Paul Masurel
55f7b84966 Merge pull request #952 from tantivy-search/bm25-on-onebyte
Encode blockwand on a single byte.
2020-12-10 18:09:31 +09:00
Paul Masurel
09ab4df1fe Encode blockwand on a single byte. 2020-12-10 18:08:52 +09:00
barrotsteindev
0c2cf81b37 cargo fmt 2020-12-10 11:08:35 +02:00
barrotsteindev
d864430bda final edits 2020-12-10 11:08:15 +02:00
Paul Masurel
de60540e06 fixing compilation 2020-12-10 10:36:21 +02:00
Paul Masurel
c3e311e6b8 Removed 'static in compression_lz4. 2020-12-09 15:30:52 +09:00
barrotsteindev
ac704f2f22 WIP generic filter collector 2020-12-08 14:36:52 +02:00
Paul Masurel
be626083a0 Reorganized and added termdict unit tests. 2020-12-07 12:50:36 +09:00
Paul Masurel
b68fcca1e0 Minor changes
- Open{Write,Read}Error::wrap_io_error made public
- Arc<PathBuf> -> Arc<Path> in file_watcher.
2020-12-03 23:31:50 +09:00
Paul Masurel
af6dfa1856 Small refactoring 2020-12-03 14:27:05 +09:00
Paul Masurel
654c400a0b TermDictionary.finish does not flush 2020-12-03 13:36:25 +09:00
Paul Masurel
80a99539ce Several TermDict operations now return an io::Result 2020-12-03 13:13:11 +09:00
Paul Masurel
4b1c770e5e Simplified counting writer and removed flush 2020-12-03 11:24:39 +09:00
Paul Masurel
3491645e69 Moved the term merger 2020-12-03 10:24:04 +09:00
Paul Masurel
e72c8287f8 Merge pull request #951 from tantivy-search/fst-isolated
Fst isolated
2020-12-03 10:11:39 +09:00
Paul Masurel
b4b3bc7acd Cargo fmt 2020-12-03 10:08:38 +09:00
Paul Masurel
521c7b271b Isolated fst impl of termdictionary in a specific module. 2020-12-02 21:18:33 +09:00
Paul Masurel
acd888c999 Merge pull request #950 from tantivy-search/guilload--fix-clippy-warning
Fix clippy warning
2020-12-02 08:09:31 +09:00
Adrien Guillo
3ab1ba0b2f Fix clippy warning 2020-12-01 12:07:53 -08:00
Paul Masurel
b344c0ac05 Merge pull request #949 from tantivy-search/docset_is_send
DocSet is send
2020-12-01 19:12:51 +09:00
Paul Masurel
1741619c7f DocSet is send 2020-12-01 19:11:21 +09:00
Paul Masurel
067ba3dff0 Merge pull request #946 from tantivy-search/issue/test-bugfix-atomicwrite
Attempt to fix bug surfacing sometimes in test.
2020-12-01 15:29:51 +09:00
Paul Masurel
f79250f665 Fix perf regression in the benchmark for the Count collector.
In order to reduce IO, we introduced a way to instantiate a dummy
constant FieldnormReader, which worked by allocating a buffer with
as many bytes as there are docs in the segment.

This allocation is not negligible by any means.

This PR works by offering two implementations of the
FieldnormReader (see the sketch below).
The const fieldnorm reader simply returns the same value all of the
time, while the array-based one behaves like the current one.
2020-12-01 08:51:32 +09:00
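A hedged sketch of the two fieldnorm reader flavours described above, with illustrative names (not tantivy's exact types): the constant reader answers every doc without allocating a per-doc buffer, while the array-backed reader looks the value up per doc.

```rust
enum FieldNormReaderSketch {
    /// Same fieldnorm for every doc; no per-doc buffer is allocated.
    Const(u8),
    /// One byte per doc, looked up on demand.
    Array(Vec<u8>),
}

impl FieldNormReaderSketch {
    fn fieldnorm_id(&self, doc: u32) -> u8 {
        match self {
            FieldNormReaderSketch::Const(fieldnorm_id) => *fieldnorm_id,
            FieldNormReaderSketch::Array(fieldnorms) => fieldnorms[doc as usize],
        }
    }
}

fn main() {
    // The const reader serves a segment of any size without allocating.
    let constant = FieldNormReaderSketch::Const(1);
    let backed = FieldNormReaderSketch::Array(vec![3, 5, 7]);
    assert_eq!(constant.fieldnorm_id(999_999), 1);
    assert_eq!(backed.fieldnorm_id(2), 7);
}
```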
Paul Masurel
5a33b8d533 Merge pull request #942 from barrotsteindev/filter-collector
added initial implementation for filter_collector
2020-11-30 11:26:28 +09:00
Paul Masurel
d165655fb1 Added specialized implementation for count/count_including... in &mut DocSet 2020-11-30 11:24:13 +09:00
barrotsteindev
c805871b92 better test 2020-11-25 14:25:49 +02:00
barrotsteindev
f288e32634 rebase on origin/main 2020-11-25 14:08:43 +02:00
barrotsteindev
bc44543d8f added TPredicate generic param and updated tests 2020-11-25 14:08:24 +02:00
Paul Masurel
db514208a7 Removed the SegmentCollector type from the Generics of the
FilterCollector
2020-11-25 14:08:24 +02:00
barrotsteindev
b6ff29e020 simplified FilterCollector#for_segment 2020-11-25 14:08:24 +02:00
barrotsteindev
7c94dfdc15 fmt 2020-11-25 14:08:24 +02:00
barrotsteindev
8782c0eada updated docs 2020-11-25 14:08:24 +02:00
barrotsteindev
fea0ba1042 removed unnecessary static lifetimes 2020-11-25 14:08:24 +02:00
barrotsteindev
027555c75f added initial implementation for filter_collector 2020-11-25 14:08:24 +02:00
Paul Masurel
b478ed747a Attempt to fix bug surfacing sometimes in test.
Recently, `test_index_manual_policy_mmap` has been failing on Windows.

The idea addressed by this patch is that we forgot to sync the parent
directory in the current implementation of atomic writes (see the sketch below).
This was done correctly when we were relying on the atomicwrites crate.

*crossing fingers*
2020-11-25 18:00:05 +09:00
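A minimal sketch of the atomic-write-plus-parent-sync pattern the commit above refers to, assuming a Unix-like filesystem (the function is hypothetical, not tantivy's code): write and fsync a temp file, rename it over the target, then fsync the parent directory so the rename itself is durable.

```rust
use std::fs::{self, File};
use std::io::{self, Write};
use std::path::Path;

fn atomic_write(path: &Path, bytes: &[u8]) -> io::Result<()> {
    let parent = path
        .parent()
        .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "no parent dir"))?;
    let tmp_path = path.with_extension("tmp");

    let mut tmp = File::create(&tmp_path)?;
    tmp.write_all(bytes)?;
    tmp.sync_all()?; // flush the file contents and metadata

    fs::rename(&tmp_path, path)?;
    File::open(parent)?.sync_all()?; // make the directory entry durable
    Ok(())
}

fn main() -> io::Result<()> {
    let dir = std::env::temp_dir();
    atomic_write(&dir.join("meta.json"), br#"{"segments": []}"#)
}
```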
Paul Masurel
e9aa27dace Avoid computing the BM25 weight if scoring is disabled 2020-11-25 14:35:49 +09:00
Paul Masurel
c079133f3a Merge pull request #945 from tantivy-search/guilload--replace-arc-box-with-arc
Replace some `Arc<Box<dyn...` with `Arc<dyn...`
2020-11-25 13:57:22 +09:00
Paul Masurel
30c5f7c5f0 Applied CR comments 2020-11-25 13:56:05 +09:00
Adrien Guillo
6f26871c0f Replace some Arc<Box<dyn... with Arc<dyn... 2020-11-24 19:54:53 -08:00
Paul Masurel
f93cc5b5e3 Merge pull request #944 from tantivy-search/no-file-len-problem
No filelen problem.
2020-11-25 11:54:44 +09:00
Paul Masurel
5a25c8dfd3 No filelen problem. 2020-11-25 11:51:58 +09:00
Paul Masurel
f5c079159d Merge pull request #943 from tantivy-search/guilload--ownedbytes-helper-methods
Add helper methods for reading u8 and u64 to `OwnedBytes`
2020-11-25 09:04:40 +09:00
Adrien Guillo
1cfdce3437 Add helper methods for reading u8 and u64 to OwnedBytes 2020-11-23 10:45:46 -08:00
Paul Masurel
e9e6d141e9 Merge pull request #941 from tantivy-search/revert-940-guilload--move-list-files-to-index
Revert "Move `SegmentUpdater::list_files()` to `Index`"
2020-11-20 13:54:05 +09:00
Paul Masurel
8d0e049261 Revert "Move SegmentUpdater::list_files() to Index" 2020-11-20 13:53:50 +09:00
Paul Masurel
0335c7353d Merge pull request #940 from tantivy-search/guilload--move-list-files-to-index
Move `SegmentUpdater::list_files()` to `Index`
2020-11-18 11:08:20 +09:00
Adrien Guillo
267e920a80 Move SegmentUpdater::list_files() to Index
... and make the method public
2020-11-17 17:54:18 -08:00
Paul Masurel
d8a3a47e3e Refactoring of the skip index.
Merge pull request #927 from tantivy-search/compact-store-index
    
The skip index now identifies both the start and the end offset of blocks. Checkpoints are compressed in blocks, reaching better compression.
2020-11-17 16:13:45 +09:00
Paul Masurel
7f0e61b173 Refactoring of the skip index.
The skip index now identifies both the start and the end offset
of blocks. Checkpoints are compressed in blocks, reaching better
compression.
2020-11-17 16:05:11 +09:00
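The refactoring above makes each checkpoint carry both the start and the end offset of its block. A hedged sketch of such a checkpoint lookup, with hypothetical field and function names (not tantivy's actual types):

```rust
/// A checkpoint covers a doc id range and records the exact byte range of its
/// block, so the block can be sliced tightly without reading past its end.
struct Checkpoint {
    doc_range: std::ops::Range<u32>,
    byte_range: std::ops::Range<u64>,
}

/// Binary-search the (sorted, non-overlapping) checkpoints for `doc_id`.
fn seek_checkpoint(checkpoints: &[Checkpoint], doc_id: u32) -> Option<&Checkpoint> {
    let idx = checkpoints.partition_point(|c| c.doc_range.end <= doc_id);
    checkpoints.get(idx).filter(|c| c.doc_range.contains(&doc_id))
}

fn main() {
    let checkpoints = vec![
        Checkpoint { doc_range: 0..100, byte_range: 0..4_096 },
        Checkpoint { doc_range: 100..250, byte_range: 4_096..9_000 },
    ];
    assert_eq!(seek_checkpoint(&checkpoints, 150).unwrap().byte_range, 4_096..9_000);
    assert!(seek_checkpoint(&checkpoints, 300).is_none());
}
```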
Paul Masurel
ce4c50446b Merge pull request #937 from tantivy-search/guilload--cache-store-reader-blocks
Cache store reader blocks in an LRU fashion
2020-11-17 13:45:10 +09:00
Adrien Guillo
9ab25d2575 Cache store reader blocks in an LRU fashion 2020-11-16 19:09:10 -08:00
Paul Masurel
6d4b982417 Marked blockwand test as ignored.
- Using impl trait for iterating `matching_segments` in the termdict
merger
2020-11-16 13:44:14 +09:00
Paul Masurel
650eca271f Merge pull request #932 from tantivy-search/fix-unit-test-file-watcher
Fixing unit test.
2020-11-13 11:47:15 +09:00
Paul Masurel
8ee55aef6d Fixing unit test. 2020-11-13 09:01:45 +09:00
Paul Masurel
40d41c7dcb Merge pull request #929 from tantivy-search/api-public-term-merger
Make field TermMerger API public
2020-11-12 14:11:53 +09:00
Paul Masurel
c780a889a7 Merge pull request #931 from tantivy-search/issue/930
Closes #930 Minor bug.
2020-11-12 13:22:34 +09:00
Paul Masurel
eef348004e Closes #930 Minor bug.
The watch callback could still be called if the last watch handle was dropped
shortly before meta.json was written.
2020-11-11 15:51:23 +09:00
Paul Masurel
e784bbc40f Update src/core/searcher.rs
Co-authored-by: Adrien Guillo <adrien.guillo@gmail.com>
2020-11-11 12:37:52 +09:00
Paul Masurel
b8118d439f Make field TermMerger API public 2020-11-11 11:59:09 +09:00
Paul Masurel
a49e59053c Making block wand test more robust 2020-11-10 18:01:38 +09:00
Paul Masurel
41bb2bd58b Merge pull request #926 from tantivy-search/guilload--directory-exists
Modified `Directory::exists` API to return `Result<bool, OpenReadError>`
2020-11-10 17:59:45 +09:00
Adrien Guillo
7fd6054145 Modified Directory::exists API to return Result<bool, OpenReadError> 2020-11-09 18:00:14 -08:00
Paul Masurel
6abf4e97b5 Merge pull request #925 from tantivy-search/postings-end-offset
Adding post stop offset to TermInfo.
2020-11-09 15:58:04 +09:00
Paul Masurel
d23aee76c9 Avoid loading fieldnorms when not necessary 2020-11-09 15:50:16 +09:00
Adrien Guillo
58a1595792 Updated CHANGELOG 2020-11-06 11:13:44 -08:00
Paul Masurel
726d32eac5 Merge pull request #924 from tantivy-search/guilload--implement-poll-watcher
Implement FileWatcher
2020-11-06 22:41:26 +09:00
Paul Masurel
b5f3dcdc8b TermInfo contains the end_offset of the postings.
We slice the ReadOnlySource tightly.
2020-11-06 15:18:51 +09:00
Adrien Guillo
2875deb4b1 Implement FileWatcher 2020-11-05 20:08:15 -08:00
Paul Masurel
b2dfacdc70 Fixed bench and removed unnecessary public symbols. 2020-11-05 16:12:03 +09:00
Paul Masurel
36a0520a48 Added failing proptest and fixed it. 2020-11-05 15:40:00 +09:00
Paul Masurel
6b5a5ac1d0 Merge pull request #923 from tantivy-search/refact-param-serialize
Minor refactoring postings serializers options.
2020-11-03 15:49:34 +09:00
Paul Masurel
581c2bb718 Minor refactoring postings serializers options. 2020-11-03 15:47:25 +09:00
Paul Masurel
3d192c0f57 Merge pull request #921 from tantivy-search/more-pub-for-hot-directory
Exposing API for the hot directory
2020-10-29 13:04:37 +09:00
Paul Masurel
9dc36f4431 Exposing API for the hot directory 2020-10-29 13:04:13 +09:00
Paul Masurel
730ccefffb Fixes a bug in TermQuery::explain.
Closes #915
2020-10-28 22:29:15 +09:00
Paul Masurel
2c56f4b583 Updated CHANGELOG 2020-10-28 17:39:01 +09:00
Paul Masurel
9e27da8b4e Added CR comments.
Added Unit tests.
2020-10-28 17:35:34 +09:00
Adrien Guillo
7f373f232a Add helper methods for BooleanQuery 2020-10-28 17:35:34 +09:00
Stephen Becker IV
6f0487979c Removing Inoperable 'Say Thanks' Links (#919)
Dearest Maintainer,

The say thanks project moved to email https://github.com/BlitzKraft/saythanks.io/issues/60.  I removed the links. You might want to use your email but at that point people could just email you thanks?

Anyway, Thanks for the hard work on the project. I am enjoying it.

Dictated but not reviewed,
Becker
2020-10-28 15:08:47 +09:00
Pasha Podolsky
71c66a5405 [tantivy] Run clippy linter (#914) 2020-10-27 14:36:02 +09:00
Paul Masurel
2eb5326aa4 Fixing compilation 2020-10-27 14:00:14 +09:00
Paul Masurel
91e92fa8a3 Made public. 2020-10-20 14:59:41 +09:00
Paul Masurel
9cc1661ce2 Updating crossbeam (#909) 2020-10-13 10:55:50 +09:00
Paul Masurel
c3f44d38f3 Moving HasLen (#910) 2020-10-13 10:19:30 +09:00
Paul Masurel
01b4aa9adc Refactoring dir (#905) 2020-10-11 22:22:56 +09:00
Paul Masurel
7a78b1cba3 Fix unit test on windows 2020-10-09 14:57:39 +09:00
Paul Masurel
4d011cc648 Updated changelog 2020-10-09 14:54:07 +09:00
Pasha Podolsky
80cbe889ba [tantivy] Add brotli codec for row storage (#885)
* [tantivy] Add brotli codec for row storage

* [tantivy] Fix not actual comments for code

* [CR] Fixes for comment and cursor
2020-10-09 14:51:42 +09:00
Paul Masurel
c23a03ad81 Large API Change in the Directory API. (#901)
Tantivy used to assume that all files could be somehow memory mapped. After this change, Directory returns a `FileSlice` that can be reduced and eventually read into an `OwnedBytes` object. Long and blocking IO operations are still required, but they do not span the entire file.
2020-10-08 16:36:51 +09:00
Paul Masurel
579e3d1ed8 Removed dev-deps to serde_yaml 2020-10-06 10:04:06 +09:00
Pasha Podolsky
687a36a49c [tantivy] Fix for schema deserialization error (#902)
Co-authored-by: Pasha <pasha@izihawa.net>
2020-10-05 11:24:48 +09:00
Paul Masurel
ad82b455a3 Minor change 2020-10-01 20:45:07 +09:00
Paul Masurel
848afa43ee Merge branch 'issue/896' into main 2020-10-01 20:43:42 +09:00
Paul Masurel
7720d21265 Closes #896 - Facet reader related
Bugfix. Acquiring a facet reader on a segment that does not contain any
doc with this facet returns `None`.
2020-10-01 20:25:28 +09:00
Paul Masurel
96f946d4c3 Raultang master (#879)
* add support for indexed bytes fast field

* remove backup code file

* refine test cases

* Simplified unit test. Renamed it as it is testing the storable part. Not the indexed part.

* Small refactoring and added unit test. If multivalued we only retain the first FAST value.

Co-authored-by: Raul <raul.tang.lc@gmail.com>
2020-10-01 18:03:18 +09:00
dependabot-preview[bot]
3432149759 Update base64 requirement from 0.12 to 0.13 (#895)
Updates the requirements on [base64](https://github.com/marshallpierce/rust-base64) to permit the latest version.
- [Release notes](https://github.com/marshallpierce/rust-base64/releases)
- [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md)
- [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.12.0...v0.13.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>

Co-authored-by: dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com>
2020-10-01 11:37:36 +09:00
Paul Masurel
392221e36a Removing dead file 2020-10-01 11:36:55 +09:00
Paul Masurel
674cae8ee2 Issue/822 TopDocs sorted by i64, and date fastfield (in addition to u64) (#890)
* Unsatisfactory implementation.

The fastfield are hit. But for performance, we want the comparison to happen on u64,
and the conversion to the FastType to be done only on the selected TopK
elements.

For i64, the current approach might be ok.
For DateTime, it is most likely catastrophic.

Closes #822

* Decoupled SegmentCollector Fruit from Collector Fruit.

Deferred conversion from u64 to the proper FastField type to after the overall collection.
(tantivy guarantees that u64 encoding is consistent with the original
ordering of the fastfield)

Closes #882
2020-09-30 17:51:11 +09:00
Paul Masurel
838c476733 Hirevo move to thiserror (#889)
* Migrated from `failure` to `thiserror`

* Refactoring

Co-authored-by: Nicolas Polomack <nicolas@polomack.eu>
2020-09-30 16:34:10 +09:00
Paul Masurel
5f574348d1 Syntactic change. 2020-09-26 21:33:00 +09:00
Paul Masurel
19a02b2c30 Merge tag '0.13.1'
0.13.1 was published as a hotfix to accommodate tantivy-py.
2020-09-19 21:20:27 +09:00
Paul Masurel
c339b05789 Bumped version and edited changelog 2020-09-19 21:13:19 +09:00
Paul Masurel
2d3c657f9d Added Send Sync to collectors. 2020-09-19 21:04:44 +09:00
Paul Masurel
07f9b828ae Added Send and Sync to the Query trait. 2020-09-19 21:04:29 +09:00
Paul Masurel
70bae7ce4c Removing Term Vec allocation (#881) 2020-09-08 23:11:00 +09:00
Paul Masurel
ac2a7273e6 Re-added comment to Score. 2020-09-08 21:41:34 +09:00
Paul Masurel
4ce9517a82 fix unit test for bench. remove scoref64 feature. fixed test for lz4 feature. 2020-09-08 07:35:00 +09:00
Paul Masurel
73024a8af3 Fixing compilation of bench and doctests. 2020-09-08 07:18:43 +09:00
Paul Masurel
e70e605fc3 fix unit test (at least on linux) 2020-09-07 23:35:04 +09:00
Paul Masurel
439d6956a9 Returning Result in some of the API (#880)
* Returning Result in some of the API

* Introducing `.writer_for_test(..)`
2020-09-07 15:52:34 +09:00
Paul Masurel
6530bf0eae Make field types less strict when populating documents. 2020-09-06 10:24:03 +09:00
Paul Masurel
151498cbe7 Creating the tempfile for atomicwrites in the same directory as the MmapDirectory. (#878) 2020-09-05 23:06:29 +09:00
Paul Masurel
3a72b1cb98 Accept dash within field names. (#874)
Accept dash in field names and enforce field names constraint at the
creation of the schema.

Closes #796
2020-09-01 13:38:52 +09:00
Paul Masurel
2737822620 Fixing unit tests. (#868)
There was a unit test failing when notify was sending more
than one event on atomicwrites.

It was observed on MacOS CI.
2020-08-27 16:43:39 +09:00
b8591340
06c12ae221 Filter meta.json from validate_checksum (#872) 2020-08-27 07:54:37 +09:00
Paul Masurel
4e4400af7f Added cargo timing report to .gitignore 2020-08-23 16:15:28 +09:00
Paul Masurel
3f1ecf53ab Merge branch 'master' of github.com:tantivy-search/tantivy 2020-08-22 21:30:47 +09:00
Paul Masurel
0b583b8130 Plastic changes 2020-08-22 21:29:12 +09:00
Paul Masurel
31d18dca1c Removing dependency to atomicwrites (#866) 2020-08-21 21:37:05 +09:00
stephenlagree
5e06e7de5a Update basic_search.rs (#865)
Remove duplicated document entry.
2020-08-21 11:23:09 +09:00
Paul Masurel
8af53cbd36 Merge branch 'master' of github.com:tantivy-search/tantivy 2020-08-21 08:57:42 +09:00
Paul Masurel
4914076e8f Fixing release build 2020-08-21 08:57:27 +09:00
Paul Masurel
e04f47e922 Using block wand for term queries too. 2020-08-20 15:51:21 +09:00
Paul Masurel
f355695581 Code clean up 2020-08-20 15:42:50 +09:00
Paul Masurel
cbacdf0de8 Edited README. 2020-08-20 14:28:24 +09:00
Paul Masurel
3dd0322f4c Bumped version 2020-08-19 22:41:48 +09:00
Paul Masurel
2481c87be8 Block wand (#856) 2020-08-19 22:36:36 +09:00
Paul Masurel
b6a664b5f8 cargo fmt 2020-08-16 12:40:50 +09:00
lyj
25b666a7c9 Update occur.rs (#862) 2020-08-16 10:49:55 +09:00
Paul Masurel
9b41912e66 Bugfix (#861) 2020-08-12 16:06:24 +09:00
Paul Masurel
8e74bb98b5 Added field norm readers (#854) 2020-07-20 13:05:05 +09:00
Paul Masurel
6db8bb49d6 Assert nearly equals macro (#853)
* Assert nearly equals macro

* Renamed specialized_scorer in TermScorer
2020-07-17 16:40:41 +09:00
lyj
410aed0176 Update segment_updater.rs (#848) 2020-07-16 12:33:11 +09:00
aptend
00a239a712 fix typo in index_meta.rs (#851) 2020-07-16 12:32:45 +09:00
Paul Masurel
68fe406924 Removed asserts (#850) 2020-07-16 12:24:55 +09:00
Paul Masurel
f71b04acb0 Bugfix. (#849)
go_to_first_doc was typically calling seek with a target smaller than
doc.

Since SegmentPostings typically do a linear search on the full block,
regardless of the current position, it could have our segment postings
go backward.
2020-07-16 10:57:51 +09:00
lyj
1ab7f660a4 Update index.rs (#846) 2020-07-02 15:11:38 +09:00
Sean Stangl
0ebbc4cb5a Fix incorrect SimpleTokenizer link in documentation (#844) 2020-07-01 10:26:36 +09:00
lyj
5300cb5da0 Update mod.rs (#845) 2020-07-01 10:25:26 +09:00
Ype Kingma
7d773abc92 Boolean query: do not combine excluded scores. (#840)
* Do nothing when combining score values of excluded scores.

* Add test case for two excluded.

* Test score for two excluded terms.

* Use TopDocs in test_boolean_query_two_excluded
2020-06-08 20:01:19 +09:00
Paul Masurel
c34541ccce Alive doc iterator. (#837) 2020-06-05 19:42:51 +09:00
Paul Masurel
1cc5bd706c Fixes build for no-default-features (#839) 2020-06-05 19:41:55 +09:00
Paul Masurel
4026d183bc Small readability change 2020-06-03 09:04:57 +09:00
Paul Masurel
c0f5645cd9 Move for_each functions from Scorer to Weight. (#836)
* Move for_each functions from Scorer to Weight.

* Specialized foreach / foreach_pruning for union of termscorer.
2020-06-01 11:31:18 +09:00
Paul Masurel
cbff874e43 Change the loading of blocks. 2020-05-27 16:36:50 +09:00
Paul Masurel
baf015fc57 Simplification of the segment postings seek implementation. (#834) 2020-05-27 08:49:47 +09:00
Paul Masurel
7275ebdf3c Skiprefactoring skipabsolute (#831)
Simplification of the way we handle positions.
2020-05-25 09:51:23 +09:00
Paul Masurel
b974e7ce34 Closes #828. (#829)
There was a bug in the LogMergePolicy that was surfacing when there were
segments, but all of the segments were larger than the max limit.

After filtering, the list of segment candidates for merge was empty, and
the code was indexing the first element of an empty Vec.
2020-05-22 16:24:07 +09:00
Paul Masurel
8f8f34499f Updated CHANGELOG with the TopCollector offset information and cargo fmt. 2020-05-20 22:26:54 +09:00
Rob Young
6ea6f4bfcd Add offset to TopDocsCollector (#826)
* Add offset to TopDocsCollector

Add an offset to TopDocsCollector and TopDocs to make it clearer how to
handle pagination.

Closes #822

* Address review comments

- Make Debug formatting of TopDocs clearer.
- Add unit tests for limit and offset on TopCollector.
- Change API for using offset to a fluent interface.
- Add some context to the docstring to clarify what limit and offset are
  equivalent to in other projects.

* Changes required by rebase on e25284

- Pass Collector into TweakedScoreTopCollector and
  CustomScoreTopCollector.
- Add std:: qualifier to f32, i32 etc. Not sure why this was not failing
  already.
- Add unit tests for TopDocs with offset including for tweaked and
  custom score collectors.

In order to convert a TopCollector<Score> to a TopCollector<TScore> I
had to add a `into_tscore` method to `TopCollector`. This is a hack but
I don't know how to avoid it.
2020-05-20 22:25:24 +09:00
Paul Masurel
e25284bafe Major change in the DocSet/Scorer API (#824)
- Change in the DocSet and Scorer API. (@fulmicoton). 
A freshly created DocSet points directly to its first doc. A sentinel value called TERMINATED marks the end of a DocSet.
`.advance()` returns the new DocId. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)` and returns the resulting DocId.
As a result, iterating through a DocSet now looks as follows:
```rust
let mut doc = docset.doc();
while doc != TERMINATED {
   // ...
   doc = docset.advance();
}
```
The change made it possible to greatly simplify a lot of the docset's code.
- Misc internal optimization and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
2020-05-16 16:33:36 +09:00
Fisher Darling
8b67877cd5 Made field methods const fns (#823) 2020-05-16 10:59:50 +09:00
Rob Young
9de1360538 Minor doc and test improvements around fuzzy querying (#825) 2020-05-16 10:59:24 +09:00
Paul Masurel
c55db83609 Closes #805 (#820)
Added TryInto implementation for IndexReaderBuilder
2020-04-27 12:01:17 +09:00
Paul Masurel
1e5ebdbf3c Format and remove useless import (#819) 2020-04-27 11:56:49 +09:00
Paul Masurel
9a2090ab21 Create the MMapDirectory does not return a Directory. (#818) 2020-04-27 11:42:20 +09:00
Paul Masurel
e4aaacdb86 Minor change in README.md 2020-04-21 21:30:34 +09:00
Paul Masurel
29acf1104d Update README's claim on performance. 2020-04-21 14:44:26 +09:00
Paul Masurel
3d34fa0b69 Fixed changelog 2020-04-19 15:55:54 +09:00
Rob Young
77f363987a Make TweakScore and CustomScore mutable at the segment level (#807)
* Make TweakScore and CustomScore mutable

Make TweakScore and CustomScore mutable at the segment level.

Addresses issue #806

* Add example to show tweak_score working for facets
2020-04-19 15:54:00 +09:00
Paul Masurel
c0be461191 Removing tantivy-fst conf and removing warning. (#813) 2020-04-18 20:19:23 +09:00
dependabot-preview[bot]
1fb562f44a Update fail requirement from 0.3 to 0.4 (#810)
Updates the requirements on [fail](https://github.com/tikv/fail-rs) to permit the latest version.
- [Release notes](https://github.com/tikv/fail-rs/releases)
- [Changelog](https://github.com/tikv/fail-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tikv/fail-rs/compare/v0.3.0...v0.4.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>

Co-authored-by: dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com>
2020-04-17 07:14:19 +09:00
Rob Young
c591d0e591 Switch fst dependency to git (#808)
Closes #803

This allows the package to be built without first cloning the
tantivy-search/fst repo into the expected place. This should fix CI.
2020-04-16 23:05:12 +09:00
Paul Masurel
186d7fc20e Fix build 2020-04-01 09:32:45 +09:00
Paul Masurel
cfbdef5186 Using tantivy-fst version 0.3. 2020-03-31 23:24:54 +09:00
Paul Masurel
d04368b1d4 Closes #788. OR not working when using conjunction by default. (#802) 2020-03-31 21:13:50 +09:00
Chen Xu
b167058028 Fix prefix option for FuzzyTermQuery (#797)
* Fix prefix option for FuzzyTermQuery

* Update changelog
2020-03-19 20:19:32 +09:00
Paul Masurel
262957717b unit test fix and use of matches 2020-03-15 00:20:17 +09:00
Paul Masurel
873a808321 Removed itertools (#792) 2020-03-11 18:41:04 +09:00
dependabot-preview[bot]
6fa8f9330e Update base64 requirement from 0.11.0 to 0.12.0 (#791)
Updates the requirements on [base64](https://github.com/marshallpierce/rust-base64) to permit the latest version.
- [Release notes](https://github.com/marshallpierce/rust-base64/releases)
- [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md)
- [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.11.0...v0.12.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>

Co-authored-by: dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com>
2020-03-11 17:51:22 +09:00
Paul Masurel
b3f0ef0878 Avoid writing a new delete file if there was no actual deletes. (#787)
When applying the delete operations in the delete queue, it is possible
that there was no new deleted document.

In this case, avoid creating a new delete file, and updating the delete
opstamp.
2020-03-08 13:04:21 +09:00
Paul Masurel
04304262ba cargo fmt 2020-03-08 09:58:42 +09:00
Paul Masurel
920ced364a Added a method to persist the RAMDirectory into a different directory. 2020-03-07 17:00:50 +09:00
Paul Masurel
e0499118e2 Minor refactoring 2020-03-07 15:56:03 +09:00
Paul Masurel
50b5efae46 Added derive feature to serde crate 2020-03-06 23:46:29 +09:00
Paul Masurel
486b8fa9c5 Removing serde-derive dependency (#786) 2020-03-06 23:33:58 +09:00
Minoru Osuka
b2baed9bdd Add Lindera to README.md (#785)
* Add Lindera to README.md

* Put lindera in first place
2020-03-03 20:23:59 +09:00
Paul Masurel
b591542c0b Removing err.description() before deprecation. 2020-03-03 09:58:49 +09:00
Paul Masurel
a83fa00ac4 Faster compilation of query-grammar. (#784) 2020-03-02 22:12:42 +09:00
Paul Masurel
7ff5c7c797 Removing the fst feature in the levenshtein_automata crate. 2020-03-02 21:47:05 +09:00
Paul Masurel
1748602691 ignore -> compile_fail 2020-03-02 09:59:48 +09:00
Paul Masurel
6542dd5337 Removing parenthesis. 2020-03-01 09:41:53 +09:00
Nicholas Connor
c64a44b9e1 Slight re-organization to increase contrast of "Getting Started" (#783) 2020-02-28 08:42:38 +09:00
Paul Masurel
fccc5b3bed Closes #758 2020-02-27 17:58:43 +09:00
Paul Masurel
98b9d5c6c4 Closes #780. Will be fixed on the next published release. 2020-02-21 09:41:52 +09:00
Paul Masurel
afd2c1a8ad Merge branch 'master' of github.com:tantivy-search/tantivy 2020-02-19 22:08:44 +09:00
Paul Masurel
81f35a3ceb Bumped tantivy-grammar version 2020-02-19 22:08:31 +09:00
Paul Masurel
7e2e765f4a Bumped tantivy-grammar version 2020-02-19 22:07:54 +09:00
Paul Masurel
7d6cfa58e1 [WIP] Alternative take on boosted queries (#772)
* Alternative take on boosted queries

* Fixing unit test

* Added boosting to the query grammar.

* Made BoostQuery public.

* Added support for boosting field in QueryParser

Closes #547
2020-02-19 11:04:38 +09:00
Paul Masurel
14735ce3aa Update snap version to 1. (#781) 2020-02-17 10:41:44 +09:00
Paul Masurel
72f7cc1569 Closes #777 (#779) 2020-02-17 09:53:38 +09:00
Paul Masurel
abef5c4e74 Updating combine to version 4 (#775) 2020-02-06 23:02:48 +09:00
Paul Masurel
ae14022bf0 Removed use::Result. (#771) 2020-01-31 18:47:02 +09:00
Alexander
55f5658d40 Make Executor public so Searcher::search_in_executor method now can be used (#769)
* Make Executor public so Searcher::search_in_executor method now can be used

* Fixed cargo fmt
2020-01-31 15:50:26 +09:00
Paul Masurel
3ae6363462 Updated CHANGELOG 2020-01-30 10:16:56 +09:00
Halvor Fladsrud Bø
9e20d7f8a5 Maximum size of segment to be considered for merge (#765)
* Replicated changes from dead PR

* Ran formatter.
2020-01-30 10:14:34 +09:00
Halvor Fladsrud Bø
ab13ffe377 Facet path string (#759)
* Added to_path_string

* Fixed logic. Found strange behavior with string comparisons.

* ran formatter

* Fixed test

* Fixed format

* Fixed comment
2020-01-30 10:11:29 +09:00
Paul Masurel
039138ed50 Added the empty dictionary item in the CHANGELOG 2020-01-30 10:10:34 +09:00
Paul Masurel
6227a0555a Added unit test for empty dictionaries. 2020-01-30 10:08:27 +09:00
Audun Halland
f85d0a522a Optimize TermDictionary::empty by precomputed data source (#767) 2020-01-30 10:04:58 +09:00
Halvor Fladsrud Bø
5795488ba7 Backward iteration for termdict range (#757)
* Added backwards iteration to termdict

* Ran formatter

* Updated fst dependency

* Updated dependency

* Changelog and version

* Fixed version

* Made it part of 12.0
2020-01-30 09:59:21 +09:00
Paul Masurel
c3045dfb5c Remove time dev-deps by relying on chrono::Duration reexport. 2020-01-29 23:25:03 +09:00
Paul Masurel
811fd0cb9e Dynamic analyzer (#755)
* Removed generics in tokenizers

* lowercaser

* Added TokenizerExt

* Introducing BoxedTokenizer

* Introducing BoxXXXXX helper struct

* Closes #762.

* Introducing a TextAnalyzer
2020-01-29 18:23:37 +09:00
dependabot-preview[bot]
f6847c46d7 Update tantivy-fst requirement from 0.1 to 0.2 (#750)
Updates the requirements on [tantivy-fst](https://github.com/tantivy-search/fst) to permit the latest version.
- [Release notes](https://github.com/tantivy-search/fst/releases)
- [Commits](https://github.com/tantivy-search/fst/compare/0.1.1...0.2.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-21 07:57:39 +09:00
Paul Masurel
92dac7af5c Return an error instead of panicking when sorting by a non fast field. (#748)
Closes #747
2020-01-08 13:41:02 +09:00
Paul Masurel
801905d77f Davide romanini arm atomic mutex (#746)
* Add atomic mutex implementation for ARM.

* Applied rustfmt.

* rustfmt

Co-authored-by: davide-romanini <davide.romanini@gmail.com>
2019-12-30 23:42:11 +09:00
Paul Horn
8f5ac86f30 Expose UserOperation as a public type. (#744)
In order to make `IndexWriter::run` callable from outside of the crate,
the `UserOperation` type needs to be publicly available.
Since the `indexer` module is private, we just export the `UserOperation`
type directly.
2019-12-29 22:37:13 +09:00
Paul Masurel
d12a06b65b Tiny code simplification. 2019-12-26 09:33:17 +09:00
Minoru Osuka
749432f949 Make SchemaBuilder::add_field() public (#742)
* Make add_field() public

* cargo format
2019-12-25 20:37:34 +09:00
Paul Masurel
c1400f25a7 Handle facet search in the QueryParser. (#741)
Closes #738
2019-12-25 17:43:33 +09:00
Paul Masurel
87120acf7c Bump version 2019-12-20 21:22:43 +09:00
Paul Masurel
401f74f7ae Implement fast field for DateTime. (#736) 2019-12-20 21:20:15 +09:00
Paul Masurel
03d31f6713 Update CHANGELOG 2019-12-19 10:07:43 +09:00
Paul Masurel
a57faf07f6 Added a constructor for WatchHandle (#734)
Closes #731
2019-12-19 10:06:02 +09:00
Paul Masurel
562ea9a839 Merge branch 'master' of github.com:tantivy-search/tantivy 2019-12-19 09:32:50 +09:00
Paul Masurel
cf92cc1ada Closes #732 (#733)
The future returned by `IndexWriter::merge` does not borrow `&mut self`
2019-12-18 23:25:22 +09:00
Paul Masurel
f6000aece7 Closes #732
The future returned by `IndexWriter::merge` does not borrow `&mut self`
2019-12-18 21:48:51 +09:00
Paul Masurel
2b3fe3a2b5 Bumped version for hotfix 2019-12-17 21:10:50 +09:00
Paul Masurel
0fde90faac Closes #729 (#730)
Bug related with merge and deletes...
2019-12-17 21:09:08 +09:00
Paul Masurel
5838644b03 Added README in tantivy-query-grammar 2019-12-16 08:41:21 +09:00
Paul Masurel
c0011edd05 Added version for tantivy-grammar before publish 2019-12-16 08:35:17 +09:00
petr-tik
431c187a60 Make error handling richer in Footer::is_compatible (#724)
* WIP implemented is_compatible

hide Footer::from_bytes from public consumption - only found Footer::extract
used outside the module

Add a new error type for IncompatibleIndex
add a prototypical call to footer.is_compatible() in ManagedDirectory::open_read
to make sure we error before reading it further

* Make error handling more ergonomic

Add an error subtype for OpenReadError and converters to TantivyError

* Remove an unnecessary assert

it's followed by the same check, which errors instead of panicking

* Correct the compatibility check logic

Leave a defensive versioned footer check to make sure we add new logic handling
when we add possible footer versions

Restricted VersionedFooter::from_bytes to be used inside the crate only

remove a half-baked test

* WIP.

* Return an error if index incompatible - closes #662

Enrich the error type with incompatibility

Change return type to Result<bool, TantivyError>, instead of bool

Add an Incompatibility enum that enriches the IncompatibleIndex error variant
with information, which then allows us to generate a developer-friendly hint on how
to upgrade the library version or switch feature flags for a different compression
algorithm

Updated changelog

Change the signature of is_compatible

Added documentation to the Incompatibility
Added a conditional test on a Footer with lz4 erroring
2019-12-14 09:14:33 +09:00
Caio Romão
392abec420 Make u64_lenient() handle f64 fast fields too (#726)
* Make u64_lenient() handle f64 fast fields too

Without this, we get a panic during merge since the merger will
get a `None` where it expects something.

Prior to this patch, you can reproduce the panic with:

    use tantivy::{
        self,
        schema::{SchemaBuilder, FAST},
        Document, Index, Result,
    };

    #[test]
    fn pass() -> Result<()> {
        let mut builder = SchemaBuilder::new();
        let field = builder.add_f64_field("f64", FAST);
        let index = Index::create_in_ram(builder.build());

        let mut writer = index.writer_with_num_threads(1, 50_000_000)?;

        for i in 0..1000 {
            let mut doc = Document::new();
            doc.add_f64(field, 0.42);
            writer.add_document(doc);

            if i % 5 == 0 {
                writer.commit()?;
            }
        }

        writer.commit()?;

        Ok(())
    }

* Add test to verify that f64 fields are merged

* Ensure multi-valued fast fields can be merged too
2019-12-13 23:41:22 +09:00
Paul Masurel
dfbe337fe2 Optimize deletes (#723)
Closes #710
2019-12-13 09:50:00 +09:00
Paul Masurel
b9896c4962 Cleanup 2019-12-10 23:01:07 +09:00
Paul Masurel
afa5715e56 Added unit test. 2019-12-10 22:49:32 +09:00
Paul Masurel
79474288d0 Some clippy minor fixes (#722) 2019-12-09 13:40:04 +09:00
Paul Masurel
daf64487b4 Fixing JSON se/deserialization of dates. (#721)
Closes #719
2019-12-09 13:31:35 +09:00
Ximo Guanter
00816f5529 Fix outdated reference in documentation (#720) 2019-12-08 18:10:50 +09:00
Paul Masurel
f73787e6e5 Merge branch 'master' of github.com:tantivy-search/tantivy 2019-12-06 10:06:09 +09:00
Paul Masurel
5cffa71467 Using census 0.4 2019-12-06 10:04:01 +09:00
Christian Hunstad
02af28b3b7 add norwegian stemmer (#717) 2019-11-27 21:08:59 +09:00
Paul Masurel
afe0134d0f Kkoziara remove tokens from doc store (#715)
* Prevent tokens from being stored in the document store.

The commit adds a prepare_for_store method to Document, which changes all
PreTokenizedString values into String values. The method is called
before adding a document to the document store, to prevent tokens from
being saved there. The commit also makes small changes to comments in the
pre_tokenized_text example.

* Avoid storing the pretokenized text.
2019-11-25 22:39:12 +09:00
Christian Hunstad
db9e81d0f9 Updated rust-stemmers version to 1.2 (#716)
* Updated rust-stemmers version to 1.2

* 1.2.0 -> 1.2
2019-11-25 22:38:48 +09:00
Paul Masurel
3821f57ecc Closes #712 (#714)
Fixing the memory leak in the DeleteQueue.
2019-11-25 15:57:29 +09:00
Paul Masurel
d379f98b22 Waiting for indexing threads when dropping IndexWriter 2019-11-23 15:00:27 +09:00
Paul Masurel
ef3eddf3da clippy first stab (#711) 2019-11-22 13:09:35 +09:00
Paul Masurel
08a2368845 Closes #708 (#709)
Fixes a race condition in the test.
2019-11-21 11:41:59 +09:00
Paul Masurel
1868fc1e2c Text fix 2019-11-20 23:00:39 +09:00
Paul Masurel
451a0252ab thread pool merge (#704) 2019-11-20 21:18:05 +09:00
Paul Masurel
42756c7474 Removing futures-cpupool and upgrading to futures-0.3 2019-11-15 18:35:31 +09:00
Paul Masurel
598b076240 Making some of the IndexWriter's method public. 2019-11-11 12:41:45 +09:00
Paul Masurel
f1f96fc417 Updating some doc. 2019-11-11 10:04:12 +09:00
Paul Masurel
9c941603f5 Petr tik n662 errror incompatible footer version (#696)
* code tidy-up

Replace `20` magic constant with COMMON_FOOTER_SIZE

Add a docstring showing how footer is serialised
Add a test for footer length checking

* Add more tests for VersionedFooter

successful and panicking .to_bytes() calls

* Minor changes in footer.rs
2019-11-10 14:40:06 +09:00
Paul Masurel
fb3d6fa332 Adding Value::From<PretokenizedText> (#697) 2019-11-10 14:39:44 +09:00
Paul Masurel
88fd7f091a SegmentUpdater.add_segment does not need to return true (#693) 2019-11-09 21:18:51 +09:00
Jacob Brown
6e4fdfd4bf replace scoped_pool (#685) 2019-11-07 10:26:08 +09:00
kkoziara
0519056bd8 Added handling of pre-tokenized text fields (#642). (#669)
* Added handling of pre-tokenized text fields (#642).

* * Updated changelog and examples concerning #642.
* Added tokenized_text method to Value implementation.
* Implemented From<TokenizedString> for TokenizedStream.

* * Removed tokenized flag from TextOptions and code reliance on the flag.
* Changed naming to use word "pre-tokenized" instead of "tokenized".
* Updated example code.
* Fixed comments.

* Minor code refactoring. Test improvements.
2019-11-07 10:10:56 +09:00
dependabot-preview[bot]
7305ad575e Update smallvec requirement from 0.6 to 1.0 (#686)
Updates the requirements on [smallvec](https://github.com/servo/rust-smallvec) to permit the latest version.
- [Release notes](https://github.com/servo/rust-smallvec/releases)
- [Commits](https://github.com/servo/rust-smallvec/compare/v0.6.0...v1.0.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-11-07 09:55:33 +09:00
Paul Masurel
79f64ac2f4 Create FUNDING.yml 2019-11-05 16:26:12 +09:00
Paul Masurel
67bce6cbf2 Fixing the construction of the DeleteBitset. (#683)
Closes #681
2019-11-04 15:39:11 +09:00
xiaoniu-578fa6bff964d005
e5316a4388 Reduce unnecessary clone. (#684) 2019-11-04 13:57:59 +09:00
Mathias Svensson
6a8a8557d2 Use slice::iter instead of into_iter to avoid future breakage (#679)
* Use `slice::iter` instead of `into_iter` to avoid future breakage

`an_array.into_iter()` currently just works because of the autoref
feature, which then calls `<[T] as IntoIterator>::into_iter`. But
in the future, arrays will implement `IntoIterator`, too. In order
to avoid problems in the future, the call is replaced by `iter()`
which is shorter and more explicit.

* cargo fmt
2019-10-31 20:59:50 +09:00
Alberto Piai
3a65dc84c8 TopDocs: ensure stable sorting on equal score (#675)
* TopDocs: ensure stable sorting on equal score

When selecting the top K documents by score, we need to ensure stable
sorting. Until now, for documents with the same score, we were relying
on the (arbitrary) order returned by the BinaryHeap used to implement
the collectors.

This patch fixes the problem by explicitly using the doc address when
harvesting the `TopSegmentCollector` and when merging the results in
`TopCollector::merge_fruits()`.

This is important (for example) to implement pagination correctly using
the TopDocs collector. If sorting isn't stable, documents that have the
same score might be ranked in different positions depending on the
specific K that was used, thus appearing in two different pages, or in
none at all.

Fixes gh-671

* TMP: alternative solution (see previous commit)

If we add the constraint that D is also PartialOrd in ComparableDoc<T,
D>, then we can move the comparison by doc address directly into the cmp
implementation of ComparableDoc.

* TMP rebase as first commit: add benchmarks for TopSegmentCollector

* fixup! TMP: alternative solution (see previous commit)

* TMP add changelog entry

* TMP run cargo fmt
2019-10-26 15:27:25 +09:00
dependabot-preview[bot]
ce42bbf5c9 Update base64 requirement from 0.10.0 to 0.11.0 (#676)
Updates the requirements on [base64](https://github.com/marshallpierce/rust-base64) to permit the latest version.
- [Release notes](https://github.com/marshallpierce/rust-base64/releases)
- [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md)
- [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.10.0...v0.11.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-10-26 15:24:47 +09:00
Paul Masurel
7b21b3f25a Refactoring around Field (#673)
* Refactoring around Field

Removing the contract about the order of the field, and the
field id allocation.

* Update delete_queue.rs

* Update field.rs
2019-10-25 09:06:44 +09:00
Paul Masurel
46caec1040 Updating uuid to 0.8 (#674) 2019-10-25 09:02:00 +09:00
petr-tik
1187a02a3e Fixed #664 (#667)
Removed references to u8 and old documentation
2019-10-22 09:34:10 +09:00
Andrew Banchich
f6c525b19e Fix grammar / punctuation (#668) 2019-10-21 10:50:53 +09:00
petr-tik
4a8f7712f3 Add a doctest to BooleanQuery (#630)
* Add a doctest to BooleanQuery

Closes #446

Mark a function that is only used in tests to be compiled for tests only

Fix doc-comments in a couple of related files

* Minor corrections

remove whitespace, fix typos, add explicit dyn marker

* WIP: BooleanQuery doc test

Trying to nest several BooleanQueries together

* Addressed old review

rust 2018 edition + make function available to everyone

* Box the previous query to resolve the type error

* Rework wording in DocAddress document strings

* Reworded and restructured the docstring
2019-10-07 10:05:12 +09:00
Paul Masurel
2f867aad17 Fix bench (#663)
* fmt

* Fixing bench compilation
2019-10-04 17:07:49 +09:00
Paul Masurel
5c6580eb15 fmt (#661) 2019-10-04 12:10:01 +09:00
Paul Masurel
4c3941750b Waiting potentially longer on watch 2019-10-01 09:50:46 +09:00
Paul Masurel
2ea8e618f2 Merge branch 'hotfix-656' 2019-10-01 09:44:56 +09:00
Paul Masurel
94f27f990b Address #656
Broke the reference loop to make sure that the watch_router can
be dropped, and the thread exits.
2019-10-01 09:34:22 +09:00
Paul Masurel
349e8aa348 Removed enum variants on type alias 2019-09-26 18:43:29 +09:00
Paul Masurel
cde9b78b8d Fixing the issue associated with the Regex performance change 2019-09-18 18:29:27 +09:00
fdb-hiroshima
d8894f0bd2 add checksum check in ManagedDirectory (#605)
* add checksum check in ManagedDirectory

fix #400

* flush after writing checksum

* don't checksum atomic file access and clone managed_paths

* implement a footer storing metadata about a file

this is more of a PoC; it requires some refactoring into multiple files
`terminate(self)` is implemented, but not used anywhere yet

* address comments and simplify things with new contract

use BitOrder for integer to raw byte conversion
consider that atomic write implies atomic read, which might not actually be true
use some indirection to have a boxable terminating writer

* implement TerminatingWrite and make terminate() be called where it should

add dependency on drop_bomb to help find where terminate() should be called
implement TerminatingWrite for wrapper writers
make tests pass
/!\ some tests seem to pass where they shouldn't

* remove usage of drop_bomb

* fmt

* add test for checksum

* address some review comments

* update changelog

* fmt
2019-09-18 18:26:25 +09:00
fdb-hiroshima
7e08e0047b fix Term documentation (#655)
u64-based fields are actually 4+8=12 bytes long
2019-09-11 18:49:35 +09:00
fdb-hiroshima
1a817f117f fix documentation error (#654)
Union misdocumented as doing an intersection
Union and Intersection can hold more than 2 DocSets
2019-09-11 17:12:08 +09:00
petr-tik
2ec19b21ae Remove unnecessary duplicate methods (#650)
Closes #649

Spotted by @imor
2019-09-09 06:36:04 +09:00
Raminder Singh
141f5a93f7 Using FnvHashMap for mapping UnorderedTermId to TermOrdinal. Fixes #507 (#647)
* Using FnvHashMap for mapping UnorderedTermId to TermOrdinal. Fixes #507

* Fixed cargo fmt errors
2019-09-07 19:40:21 +09:00
Paul Masurel
df47d55cd2 Occur debug interface (#648) 2019-09-07 15:08:45 +09:00
Raminder Singh
5e579fd6b7 Fixed clippy warning: unneeded return statement (#646) 2019-09-07 10:14:37 +09:00
Paul Masurel
4b9c1dce69 Moving queyr grammar to a different crate. (#645) 2019-09-05 09:37:28 +09:00
Paul Masurel
d74f71bbef Lighter regex dependency. (#644)
Detail on https://github.com/rust-lang/regex/pull/613
2019-09-04 13:10:12 +09:00
Paul Masurel
5196ca41d8 Small code clean up 2019-09-03 09:22:32 +09:00
dependabot-preview[bot]
4959e06151 Update once_cell requirement from 0.2 to 1.0 (#643)
Updates the requirements on [once_cell](https://github.com/matklad/once_cell) to permit the latest version.
- [Release notes](https://github.com/matklad/once_cell/releases)
- [Changelog](https://github.com/matklad/once_cell/blob/master/CHANGELOG.md)
- [Commits](https://github.com/matklad/once_cell/compare/v0.2.0...v1.0.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-09-03 07:00:45 +09:00
Paul Masurel
c1635c13f6 RegexQuery performance: make it possible to cache Regexes - remastered by fulmicoton (Closes #639) (#641)
* small docs cleanup

* only compile a regex once per RegexQuery

Building a `Regex` is an expensive operation. Users of `RegexQuery`
need to cache and reuse regexes when searching across multiple fields.

This is the first step towards allowing that: we can store the `Regex`
directly in the `RegexQuery`, instead of the string pattern.

* RegexQuery: account for possible failure in the constructor

When building a regex from a str pattern, we have to account for the
possibility that the pattern is invalid. Before the previous commit, the
failure would happen in the `specialized_weight` method. Now that we
store a compiled `Regex` in `RegexQuery`, `specialized_weight` doesn't
fail anymore, and we can fail early while constructing `RegexQuery` if
the pattern is invalid.

This is a breaking change for users of `RegexQuery::new`.

* add RegexQuery::from_regex method

This builds a `RegexQuery` from an already compiled `Regex`. The use of
`Into<Arc<Regex>>` is to allow the caller to either simply pass a
`Regex`, or an `Arc<Regex>`, in case it needs to be cached and shared on
the caller's side.

* Using an Arc in AutomatonWeight

Closes #639
2019-08-22 16:14:01 +09:00
Paul Masurel
135e0ea2e9 Expose new segment meta from Index (#637) 2019-08-19 10:39:15 +09:00
Paul Masurel
f283bfd7ab Added segmentid_from_string (#636) 2019-08-19 10:37:30 +09:00
Joshua Dutton
9f74786db2 Update import statements in examples, doctests (#633)
Update import statements to edition 2018, including removing
`extern crate` and  `#[macro_use]`. Alphabetize the statements.
2019-08-19 07:26:35 +09:00
Joshua Dutton
32e5d7a0c7 Fix trait object in doctest (#635) 2019-08-19 07:25:00 +09:00
Joshua Dutton
84c615cff1 Fixing typos (#634) 2019-08-19 07:24:05 +09:00
Paul Masurel
039c0a0863 Introducing a wrapper struct instead of Boxed<BoxableTokenizer> (#631)
Closes #629
2019-08-15 16:37:04 +09:00
Paul Masurel
b3b0138b82 Change for tantivy-py
Schema.convert_named_doc
Better Debug string for Terms and TermQueries
2019-08-14 17:44:25 +09:00
petr-tik
ea56160cdc Added cargo-fmt to CI runs (#627)
* Added cargo-fmt to CI runs

Closes #625

* Remove fmt from appveyor builds

Windows seems to have issues with installing components through rustup.

Formatting should be equally informative regardless of the OS,
so best to keep it in Linux on Travis
2019-08-12 08:25:47 +09:00
petr-tik
028b0a749c Elastic unbounded range query (#624)
* Tidy up

fmt

remove unnecessary -> Result<()> followed by run.unwrap() in a test

* Adding support for elasticsearch-style unbounded queries

Extend the UserInputBound to include Unbounded, so we can reuse formatting and
internal query format

* Still working on elastic-style range queries

Fixes #498

Merge the elastic_range into range

Reformat to make code easier to follow, use optional() macro to return Some

* Fixed bugs

Made the range parser insensitive to whitespace between the ":" and the range.

Removed optional parsing of field.

Added a unit test for the range parser.

Derived PartialEq to compare the results of parsing as structs, instead of
strings. Found a bug with that unit test - "*}" was parsed as an
UserInputBound::Exclusive, instead of UserInputBound::Unbounded. Added an early
detection-and-return for * in the original range parser

* Correct failing test

Assume that we will use "{*" for Unbounded ranges

* Add a note in the changelog

cargo-fmt

* Moved parenthesis to a newline to make nested if-else more visible
2019-08-12 08:24:47 +09:00
Paul Masurel
941f06eb9f Added Schema.from_named_doc 2019-08-11 16:50:32 +09:00
Paul Masurel
04832a86eb WTF is this file doing here (#622) 2019-08-08 21:54:10 +09:00
fdb-hiroshima
beb8e990cd fix parsing neg float in range query (#621)
fix #620
2019-08-08 20:41:04 +09:00
Paul Masurel
001af3876f cargo fmt 2019-08-08 18:07:19 +09:00
Paul Masurel
f428f344da Various bugfix in the query parser (#619) 2019-08-08 17:48:21 +09:00
Paul Masurel
143f78eced Trying to fix #609 (#616) 2019-08-06 20:33:30 +09:00
Kornel
754b55eee5 Bump deps (#613)
* Bump crossbeam

* Warnings--

* Remove outdated tempdir
2019-08-05 22:21:22 +09:00
Paul Masurel
280ea1209c Changes required for python binding (#610) 2019-08-01 17:26:21 +09:00
petr-tik
0154dbe477 Replace unwrap with match and proper Error handling (#606)
* Replace unwrap with match and proper Error handling

* Replaced 'magic' values with a documented variable

Didn't like the unexplained 0..3 range, thought it was best as a variable

Calculating Levenshtein distance is expensive, so best explain why we should
keep it low
2019-07-31 08:16:02 +09:00
329 changed files with 152869 additions and 15868 deletions

.github/FUNDING.yml

@@ -0,0 +1,12 @@
# These are supported funding model platforms
github: fulmicoton
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

.github/ISSUE_TEMPLATE/actions.md

@@ -0,0 +1,13 @@
---
name: Actions
about: Actions not directly related to producing code.
---
# Actions title
Action description.
e.g.
- benchmark
- investigate and report
- etc.

.github/dependabot.yml

@@ -0,0 +1,15 @@
version: 2
updates:
  - package-ecosystem: cargo
    directory: "/"
    schedule:
      interval: daily
      time: "20:00"
    open-pull-requests-limit: 10
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: daily
      time: "20:00"
    open-pull-requests-limit: 10

.github/workflows/coverage.yml

@@ -0,0 +1,26 @@
name: Coverage
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
jobs:
  coverage:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Install Rust
        run: rustup toolchain install nightly --profile minimal --component llvm-tools-preview
      - uses: Swatinem/rust-cache@v2
      - uses: taiki-e/install-action@cargo-llvm-cov
      - name: Generate code coverage
        run: cargo +nightly llvm-cov --all-features --workspace --lcov --output-path lcov.info
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        continue-on-error: true
        with:
          token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
          files: lcov.info
          fail_ci_if_error: true

.github/workflows/long_running.yml

@@ -0,0 +1,28 @@
name: Long running tests
on:
  push:
    branches: [ main ]
env:
  CARGO_TERM_COLOR: always
  NUM_FUNCTIONAL_TEST_ITERATIONS: 20000
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Install stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          profile: minimal
          override: true
      - name: Run indexing_unsorted
        run: cargo test indexing_unsorted -- --ignored
      - name: Run indexing_sorted
        run: cargo test indexing_sorted -- --ignored

.github/workflows/test.yml

@@ -0,0 +1,74 @@
name: Unit tests
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
env:
  CARGO_TERM_COLOR: always
jobs:
  check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Install nightly
        uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly
          profile: minimal
          components: rustfmt
      - name: Install stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          profile: minimal
          components: clippy
      - uses: Swatinem/rust-cache@v2
      - name: Check Formatting
        run: cargo +nightly fmt --all -- --check
      - uses: actions-rs/clippy-check@v1
        with:
          toolchain: stable
          token: ${{ secrets.GITHUB_TOKEN }}
          args: --tests
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        features: [
          { label: "all", flags: "mmap,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints" },
          { label: "quickwit", flags: "mmap,quickwit,failpoints" }
        ]
    name: test-${{ matrix.features.label }}
    steps:
      - uses: actions/checkout@v3
      - name: Install stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          profile: minimal
          override: true
      - uses: taiki-e/install-action@nextest
      - uses: Swatinem/rust-cache@v2
      - name: Run tests
        run: cargo +stable nextest run --features ${{ matrix.features.flags }} --verbose --workspace
      - name: Run doctests
        run: cargo +stable test --doc --features ${{ matrix.features.flags }} --verbose --workspace

.gitignore

@@ -1,4 +1,6 @@
tantivy.iml
.cargo
proptest-regressions
*.swp
target
target/debug
@@ -11,3 +13,4 @@ cpp/simdcomp/bitpackingbenchmark
*.bk
.idea
trace.dat
cargo-timing*


@@ -1,90 +0,0 @@
# Based on the "trust" template v0.1.2
# https://github.com/japaric/trust/tree/v0.1.2
dist: trusty
language: rust
services: docker
sudo: required
env:
global:
- CRATE_NAME=tantivy
- TRAVIS_CARGO_NIGHTLY_FEATURE=""
# - secure: eC8HjTi1wgRVCsMAeXEXt8Ckr0YBSGOEnQkkW4/Nde/OZ9jJjz2nmP1ELQlDE7+czHub2QvYtDMG0parcHZDx/Kus0yvyn08y3g2rhGIiE7y8OCvQm1Mybu2D/p7enm6shXquQ6Z5KRfRq+18mHy80wy9ABMA/ukEZdvnfQ76/Een8/Lb0eHaDoXDXn3PqLVtByvSfQQ7OhS60dEScu8PWZ6/l1057P5NpdWbMExBE7Ro4zYXNhkJeGZx0nP/Bd4Jjdt1XfPzMEybV6NZ5xsTILUBFTmOOt603IsqKGov089NExqxYu5bD3K+S4MzF1Nd6VhomNPJqLDCfhlymJCUj5n5Ku4yidlhQbM4Ej9nGrBalJnhcjBjPua5tmMF2WCxP9muKn/2tIOu1/+wc0vMf9Yd3wKIkf5+FtUxCgs2O+NslWvmOMAMI/yD25m7hb4t1IwE/4Bk+GVcWJRWXbo0/m6ZUHzRzdjUY2a1qvw7C9udzdhg7gcnXwsKrSWi2NjMiIVw86l+Zim0nLpKIN41sxZHLaFRG63Ki8zQ/481LGn32awJ6i3sizKS0WD+N1DfR2qYMrwYHaMN0uR0OFXYTJkFvTFttAeUY3EKmRKAuMhmO2YRdSr4/j/G5E9HMc1gSGJj6PxgpQU7EpvxRsmoVAEJr0mszmOj9icGHep/FM=
addons:
apt:
sources:
- ubuntu-toolchain-r-test
- kalakris-cmake
packages:
- gcc-4.8
- g++-4.8
- libcurl4-openssl-dev
- libelf-dev
- libdw-dev
- binutils-dev
- cmake
matrix:
include:
# Android
- env: TARGET=aarch64-linux-android DISABLE_TESTS=1
#- env: TARGET=arm-linux-androideabi DISABLE_TESTS=1
#- env: TARGET=armv7-linux-androideabi DISABLE_TESTS=1
#- env: TARGET=i686-linux-android DISABLE_TESTS=1
#- env: TARGET=x86_64-linux-android DISABLE_TESTS=1
# Linux
#- env: TARGET=aarch64-unknown-linux-gnu
#- env: TARGET=i686-unknown-linux-gnu
- env: TARGET=x86_64-unknown-linux-gnu CODECOV=1 #UPLOAD_DOCS=1
# - env: TARGET=x86_64-unknown-linux-musl CODECOV=1
# OSX
#- env: TARGET=x86_64-apple-darwin
# os: osx
before_install:
- set -e
- rustup self update
install:
- sh ci/install.sh
- source ~/.cargo/env || true
- env | grep "TRAVIS"
before_script:
- export PATH=$HOME/.cargo/bin:$PATH
- cargo install cargo-update || echo "cargo-update already installed"
- cargo install cargo-travis || echo "cargo-travis already installed"
script:
- bash ci/script.sh
before_deploy:
- sh ci/before_deploy.sh
after_success:
# Needs GH_TOKEN env var to be set in travis settings
- if [[ -v GH_TOKEN ]]; then echo "GH TOKEN IS SET"; else echo "GH TOKEN NOT SET"; fi
- if [[ -v UPLOAD_DOCS ]]; then cargo doc; cargo doc-upload; else echo "doc upload disabled."; fi
#cache: cargo
#before_cache:
# # Travis can't cache files that are not readable by "others"
# - chmod -R a+r $HOME/.cargo
# - find ./target/debug -type f -maxdepth 1 -delete
# - rm -f ./target/.rustc_info.json
# - rm -fr ./target/debug/{deps,.fingerprint}/tantivy*
# - rm -r target/debug/examples/
# - ls -1 examples/ | sed -e 's/\.rs$//' | xargs -I "{}" find target/* -name "*{}*" -type f -delete
#branches:
# only:
# # release tags
# - /^v\d+\.\d+\.\d+.*$/
# - master
notifications:
email:
on_success: never

ARCHITECTURE.md

@@ -0,0 +1,295 @@
# Tantivy
## What is tantivy?
Tantivy is a library that is meant to build search engines. Although it is by no means a port of Lucene, its architecture is strongly inspired by it. If you are familiar with Lucene, you may be struck by the overlapping vocabulary.
This is not fortuitous.
Tantivy's bread and butter is to address the problem of full-text search:
Given a large set of textual documents and a text query, return the K most relevant documents in a very efficient way. To execute these queries rapidly, tantivy needs to build an index beforehand. The relevance score implemented in tantivy is not configurable. Tantivy uses the same score as the default similarity used in Lucene / Elasticsearch, called [BM25](https://en.wikipedia.org/wiki/Okapi_BM25).
But tantivy's scope does not stop there. Numerous features are required to power rich-search applications. For instance, one may want to:
- compute the count of documents matching a query in the different sections of an e-commerce website,
- display an average price per square meter for a real estate search engine,
- take into account historical user data to rank documents in a specific way,
- or even use tantivy to power an OLAP database.
A more abstract description of the problem space tantivy is trying to address is the following.
Ingest a large set of documents, create an index that makes it possible to
rapidly select all documents matching a given predicate (also known as a query) and
collect some information about them ([See collector](#collector-define-what-to-do-with-matched-documents)).
Roughly speaking the design is following these guiding principles:
- Search should be O(1) in memory.
- Indexing should be O(1) in memory. (In practice it is just sublinear)
- Search should be as fast as possible
This comes at the cost of the dynamicity of the index: while it is possible to add and delete documents from our corpus, tantivy is designed to handle these updates in large batches.
## [core/](src/core): Index, segments, searchers
Core contains all of the high-level code to make it possible to create an index, add documents, delete documents and commit.
This is at once the most high-level part of tantivy, the least performance-sensitive one, and the seemingly most mundane code... and paradoxically the most complicated part.
### Index and Segments
A tantivy index is a collection of smaller independent immutable segments.
Each segment contains its own independent set of data structures.
A segment is identified by a segment id that is in fact a UUID.
The files of a segment have the format
```segment-id . ext```
The extension signals which data structure (or [`SegmentComponent`](src/core/segment_component.rs)) is stored in the file.
A small `meta.json` file is in charge of keeping track of the list of segments, as well as the schema.
On commit, one segment per indexing thread is written to disk, and the `meta.json` is then updated atomically.
For a better idea of how indexing works, you may read the [following blog post](https://fulmicoton.com/posts/behold-tantivy-part2/).
### Deletes
Deletes happen by deleting a "term". Tantivy does not offer any notion of primary id, so it is up to the user to use a field in their schema as if it was a primary id, and delete the associated term if they want to delete only one specific document.
On commit, tantivy will find all of the segments with documents matching this existing term and remove them from the [alive bitset file](src/fastfield/alive_bitset.rs), which represents the bitset of the alive document ids.
Like all segment files, this file is immutable. Because it is possible to have more than one alive bitset file at a given instant, the alive bitset filename has the format ```segment_id . commit_opstamp . del```.
An opstamp is simply an incremental id that identifies any operation applied to the index. For instance, performing a commit or adding a document.
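As a rough illustration only (not tantivy's actual code), the two filename conventions above can be sketched like this; the UUID shown is made up for the example:
```rust
// Illustrative sketch of the two filename formats described above:
// `segment-id . ext` for segment components, and
// `segment_id . commit_opstamp . del` for alive-bitset / delete files.
fn segment_file(segment_uuid: &str, ext: &str) -> String {
    format!("{segment_uuid}.{ext}")
}

fn delete_file(segment_uuid: &str, commit_opstamp: u64) -> String {
    format!("{segment_uuid}.{commit_opstamp}.del")
}

fn main() {
    // The UUID below is made up for the example.
    let seg = "5c88a3e0c2f14d6f9a8d3b1e7f2c4a61";
    assert_eq!(segment_file(seg, "term"), "5c88a3e0c2f14d6f9a8d3b1e7f2c4a61.term");
    assert_eq!(delete_file(seg, 42), "5c88a3e0c2f14d6f9a8d3b1e7f2c4a61.42.del");
}
```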
### DocId
Within a segment, all documents are identified by a DocId that ranges within `[0, max_doc)`,
where `max_doc` is the number of documents in the segment (deleted or not). Having such a compact `DocId` space is key to the compression of our data structures.
The DocIds are simply allocated in the order documents are added to the index.
### Merges
In separate threads, tantivy's index writer searches for opportunities to merge segments.
The point of segment merge is to:
- eventually get rid of tombstoned documents
- reduce the otherwise ever-growing number of segments.
Indeed, while having several segments instead of one does not hurt search too much, having hundreds can have a measurable impact on the search performance.
### Searcher
The user of the library usually does not need to know about the existence of Segments.
Searching is done through an object called a [`Searcher`](src/core/searcher.rs), that captures a
snapshot of the index at one point of time, by holding a list of [SegmentReader](src/core/segment_reader.rs).
In other words, regardless of commits, file garbage collection, or segment merges that might happen, as long as the user holds and reuses the same [Searcher](src/core/searcher.rs), search will happen on an immutable snapshot of the index.
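A minimal sketch of this snapshot behavior using the public `Index` / `IndexReader` API (the field name and in-RAM setup are just for the example; exact signatures may vary slightly across 0.x versions):
```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

// One IndexReader, and a Searcher that keeps pointing at the same set of
// segments for as long as we hold it.
fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", TEXT); // illustrative field
    let index = Index::create_in_ram(schema_builder.build());

    let reader = index.reader()?;
    let searcher = reader.searcher();
    // Reusing `searcher` guarantees a consistent, immutable view of the index,
    // regardless of commits or merges happening concurrently.
    assert_eq!(searcher.num_docs(), 0);
    Ok(())
}
```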
## [directory/](src/directory): Where should the data be stored?
Tantivy, like Lucene, abstracts the place where the data should be stored in a key-trait
called [`Directory`](src/directory/directory.rs).
Contrary to Lucene however, "files" are quite different from some kind of `io::Read` object.
Check out [`src/directory/directory.rs`](src/directory/directory.rs) trait for more details.
Tantivy ships with two main directory implementations: the `MMapDirectory` and the `RAMDirectory`,
but users can extend tantivy with their own implementation.
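A small sketch of the two stock implementations backing the same schema; the on-disk path is an assumption made for the example and must already exist:
```rust
use tantivy::directory::MmapDirectory;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("title", TEXT); // illustrative field
    let schema = schema_builder.build();

    // RAM-backed index (handy for tests); wraps the RAM directory internally.
    let _in_ram = Index::create_in_ram(schema.clone());

    // Mmap-backed index on disk. The path is illustrative and assumed to exist.
    let mmap_dir = MmapDirectory::open("/tmp/tantivy-example")?;
    let _on_disk = Index::open_or_create(mmap_dir, schema)?;
    Ok(())
}
```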
## [schema/](src/schema): What are documents?
Tantivy's document follows a very strict schema, decided before building any index.
The schema defines all of the fields that the index's [`Document`](src/schema/document.rs) may and should contain, their types (`text`, `i64`, `u64`, `Date`, ...), as well as how each should be indexed / represented in tantivy.
Depending on the type of the field, you can decide to
- put it in the docstore
- store it as a fast field
- index it
Practically, tantivy will push values associated with this type to up to 3 respective
data structures.
*Limitations*
As of today, tantivy's schema imposes a 1:1 relationship between a field that is being ingested and a field represented in the search index. In sophisticated search applications, it is fairly common to want to index a field twice using different tokenizers, or to index the concatenation of several fields together into one field.
This is not something tantivy supports, and it is up to the user to duplicate / concatenate fields before feeding them to tantivy.
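As a small sketch of the three destinations above, a schema might declare (field names are illustrative): `STORED` puts the value in the docstore, `FAST` in the column-oriented fast fields, and `TEXT` makes the field indexed.
```rust
use tantivy::schema::{Schema, FAST, STORED, TEXT};

fn main() {
    let mut schema_builder = Schema::builder();
    // Indexed (tokenized) and stored in the docstore.
    schema_builder.add_text_field("title", TEXT | STORED);
    // Stored both as a fast field (column store) and in the docstore.
    schema_builder.add_u64_field("price", FAST | STORED);
    let _schema = schema_builder.build();
}
```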
## General information about these data structures
All data structures in tantivy have:
- a writer
- a serializer
- a reader
The writer builds an in-memory representation of a batch of documents. This representation is not searchable. It is just meant as an intermediary mutable representation, to which we can sequentially add
the documents of a batch. At the end of the batch (or if a memory limit is reached), this representation
is then converted into an on-disk immutable representation that is extremely compact.
This conversion is done by the serializer.
Finally, the reader is in charge of offering an API to read on this on-disk read-only representation.
In tantivy, readers are designed to require very little anonymous memory. The data is read straight from an mmapped file, and loading an index is as fast as mmapping its files.
## [store/](src/store): Here is my DocId, Gimme my document
The docstore is a row-oriented storage that, for each document, stores a subset of the fields
that are marked as stored in the schema. The docstore is compressed using a general-purpose algorithm
like LZ4.
**Useful for**
In search engines, it is often used to display search results.
Once the top 10 documents have been identified, we fetch them from the store, and display them or their snippet on the search result page (aka SERP).
**Not useful for**
Fetching a document from the store is typically a "slow" operation. It usually consists of:
- searching a compact tree-like data structure to find the position of the right block,
- decompressing a small block,
- returning the document from this block.
It is NOT meant to be called for every document matching a query.
As a rule of thumb, if you hit the docstore more than 100 times per search query, you are probably misusing tantivy.
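A minimal sketch of the intended usage, where the store is hit only for the top hits. The field name, content, and memory budget are illustrative, and some details (such as `add_document` returning a `Result`) vary slightly across 0.x versions:
```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let body = schema_builder.add_text_field("body", TEXT | STORED);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(body => "hello happy tax payer"))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, vec![body]).parse_query("tax")?;
    // Identify the top 10 hits first...
    for (_score, doc_address) in searcher.search(&query, &TopDocs::with_limit(10))? {
        // ...and hit the docstore only for those few documents.
        let retrieved = searcher.doc(doc_address)?;
        println!("{}", index.schema().to_json(&retrieved));
    }
    Ok(())
}
```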
## [fastfield/](src/fastfield): Here is my DocId, Gimme my value
Fast fields are stored in a column-oriented storage that allows for random access.
The only compression applied is bitpacking. The column comes with two pieces of metadata:
the minimum value in the column and the number of bits per doc.
Fetching a value for a `DocId` is then as simple as computing
```rust
min_value + fetch_bits(num_bits * doc_id..num_bits * (doc_id+1))
```
This operation just requires one memory fetch.
Because DocSets are scanned through in order (DocIds are iterated in a sorted manner), this
also helps locality.
In Lucene's jargon, fast fields are called DocValues.
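The following is a toy illustration of that formula, not tantivy's actual reader: a bitpacked column with a minimum value and a fixed number of bits per document.
```rust
// Toy bitpacked column: `min_value` plus `num_bits` bits per document,
// packed LSB-first into a byte buffer. Not tantivy's actual implementation.
struct ToyColumn {
    min_value: u64,
    num_bits: u64,
    bits: Vec<u8>, // bitpacked payload
}

impl ToyColumn {
    fn get(&self, doc_id: u64) -> u64 {
        let start_bit = self.num_bits * doc_id;
        let mut value = 0u64;
        for i in 0..self.num_bits {
            let bit = start_bit + i;
            let byte = self.bits[(bit / 8) as usize];
            if byte >> (bit % 8) & 1 == 1 {
                value |= 1 << i;
            }
        }
        self.min_value + value
    }
}

fn main() {
    // Two documents, 4 bits per doc: min 100 plus deltas [3, 9] -> [103, 109].
    let col = ToyColumn { min_value: 100, num_bits: 4, bits: vec![0b1001_0011] };
    assert_eq!(col.get(0), 103);
    assert_eq!(col.get(1), 109);
}
```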
**Useful for**
They are typically integer values that are useful to either rank or compute aggregates over
all of the documents matching a query (aka [DocSet](src/docset.rs)).
For instance, one could define a function to combine upvotes with tantivy's internal relevancy score.
This can be done by fetching a fast field during scoring.
One could also compute the mean price of the items matching a query in an e-commerce website.
This can be done by fetching a fast field in a collector.
Finally, one could decide to post-filter a docset to remove documents with a price within a specific range.
If the ratio of filtered-out documents is not too low, an efficient way to do this is to fetch the price and apply the filter on the collector side.
Aside from integer values, it is also possible to store an actual byte payload.
For an advanced search engine, it is possible to store all of the features required for learning-to-rank in a byte payload, access it during search, and apply the learning-to-rank model.
Finally, facets are a specific kind of fast field, and the associated source code is in [`fastfield/facet_reader.rs`](src/fastfield/facet_reader.rs).
# The inverted search index
The inverted index is the core part of full-text search.
When presented with a new document containing the text field "Hello, happy tax payer!", tantivy breaks it into a list of so-called tokens. In addition to just splitting these strings into tokens, it might also perform different kinds of operations like dropping the punctuation, converting the characters to lowercase, applying stemming, etc. Tantivy makes it possible to configure in the schema which operations are applied (tokenizer/ is the place where these operations are implemented).
For instance, the default tokenizer of tantivy would break our text into: `[hello, happy, tax, payer]`.
The document will therefore be registered in the inverted index as containing the terms
`[text:hello, text:happy, text:tax, text:payer]`.
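A minimal sketch of this, using the same pattern as the `benches/analyzer.rs` file added in this diff:
```rust
use tantivy::tokenizer::TokenizerManager;

fn main() {
    let tokenizer_manager = TokenizerManager::default();
    let tokenizer = tokenizer_manager.get("default").unwrap();
    let mut token_stream = tokenizer.token_stream("Hello, happy tax payer!");
    let mut tokens: Vec<String> = Vec::new();
    while token_stream.advance() {
        tokens.push(token_stream.token().text.clone());
    }
    // Punctuation is dropped and characters are lowercased.
    assert_eq!(tokens, vec!["hello", "happy", "tax", "payer"]);
}
```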
The role of the inverted index is, given a term, to return a very fast iterator over the sorted doc ids matching that term.
Such an iterator is called a posting list. In addition to the `DocId`s, it can optionally give us the number of occurrences of the term in each document, also called the term frequency or TF.
Because these iterators are sorted by DocId, one can build an iterator over the documents matching `text:tax AND text:payer`, `(text:tax AND text:payer) OR (text:contribuable)`, or any boolean expression.
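As a sketch, draining such an iterator follows the `DocSet` pattern introduced in 0.13 (see the changelog below): a freshly created `DocSet` already points at its first document, and `TERMINATED` marks the end.
```rust
use tantivy::{DocId, DocSet, TERMINATED};

/// Drains any DocSet (e.g. a posting list) into a Vec of doc ids.
fn collect_doc_ids<D: DocSet>(mut docset: D) -> Vec<DocId> {
    let mut doc_ids = Vec::new();
    // A freshly created DocSet already points at its first document.
    let mut doc = docset.doc();
    while doc != TERMINATED {
        doc_ids.push(doc);
        doc = docset.advance();
    }
    doc_ids
}
```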
In order to represent the function
```Term ⟶ Posting```
the inverted index actually consists of two data structures chained together:
- [Term](src/schema/term.rs) ⟶ [TermInfo](src/postings/term_info.rs) is addressed by the term dictionary.
- [TermInfo](src/postings/term_info.rs) ⟶ [Posting](src/postings/postings.rs) is addressed by the posting lists.
Here, [TermInfo](src/postings/term_info.rs) is an object containing some metadata about the term.
## [termdict/](src/termdict): Here is a term, give me the [TermInfo](src/postings/term_info.rs)
Tantivy's term dictionary is mainly in charge of supplying the function
[Term](src/schema/term.rs) ⟶ [TermInfo](src/postings/term_info.rs)
It is itself broken into two parts.
- [Term](src/schema/term.rs) ⟶ [TermOrdinal](src/termdict/mod.rs) is addressed by a finite state transducer, implemented by the fst crate.
- [TermOrdinal](src/termdict/mod.rs) ⟶ [TermInfo](src/postings/term_info.rs) is addressed by the term info store.
## [postings/](src/postings): Iterate over documents... very fast
A posting list stores a sorted list of doc ids and, optionally, a term frequency for each doc.
The posting lists are stored in a separate file. The [TermInfo](src/postings/term_info.rs) contains an offset into that file and a number of documents for the given posting list. Both are required and sufficient to read the posting list.
The posting list is organized in blocks of 128 documents.
One block of doc ids is followed by one block of term frequencies.
The doc ids are delta encoded and bitpacked.
The term frequencies are bitpacked.
Because the number of docs is rarely a multiple of 128, the last block may contain anywhere between 1 and 127 documents. This last, partial block is encoded with variable-length integers instead of bitpacking.
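As a toy illustration (not tantivy's actual encoder, which bitpacks `&[u32]` blocks with SIMD), the per-block preparation looks roughly like this:
```rust
/// Toy version of the encoding of one full block of 128 doc ids.
/// Returns the deltas and the number of bits needed to bitpack each of them.
fn delta_encode_block(doc_ids: &[u32; 128], last_doc_of_previous_block: u32) -> (Vec<u32>, u8) {
    let mut prev = last_doc_of_previous_block;
    let mut deltas = Vec::with_capacity(doc_ids.len());
    for &doc in doc_ids {
        deltas.push(doc - prev); // doc ids are sorted, so deltas are non-negative
        prev = doc;
    }
    let max_delta = deltas.iter().copied().max().unwrap_or(0);
    // Smallest bit width that can represent the largest delta.
    let num_bits = (32 - max_delta.leading_zeros()) as u8;
    (deltas, num_bits)
}
```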
## [positions/](src/positions): Where are my terms within the documents?
Phrase queries make it possible to search for documents containing a specific sequence of terms.
For instance, the phrase query "the art of war" does not match "the war of art".
To make this possible, the schema can specify that a field should store positions in addition to being indexed.
The token positions of all of the terms are then stored in a separate file with the extension `.pos`.
The [TermInfo](src/postings/term_info.rs) gives an offset (expressed in positions this time) into this file. As we iterate through the docset,
we advance the position reader by the term frequency of the current document.
## [fieldnorms/](src/fieldnorms): Here is my doc, how many tokens in this field?
The [BM25](https://en.wikipedia.org/wiki/Okapi_BM25) formula also requires knowing the number of tokens stored in a specific field for a given document. We store this information on one byte per document in the fieldnorm.
The fieldnorm is therefore compressed lossily: values up to 40 are encoded unchanged.
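As a toy illustration of this kind of lossy one-byte code (tantivy's real lookup table uses different buckets), small values can be kept exact while larger values fall into coarser and coarser buckets:
```rust
/// Toy illustration of a lossy one-byte fieldnorm code.
/// Token counts up to 40 are stored exactly; larger counts fall into
/// exponentially growing buckets so that any count still fits in one byte.
fn encode_fieldnorm(num_tokens: u32) -> u8 {
    if num_tokens <= 40 {
        return num_tokens as u8;
    }
    // Bucket index grows with the log2 of the excess over 40.
    let bucket = 32 - (num_tokens - 40).leading_zeros() as u8;
    40 + bucket
}

fn decode_fieldnorm(code: u8) -> u32 {
    if code <= 40 {
        code as u32
    } else {
        // Return a representative value near the bottom of the bucket.
        40 + (1u32 << (code - 41))
    }
}
```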
## [tokenizer/](src/tokenizer): How should we process text?
Text processing is key to a good search experience.
Split or normalize your text too much, and the search results will have lower precision and higher recall.
Normalize too little or under-split your text, and you will end up with higher precision and lower recall.
Text processing can be configured by selecting an off-the-shelf [`Tokenizer`](./src/tokenizer/tokenizer.rs) or implementing your own to first split the text into tokens, and then chain different [`TokenFilter`](src/tokenizer/tokenizer.rs)'s to it.
Tantivy comes with a few tokenizers, but external crates offer advanced tokenizers, such as [Lindera](https://crates.io/crates/lindera) for Japanese.
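For example, a minimal sketch of wiring a custom analyzer, following the pattern from tantivy's tokenizer documentation (the `en_stem_custom` name is arbitrary):
```rust
use tantivy::tokenizer::{Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer, TextAnalyzer};
use tantivy::Index;

fn register_custom_analyzer(index: &Index) {
    // Split on non-alphanumeric characters, drop very long tokens,
    // lowercase, then apply English stemming.
    let analyzer = TextAnalyzer::from(SimpleTokenizer)
        .filter(RemoveLongFilter::limit(40))
        .filter(LowerCaser)
        .filter(Stemmer::new(Language::English));
    index.tokenizers().register("en_stem_custom", analyzer);
}
```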
## [query/](src/query): Define and compose queries
The [Query](src/query/query.rs) trait defines what a query is.
Because some queries need to compute statistics over the entire index, and because the
index is composed of several `SegmentReader`s, the path from a `Query` to an iterator over documents is slightly convoluted; but fundamentally, this is what a Query is.
The iterator over documents comes with a scoring function. The resulting trait is called a
[Scorer](src/query/scorer.rs) and is specific to a segment.
Different queries can be combined using the [BooleanQuery](src/query/boolean_query/).
Tantivy comes with different types of queries and can be extended by implementing
the `Query`, `Weight`, and `Scorer` traits.
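A sketch of composing queries programmatically, using the intersection/union helpers mentioned in the 0.14 changelog below (the `text` field is a placeholder):
```rust
use tantivy::query::{BooleanQuery, Query, TermQuery};
use tantivy::schema::{Field, IndexRecordOption};
use tantivy::Term;

/// Builds (text:tax AND text:payer) OR text:contribuable programmatically.
fn tax_query(text: Field) -> Box<dyn Query> {
    let term = |word: &str| -> Box<dyn Query> {
        Box::new(TermQuery::new(
            Term::from_field_text(text, word),
            IndexRecordOption::Basic,
        ))
    };
    let tax_and_payer = BooleanQuery::intersection(vec![term("tax"), term("payer")]);
    Box::new(BooleanQuery::union(vec![
        Box::new(tax_and_payer),
        term("contribuable"),
    ]))
}
```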
## [collector](src/collector): Define what to do with matched documents
Collectors define how to aggregate the documents matching a query, in the broadest sense possible.
The search will push matched documents one by one, calling their
`fn collect(doc: DocId, score: Score);` method.
Users may implement their own collectors by implementing the [Collector](src/collector/mod.rs) trait.
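A minimal sketch using two off-the-shelf collectors at once; collectors compose as tuples, so a single pass over the matching documents can both count them and keep the top 10:
```rust
use tantivy::collector::{Count, TopDocs};
use tantivy::query::Query;
use tantivy::{DocAddress, Score, Searcher};

/// Counts all matches and keeps the 10 best ones in a single pass.
fn count_and_top10(
    searcher: &Searcher,
    query: &dyn Query,
) -> tantivy::Result<(usize, Vec<(Score, DocAddress)>)> {
    // A tuple of collectors is itself a collector, and every matching
    // document is pushed to both of them.
    searcher.search(query, &(Count, TopDocs::with_limit(10)))
}
```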
## [query-grammar](query-grammar): Defines the grammar of the query parser
While the [QueryParser](src/query/query_parser/query_parser.rs) struct is located in the `query/` directory, the actual parser combinator used to convert user queries into an AST is in an external crate called `query-grammar`. This part was externalized to lighten the work of the compiler.


@@ -1,7 +1,223 @@
Tantivy 0.19
================================
- Updated [Date Field Type](https://github.com/quickwit-oss/tantivy/pull/1396)
The `DateTime` type has been updated to hold timestamps with microseconds precision.
`DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used to hint on fast values compression. Otherwise, seconds precision is used everywhere else (i.e terms, indexing).
- Remove Searcher pool and make `Searcher` cloneable.
Tantivy 0.18
================================
- For date values `chrono` has been replaced with `time` (@uklotzde) #1304 :
- The `time` crate is re-exported as `tantivy::time` instead of `tantivy::chrono`.
- The type alias `tantivy::DateTime` has been removed.
- `Value::Date` wraps `time::PrimitiveDateTime` without time zone information.
- Internally date/time values are stored as seconds since UNIX epoch in UTC.
- Converting a `time::OffsetDateTime` to `Value::Date` implicitly converts the value into UTC.
If this is not desired do the time zone conversion yourself and use `time::PrimitiveDateTime`
directly instead.
- Add [histogram](https://github.com/quickwit-oss/tantivy/pull/1306) aggregation (@PSeitz)
- Add support for fastfield on text fields (@PSeitz)
- Add terms aggregation (@PSeitz)
- Add support for zstd compression (@kryesh)
Tantivy 0.17
================================
- LogMergePolicy now triggers merges if the ratio of deleted documents reaches a threshold (@shikhar @fulmicoton) [#115](https://github.com/quickwit-oss/tantivy/issues/115)
- Adds a searcher Warmer API (@shikhar @fulmicoton)
- Change to non-strict schema. Ignore fields in data which are not defined in schema. Previously this returned an error. #1211
- Facets are necessarily indexed. Existing index with indexed facets should work out of the box. Index without facets that are marked with index: false should be broken (but they were already broken in a sense). (@fulmicoton) #1195 .
- Bugfix that could in theory impact durability on some filesystems [#1224](https://github.com/quickwit-oss/tantivy/issues/1224)
- Schema now offers not indexing fieldnorms (@lpouget) [#922](https://github.com/quickwit-oss/tantivy/issues/922)
- Reduce the number of fsync calls [#1225](https://github.com/quickwit-oss/tantivy/issues/1225)
- Fix opening bytes index with dynamic codec (@PSeitz) [#1278](https://github.com/quickwit-oss/tantivy/issues/1278)
- Added an aggregation collector for range, average and stats compatible with Elasticsearch. (@PSeitz)
- Added a JSON schema type @fulmicoton [#1251](https://github.com/quickwit-oss/tantivy/issues/1251)
- Added support for slop in phrase queries @halvorboe [#1068](https://github.com/quickwit-oss/tantivy/issues/1068)
Tantivy 0.16.2
================================
- Bugfix in FuzzyTermQuery. (transposition_cost_one was not doing anything)
Tantivy 0.16.1
========================
- Major Bugfix on multivalued fastfield. #1151
- Demux operation (@PSeitz)
Tantivy 0.16.0
=========================
- Bugfix in the filesum check. (@evanxg852000) #1127
- Bugfix in positions when the index is sorted by a field. (@appaquet) #1125
Tantivy 0.15.3
=========================
- Major bugfix. Deleting documents was broken when the index was sorted by a field. (@appaquet, @fulmicoton) #1101
Tantivy 0.15.2
========================
- Major bugfix. DocStore still panics when a deleted doc is at the beginning of a block. (@appaquet) #1088
Tantivy 0.15.1
=========================
- Major bugfix. DocStore panics when first block is deleted. (@appaquet) #1077
Tantivy 0.15.0
=========================
- API Changes. Using Range instead of (start, end) in the API and internals (`FileSlice`, `OwnedBytes`, `Snippets`, ...)
This change is breaking but migration is trivial.
- Added an Histogram collector. (@fulmicoton) #994
- Added support for Option<TCollector>. (@fulmicoton)
- DocAddress is now a struct (@scampi) #987
- Bugfix consistent tie break handling in facet's topk (@hardikpnsp) #357
- Date field support for range queries (@rihardsk) #516
- Added lz4-flex as the default compression scheme in tantivy (@PSeitz) #1009
- Renamed a lot of symbols to avoid all uppercasing on acronyms, as per new clippy recommendation. For instance, RAMDirectory -> RamDirectory. (@fulmicoton)
- Simplified positions index format (@fulmicoton) #1022
- Moved bitpacking to bitpacker subcrate and add BlockedBitpacker, which bitpacks blocks of 128 elements (@PSeitz) #1030
- Added support for more-like-this query in tantivy (@evanxg852000) #1011
- Added support for sorting an index, e.g presorting documents in an index by a timestamp field. This can heavily improve performance for certain scenarios, by utilizing the sorted data (Top-n optimizations)(@PSeitz). #1026
- Add iterator over documents in doc store (@PSeitz). #1044
- Fix log merge policy (@PSeitz). #1043
- Add detection to avoid small doc store blocks on merge (@PSeitz). #1054
- Make doc store compression dynamic (@PSeitz). #1060
- Switch to json for footer version handling (@PSeitz). #1060
- Updated TermMerger implementation to rely on the union feature of the FST (@scampi) #469
- Add boolean marking whether position is required in the query_terms API call (@fulmicoton). #1070
Tantivy 0.14.0
=========================
- Removed dependency on atomicwrites #833. (Implemented by @fulmicoton upon suggestion and research from @asafigan.)
- Migrated tantivy error from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
- API Change. Accessing the typed value off a `Schema::Value` now returns an Option instead of panicking if the type does not match.
- Large API change in the Directory API. Tantivy used to assume that all files could be somehow memory mapped. After this change, Directory returns a `FileSlice` that can be reduced and eventually read into an `OwnedBytes` object. Long and blocking IO operations are still required, but they do not span the entire file.
- Added support for Brotli compression in the DocStore. (@ppodolsky)
- Added helper for building intersections and unions in BooleanQuery (@guilload)
- Bugfix in `Query::explain`
- Removed dependency on `notify` #924. Replaced with `FileWatcher` struct that polls meta file every 500ms in background thread. (@halvorboe @guilload)
- Added `FilterCollector`, which wraps another collector and filters docs using a predicate over a fast field (@barrotsteindev)
- Simplified the encoding of the skip reader struct. BlockWAND max tf is now encoded over a single byte. (@fulmicoton)
- `FilterCollector` now supports all Fast Field value types (@barrotsteindev)
- FastField are not all loaded when opening the segment reader. (@fulmicoton)
- Added an API to merge segments, see `tantivy::merge_segments` #1005. (@evanxg852000)
This version breaks compatibility and requires users to reindex everything.
Tantivy 0.13.2
===================
Bugfix. Acquiring a facet reader on a segment that does not contain any
doc with this facet returns `None`. (#896)
Tantivy 0.13.1
===================
Made `Query` and `Collector` `Send + Sync`.
Updated misc dependency versions.
Tantivy 0.13.0
======================
Tantivy 0.13 introduces a change in the index format that will require
you to reindex your index (BlockWAND information is added in the skiplist).
The index size increase is minor as this information is only added for
full blocks.
If you have a massive index for which reindexing is not an option, please contact me
so that we can discuss possible solutions.
- Bugfix in `FuzzyTermQuery` not matching terms by prefix when it should (@Peachball)
- Relaxed constraints on the custom/tweak score functions. At the segment level, they can be mut, and they are not required to be Sync + Send.
- `MMapDirectory::open` does not return a `Result` anymore.
- Change in the DocSet and Scorer API. (@fulmicoton).
A freshly created DocSet point directly to their first doc. A sentinel value called TERMINATED marks the end of a DocSet.
`.advance()` returns the new DocId. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)` and returns the resulting DocId.
As a result, iterating through DocSet now looks as follows
```rust
let mut doc = docset.doc();
while doc != TERMINATED {
// ...
doc = docset.advance();
}
```
The change made it possible to greatly simplify a lot of the docset's code.
- Misc internal optimization and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
- Added an offset option to the Top(.*)Collectors. (@robyoung)
- Added Block WAND. Performance on TOP-K on term-unions should be greatly increased. (@fulmicoton, and special thanks
to the PISA team for answering all my questions!)
Tantivy 0.12.0
======================
- Removing static dispatch in tokenizers for simplicity. (#762)
- Added backward iteration for `TermDictionary` stream. (@halvorboe)
- Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
- Added a configurable maximum number of docs (10M by default) for a segment to be considered for merge (@hntd187, landed by @halvorboe #713)
- Important Bugfix #777, causing tantivy to retain memory mapping. (diagnosed by @poljar)
- Added support for field boosting. (#547, @fulmicoton)
## How to update?
Crates relying on custom tokenizer, or registering tokenizer in the manager will require some
minor changes. Check <https://github.com/quickwit-oss/tantivy/blob/main/examples/custom_tokenizer.rs>
to check for some code sample.
Tantivy 0.11.3
=======================
- Fixed DateTime as a fast field (#735)
Tantivy 0.11.2
=======================
- The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
- Exposing a constructor for `WatchHandle` (#731)
Tantivy 0.11.1
=====================
- Bug fix #729
Tantivy 0.11.0
=====================
- Added f64 field. Internally reuse u64 code the same way i64 does (@fdb-hiroshima)
- Various bugfixes in the query parser.
- Better handling of hyphens in query parser. (#609)
- Better handling of whitespaces.
- Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
- API change around `Box<BoxableTokenizer>`. See detail in #629
- Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
- Add footer with some metadata to index files. #605 (@fdb-hiroshima)
- Add a method to check the compatibility of the footer in the index with the running version of tantivy (@petr-tik)
- TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
- Added handling of pre-tokenized text fields (#642), which will enable users to
load tokens created outside tantivy. See usage in examples/pre_tokenized_text. (@kkoziara)
- Fix crash when committing multiple times with deleted documents. #681 (@brainlock)
## How to update?
- The index format is changed. You are required to reindex your data to use tantivy 0.11.
- `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
- Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
an error and handling the `Result` is required.
- `tantivy::version()` now returns a `Version` object. This object implements `ToString()`
Tantivy 0.10.2
=====================
- Closes #656. Solving memory leak.
Tantivy 0.10.1
=====================
@@ -10,32 +226,31 @@ Tantivy 0.10.1
Avoid watching the mmap directory until someone effectively creates a reader that uses
this functionality.
Tantivy 0.10.0
=====================
*Tantivy 0.10.0 index format is compatible with the index format in 0.9.0.*
- Added an API to easily tweak or entirely replace the
default score. See `TopDocs::tweak_score`and `TopScore::custom_score` (@pmasurel)
- Added an API to easily tweak or entirely replace the
default score. See `TopDocs::tweak_score`and `TopScore::custom_score` (@fulmicoton)
- Added an ASCII folding filter (@drusellers)
- Bugfix in `query.count` in presence of deletes (@pmasurel)
- Added `.explain(...)` in `Query` and `Weight` to (@pmasurel)
- Added an efficient way to `delete_all_documents` in `IndexWriter` (@petr-tik).
- Bugfix in `query.count` in presence of deletes (@fulmicoton)
- Added `.explain(...)` in `Query` and `Weight` to (@fulmicoton)
- Added an efficient way to `delete_all_documents` in `IndexWriter` (@petr-tik).
All segments are simply removed.
Minor
---------
- Switched to Rust 2018 (@uvd)
- Small simplification of the code.
- Small simplification of the code.
Calling .freq() or .doc() when .advance() has never been called
on segment postings should panic from now on.
- Tokens exceeding `u16::max_value() - 4` chars are discarded silently instead of panicking.
- Fast fields are now preloaded when the `SegmentReader` is created.
- `IndexMeta` is now public. (@hntd187)
- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `
Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
only require a read lock. (@pmasurel)
- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
only require a read lock. (@fulmicoton)
- Introducing `Opstamp` as an expressive type alias for `u64`. (@petr-tik)
- Stamper now relies on `AtomicU64` on all platforms (@petr-tik)
- Bugfix - Files get deleted slightly earlier
@@ -49,25 +264,26 @@ Your program should be usable as is.
Fast fields used to be accessed directly from the `SegmentReader`.
The API changed, you are now required to acquire your fast field reader via the
`segment_reader.fast_fields()`, and use one of the typed method:
`segment_reader.fast_fields()`, and use one of the typed method:
- `.u64()`, `.i64()` if your field is single-valued ;
- `.u64s()`, `.i64s()` if your field is multi-valued ;
- `.bytes()` if your field is bytes fast field.
Tantivy 0.9.0
=====================
*0.9.0 index format is not compatible with the
*0.9.0 index format is not compatible with the
previous index format.*
- MAJOR BUGFIX :
- MAJOR BUGFIX :
Some `Mmap` objects were being leaked, and would never get released. (@fulmicoton)
- Removed most unsafe (@fulmicoton)
- Indexer memory footprint improved. (VInt comp, inlining the first block. (@fulmicoton)
- Stemming in other language possible (@pentlander)
- Segments with no docs are deleted earlier (@barrotsteindev)
- Added grouped add and delete operations.
They are guaranteed to happen together (i.e. they cannot be split by a commit).
- Added grouped add and delete operations.
They are guaranteed to happen together (i.e. they cannot be split by a commit).
In addition, adds are guaranteed to happen on the same segment. (@elbow-jason)
- Removed `INT_STORED` and `INT_INDEXED`. It is now possible to use `STORED` and `INDEXED`
for int fields. (@fulmicoton)
@@ -81,59 +297,62 @@ tantivy 0.9 brought some API breaking change.
To update from tantivy 0.8, you will need to go through the following steps.
- `schema::INT_INDEXED` and `schema::INT_STORED` should be replaced by `schema::INDEXED` and `schema::INT_STORED`.
- The index now does not hold the pool of searcher anymore. You are required to create an intermediary object called
`IndexReader` for this.
- The index now does not hold the pool of searcher anymore. You are required to create an intermediary object called
`IndexReader` for this.
```rust
// create the reader. You typically need to create 1 reader for the entire
// lifetime of you program.
let reader = index.reader()?;
// Acquire a searcher (previously `index.searcher()`) is now written:
let searcher = reader.searcher();
// With the default setting of the reader, you are not required to
// With the default setting of the reader, you are not required to
// call `index.load_searchers()` anymore.
//
// The IndexReader will pick up that change automatically, regardless
// of whether the update was done in a different process or not.
// If this behavior is not wanted, you can create your reader with
// If this behavior is not wanted, you can create your reader with
// the `ReloadPolicy::Manual`, and manually decide when to reload the index
// by calling `reader.reload()?`.
```
```
Tantivy 0.8.2
=====================
Fixing build for x86_64 platforms. (#496)
No need to update from 0.8.1 if tantivy
is building on your platform.
Tantivy 0.8.1
=====================
Hotfix of #476.
Merge was reflecting deletes before commit was passed.
Merge was reflecting deletes before commit was passed.
Thanks @barrotsteindev for reporting the bug.
Tantivy 0.8.0
=====================
*No change in the index format*
- API Breaking change in the collector API. (@jwolfe, @fulmicoton)
- Multithreaded search (@jwolfe, @fulmicoton)
*No change in the index format*
- API Breaking change in the collector API. (@jwolfe, @fulmicoton)
- Multithreaded search (@jwolfe, @fulmicoton)
Tantivy 0.7.1
=====================
*No change in the index format*
- Bugfix: NGramTokenizer panics on non ascii chars
- Added a space usage API
Tantivy 0.7
=====================
- Skip data for doc ids and positions (@fulmicoton),
greatly improving performance
- Tantivy error now rely on the failure crate (@drusellers)
@@ -143,15 +362,15 @@ Tantivy 0.7
Tantivy 0.6.1
=========================
- Bugfix #324. GC was removing files that were still in use
- Added support for parsing AllQuery and RangeQuery via QueryParser
- AllQuery: `*`
- RangeQuery:
- Inclusive `field:[startIncl to endIncl]`
- Exclusive `field:{startExcl to endExcl}`
- Mixed `field:[startIncl to endExcl}` and vice versa
- Unbounded `field:[start to *]`, `field:[* to end]`
- AllQuery: `*`
- RangeQuery:
- Inclusive `field:[startIncl to endIncl]`
- Exclusive `field:{startExcl to endExcl}`
- Mixed `field:[startIncl to endExcl}` and vice versa
- Unbounded `field:[start to *]`, `field:[* to end]`
Tantivy 0.6
==========================
@@ -159,63 +378,58 @@ Tantivy 0.6
Special thanks to @drusellers and @jason-wolfe for their contributions
to this release!
- Removed C code. Tantivy is now pure Rust. (@pmasurel)
- BM25 (@pmasurel)
- Approximate field norms encoded over 1 byte. (@pmasurel)
- Compiles on stable rust (@pmasurel)
- Removed C code. Tantivy is now pure Rust. (@fulmicoton)
- BM25 (@fulmicoton)
- Approximate field norms encoded over 1 byte. (@fulmicoton)
- Compiles on stable rust (@fulmicoton)
- Add &[u8] fastfield for associating arbitrary bytes to each document (@jason-wolfe) (#270)
- Completely uncompressed
- Internally: One u64 fast field for indexes, one fast field for the bytes themselves.
- Completely uncompressed
- Internally: One u64 fast field for indexes, one fast field for the bytes themselves.
- Add NGram token support (@drusellers)
- Add Stopword Filter support (@drusellers)
- Add a FuzzyTermQuery (@drusellers)
- Add a RegexQuery (@drusellers)
- Various performance improvements (@pmasurel)_
- Various performance improvements (@fulmicoton)_
Tantivy 0.5.2
===========================
- bugfix #274
- bugfix #280
- bugfix #289
Tantivy 0.5.1
==========================
- bugfix #254 : tantivy failed if no documents in a segment contained a specific field.
- bugfix #254 : tantivy failed if no documents in a segment contained a specific field.
Tantivy 0.5
==========================
- Faceting
- RangeQuery
- Configurable tokenization pipeline
- Bugfix in PhraseQuery
- Various query optimisation
- Allowing very large indexes
- 64 bits file address
- Smarter encoding of the `TermInfo` objects
- 64 bits file address
- Smarter encoding of the `TermInfo` objects
Tantivy 0.4.3
==========================
- Bugfix race condition when deleting files. (#198)
Tantivy 0.4.2
==========================
- Prevent usage of AVX2 instructions (#201)
Tantivy 0.4.1
==========================
- Bugfix for non-indexed fields. (#199)
Tantivy 0.4.0
==========================
@@ -230,37 +444,31 @@ Tantivy 0.4.0
- Searching for a non-indexed field returns an explicit Error
- Phrase query for non-tokenized field are not tokenized by the query parser.
- Faster/Better indexing (@fulmicoton)
- using murmurhash2
- faster merging
- more memory efficient fast field writer (@lnicola )
- better handling of collisions
- lesser memory usage
- using murmurhash2
- faster merging
- more memory efficient fast field writer (@lnicola )
- better handling of collisions
- lesser memory usage
- Added API, most notably to iterate over ranges of terms (@fulmicoton)
- Bugfix that was preventing to unmap segment files, on index drop (@fulmicoton)
- Made the doc! macro public (@fulmicoton)
- Added an alternative implementation of the streaming dictionary (@fulmicoton)
Tantivy 0.3.1
==========================
- Expose a method to trigger files garbage collection
Tantivy 0.3
==========================
Special thanks to @Kodraus @lnicola @Ameobea @manuel-woelker @celaus
for their contribution to this release.
Thanks also to everyone in tantivy gitter chat
for their advise and company :)
https://gitter.im/tantivy-search/tantivy
<https://gitter.im/tantivy-search/tantivy>
Warning:
@@ -269,19 +477,16 @@ code and index format.
You should not expect backward compatibility before
tantivy 1.0.
New Features
------------
- Delete. You can now delete documents from an index.
- Support for windows (Thanks to @lnicola)
Various Bugfixes & small improvements
----------------------------------------
- Added CI for Windows (https://ci.appveyor.com/project/fulmicoton/tantivy)
- Added CI for Windows (<https://ci.appveyor.com/project/fulmicoton/tantivy>)
Thanks to @KodrAus ! (#108)
- Various dependy version update (Thanks to @Ameobea) #76
- Fixed several race conditions in `Index.wait_merge_threads`
@@ -293,7 +498,3 @@ Thanks to @KodrAus ! (#108)
- Building binary targets for tantivy-cli (Thanks to @KodrAus)
- Misc invisible bug fixes, and code cleanup.
- Use


@@ -1,68 +1,83 @@
[package]
name = "tantivy"
version = "0.10.1"
version = "0.18.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
description = """Search engine library"""
documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy"
documentation = "https://docs.rs/tantivy/"
homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"
edition = "2021"
[dependencies]
base64 = "0.10.0"
byteorder = "1.0"
once_cell = "0.2"
regex = "1.0"
tantivy-fst = "0.1"
memmap = {version = "0.7", optional=true}
lz4 = {version="1.20", optional=true}
snap = {version="0.2"}
atomicwrites = {version="0.2.2", optional=true}
tempfile = "3.0"
log = "0.4"
combine = ">=3.6.0,<4.0.0"
tempdir = "0.3"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
num_cpus = "1.2"
fs2={version="0.4", optional=true}
itertools = "0.8"
levenshtein_automata = {version="0.1", features=["fst_automaton"]}
notify = {version="4", optional=true}
bit-set = "0.5"
uuid = { version = "0.7.2", features = ["v4", "serde"] }
crossbeam = "0.5"
futures = "0.1"
futures-cpupool = "0.1"
owning_ref = "0.4"
stable_deref_trait = "1.0.0"
rust-stemmers = "1.1"
downcast-rs = { version="1.0" }
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.2"
fnv = "1.0.6"
owned-read = "0.4"
failure = "0.1"
oneshot = "0.1.3"
base64 = "0.13.0"
byteorder = "1.4.3"
crc32fast = "1.3.2"
once_cell = "1.10.0"
regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
tantivy-fst = "0.3.0"
memmap2 = { version = "0.5.3", optional = true }
lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
brotli = { version = "3.3.4", optional = true }
zstd = { version = "0.11", optional = true }
snap = { version = "1.0.5", optional = true }
tempfile = { version = "3.3.0", optional = true }
log = "0.4.16"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0.79"
num_cpus = "1.13.1"
fs2={ version = "0.4.3", optional = true }
levenshtein_automata = "0.2.1"
uuid = { version = "1.0.0", features = ["v4", "serde"] }
crossbeam-channel = "0.5.4"
tantivy-query-grammar = { version="0.18.0", path="./query-grammar" }
tantivy-bitpacker = { version="0.2", path="./bitpacker" }
common = { version = "0.3", path = "./common/", package = "tantivy-common" }
fastfield_codecs = { version="0.2", path="./fastfield_codecs", default-features = false }
ownedbytes = { version="0.3", path="./ownedbytes" }
stable_deref_trait = "1.2.0"
rust-stemmers = "1.2.0"
downcast-rs = "1.2.0"
bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
census = "0.4.0"
fnv = "1.0.7"
thiserror = "1.0.30"
htmlescape = "0.3.1"
fail = "0.3"
scoped-pool = "1.0"
murmurhash32 = "0.2"
chrono = "0.4"
smallvec = "0.6"
fail = "0.5.0"
murmurhash32 = "0.2.0"
time = { version = "0.3.10", features = ["serde-well-known"] }
smallvec = "1.8.0"
rayon = "1.5.2"
lru = "0.7.5"
fastdivide = "0.4.0"
itertools = "0.10.3"
measure_time = "0.8.2"
pretty_assertions = "1.2.1"
serde_cbor = { version = "0.11.2", optional = true }
async-trait = "0.1.53"
arc-swap = "1.5.0"
[target.'cfg(windows)'.dependencies]
winapi = "0.3"
winapi = "0.3.9"
[dev-dependencies]
rand = "0.7"
maplit = "1"
matches = "0.1.8"
time = "0.1.42"
rand = "0.8.5"
maplit = "1.0.2"
matches = "0.1.9"
proptest = "1.0.0"
criterion = "0.3.5"
test-log = "0.2.10"
env_logger = "0.9.0"
pprof = { version = "0.10.0", features = ["flamegraph", "criterion"] }
futures = "0.3.21"
[dev-dependencies.fail]
version = "0.5.0"
features = ["failpoints"]
[profile.release]
opt-level = 3
@@ -74,19 +89,21 @@ debug-assertions = true
overflow-checks = true
[features]
default = ["mmap"]
mmap = ["atomicwrites", "fs2", "memmap", "notify"]
lz4-compression = ["lz4"]
default = ["mmap", "lz4-compression" ]
mmap = ["fs2", "tempfile", "memmap2"]
brotli-compression = ["brotli"]
lz4-compression = ["lz4_flex"]
snappy-compression = ["snap"]
zstd-compression = ["zstd"]
failpoints = ["fail/failpoints"]
unstable = [] # useful for benches.
wasm-bindgen = ["uuid/wasm-bindgen"]
[badges]
travis-ci = { repository = "tantivy-search/tantivy" }
[dev-dependencies.fail]
features = ["failpoints"]
quickwit = ["serde_cbor"]
[workspace]
members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes"]
# Following the "fail" crate best practises, we isolate
# tests that define specific behavior in fail check points
@@ -98,4 +115,13 @@ features = ["failpoints"]
[[test]]
name = "failpoints"
path = "tests/failpoints/mod.rs"
required-features = ["fail/failpoints"]
required-features = ["fail/failpoints"]
[[bench]]
name = "analyzer"
harness = false
[[bench]]
name = "index-bench"
harness = false

6
Makefile Normal file

@@ -0,0 +1,6 @@
test:
echo "Run test only... No examples."
cargo test --tests --lib
fmt:
cargo +nightly fmt --all

148
README.md

@@ -1,106 +1,93 @@
[![Build Status](https://travis-ci.org/tantivy-search/tantivy.svg?branch=master)](https://travis-ci.org/tantivy-search/tantivy)
[![codecov](https://codecov.io/gh/tantivy-search/tantivy/branch/master/graph/badge.svg)](https://codecov.io/gh/tantivy-search/tantivy)
[![Join the chat at https://gitter.im/tantivy-search/tantivy](https://badges.gitter.im/tantivy-search/tantivy.svg)](https://gitter.im/tantivy-search/tantivy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Docs](https://docs.rs/tantivy/badge.svg)](https://docs.rs/crate/tantivy/)
[![Build Status](https://github.com/quickwit-oss/tantivy/actions/workflows/test.yml/badge.svg)](https://github.com/quickwit-oss/tantivy/actions/workflows/test.yml)
[![codecov](https://codecov.io/gh/quickwit-oss/tantivy/branch/main/graph/badge.svg)](https://codecov.io/gh/quickwit-oss/tantivy)
[![Join the chat at https://discord.gg/MT27AG5EVE](https://shields.io/discord/908281611840282624?label=chat%20on%20discord)](https://discord.gg/MT27AG5EVE)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![Build status](https://ci.appveyor.com/api/projects/status/r7nb13kj23u8m9pj/branch/master?svg=true)](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
[![Crates.io](https://img.shields.io/crates/v/tantivy.svg)](https://crates.io/crates/tantivy)
[![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton)
![Tantivy](https://tantivy-search.github.io/logo/tantivy-logo.png)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/0)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/0)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/1)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/1)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/2)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/2)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/3)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/3)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/4)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/4)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/5)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/5)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/6)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/6)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/7)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/7)
**Tantivy** is a **full-text search engine library** written in Rust.
[![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
**Tantivy** is a **full text search engine library** written in rust.
It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) and [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
an off-the-shelf search engine server, but rather a crate that can be used
to build such a search engine.
Tantivy is, in fact, strongly inspired by Lucene's design.
If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our search engine built on top of Tantivy.
# Benchmark
Tantivy is typically faster than Lucene, but the results will depend on
the nature of the queries in your workload.
The following [benchmark](https://tantivy-search.github.io/bench/) breakdowns
performance for different types of queries/collections.
The following [benchmark](https://tantivy-search.github.io/bench/) break downs
performance for different type of queries / collection.
Your mileage WILL vary depending on the nature of queries and their load.
<img src="doc/assets/images/searchbenchmark.png">
# Features
- Full-text search
- Configurable tokenizer. (stemming available for 17 latin languages. Third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)) and [Japanese](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)
- Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy), [Vaporetto](https://crates.io/crates/vaporetto_tantivy), and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
- Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
- Tiny startup time (<10ms), perfect for command line tools
- BM25 scoring (the same as lucene)
- Natural query language `(michael AND jackson) OR "king of pop"`
- Phrase queries search (`"michael jackson"`)
- Tiny startup time (<10ms), perfect for command-line tools
- BM25 scoring (the same as Lucene)
- Natural query language (e.g. `(michael AND jackson) OR "king of pop"`)
- Phrase queries search (e.g. `"michael jackson"`)
- Incremental indexing
- Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop)
- Mmap directory
- SIMD integer compression when the platform/CPU includes the SSE2 instruction set.
- Single valued and multivalued u64, i64 and f64 fast fields (equivalent of doc values in Lucene)
- SIMD integer compression when the platform/CPU includes the SSE2 instruction set
- Single valued and multivalued u64, i64, and f64 fast fields (equivalent of doc values in Lucene)
- `&[u8]` fast fields
- Text, i64, u64, f64, dates and hierarchical facet fields
- Text, i64, u64, f64, dates, and hierarchical facet fields
- LZ4 compressed document store
- Range queries
- Faceted search
- Configurable indexing (optional term frequency and position indexing)
- JSON Field
- Aggregation Collector: range buckets, average, and stats metrics
- LogMergePolicy with deletes
- Searcher Warmer API
- Cheesy logo with a horse
# Non-features
## Non-features
- Distributed search is out of the scope of tantivy. That being said, tantivy is meant as a
library upon which one could build a distributed search. Serializable/mergeable collector state for instance,
are within the scope of tantivy.
# Supported OS and compiler
Tantivy works on stable rust (>= 1.27) and supports Linux, MacOS and Windows.
Distributed search is out of the scope of Tantivy, but if you are looking for this feature, check out [Quickwit](https://github.com/quickwit-oss/quickwit/).
# Getting started
- [tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli).
`tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
index documents and search via the CLI or a small server with a REST API.
It will walk you through getting a wikipedia search engine up and running in a few minutes.
- [reference doc for the last released version](https://docs.rs/tantivy/)
Tantivy works on stable Rust (>= 1.27) and supports Linux, macOS, and Windows.
- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
- [tantivy-cli and its tutorial](https://github.com/quickwit-oss/tantivy-cli) - `tantivy-cli` is an actual command-line interface that makes it easy for you to create a search engine,
index documents, and search via the CLI or a small server with a REST API.
It walks you through getting a Wikipedia search engine up and running in a few minutes.
- [Reference doc for the last released version](https://docs.rs/tantivy/)
# How can I support this project?
There are many ways to support this project.
There are many ways to support this project.
- Use tantivy and tell us about your experience on [gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
- Use Tantivy and tell us about your experience on [Discord](https://discord.gg/MT27AG5EVE) or by email (paul.masurel@gmail.com)
- Report bugs
- Write a blog post
- Help with documentation by asking questions or submitting PRs
- Contribute code (you can join [our gitter](https://gitter.im/tantivy-search/tantivy) )
- Talk about tantivy around you
- Drop a word on on [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton) or even [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
- Contribute code (you can join [our Discord server](https://discord.gg/MT27AG5EVE))
- Talk about Tantivy around you
# Contributing code
We use the GitHub Pull Request workflow - reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
## Clone and build locally
Tantivy compiles on stable rust but requires `Rust >= 1.27`.
To check out and run tests, you can simply run :
Tantivy compiles on stable Rust but requires `Rust >= 1.27`.
To check out and run tests, you can simply run:
```bash
git clone https://github.com/tantivy-search/tantivy.git
git clone https://github.com/quickwit-oss/tantivy.git
cd tantivy
cargo build
```
@@ -108,7 +95,7 @@ To check out and run tests, you can simply run :
## Run tests
Some tests will not run with just `cargo test` because of `fail-rs`.
To run the tests exhaustively, run `./run-tests.sh`
To run the tests exhaustively, run `./run-tests.sh`.
## Debug
@@ -116,13 +103,13 @@ You might find it useful to step through the programme with a debugger.
### A failing test
Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build` to guarantee that `target/` dir exists. Use this bash script to find the most name of the most recent debug build of tantivy and run it under rust-gdb.
Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build` to guarantee that the `target/` directory exists. Use this bash script to find the name of the most recent debug build of Tantivy and run it under `rust-gdb`:
```bash
find target/debug/ -maxdepth 1 -executable -type f -name "tantivy*" -printf '%TY-%Tm-%Td %TT %p\n' | sort -r | cut -d " " -f 3 | xargs -I RECENT_DBG_TANTIVY rust-gdb RECENT_DBG_TANTIVY
```
Now that you are in rust-gdb, you can set breakpoints on lines and methods that match your source-code and run the debug executable with flags that you normally pass to `cargo test` to like this
Now that you are in `rust-gdb`, you can set breakpoints on lines and methods that match your source code and run the debug executable with flags that you normally pass to `cargo test` like this:
```bash
$gdb run --test-threads 1 --test $NAME_OF_TEST
@@ -130,9 +117,52 @@ $gdb run --test-threads 1 --test $NAME_OF_TEST
### An example
By default, rustc compiles everything in the `examples/` dir in debug mode. This makes it easy for you to make examples to reproduce bugs.
By default, `rustc` compiles everything in the `examples/` directory in debug mode. This makes it easy for you to make examples to reproduce bugs:
```bash
rust-gdb target/debug/examples/$EXAMPLE_NAME
$ gdb run
```
# Companies Using Tantivy
<p align="left">
<img align="center" src="doc/assets/images/Nuclia.png#gh-light-mode-only" alt="Nuclia" height="25" width="auto" /> &nbsp;
<img align="center" src="doc/assets/images/humanfirst.png#gh-light-mode-only" alt="Humanfirst.ai" height="30" width="auto" />
<img align="center" src="doc/assets/images/element.io.svg#gh-light-mode-only" alt="Element.io" height="25" width="auto" />
<img align="center" src="doc/assets/images/nuclia-dark-theme.png#gh-dark-mode-only" alt="Nuclia" height="35" width="auto" /> &nbsp;
<img align="center" src="doc/assets/images/humanfirst.ai-dark-theme.png#gh-dark-mode-only" alt="Humanfirst.ai" height="25" width="auto" />&nbsp; &nbsp;
<img align="center" src="doc/assets/images/element-dark-theme.png#gh-dark-mode-only" alt="Element.io" height="25" width="auto" />
</p>
# FAQ
### Can I use Tantivy in other languages?
- Python → [tantivy-py](https://github.com/quickwit-oss/tantivy-py)
- Ruby → [tantiny](https://github.com/baygeldin/tantiny)
You can also find other bindings on [GitHub](https://github.com/search?q=tantivy) but they may be less maintained.
### What are some examples of Tantivy use?
- [seshat](https://github.com/matrix-org/seshat/): A matrix message database/indexer
- [tantiny](https://github.com/baygeldin/tantiny): Tiny full-text search for Ruby
- [lnx](https://github.com/lnx-search/lnx): adaptable, typo tolerant search engine with a REST API
- and [more](https://github.com/search?q=tantivy)!
### On average, how much faster is Tantivy compared to Lucene?
- According to our [search latency benchmark](https://tantivy-search.github.io/bench/), Tantivy is approximately 2x faster than Lucene.
### Does tantivy support incremental indexing?
- Yes.
### How can I edit documents?
- Data in tantivy is immutable. To edit a document, the document needs to be deleted and reindexed.
### When will my documents be searchable during indexing?
- Documents will be searchable after a `commit` is called on an `IndexWriter`. Existing `IndexReader`s will also need to be reloaded in order to reflect the changes. Finally, changes are only visible to newly acquired `Searcher`.


@@ -18,5 +18,6 @@ install:
build: false
test_script:
- REM SET RUST_LOG=tantivy,test & cargo test --verbose --no-default-features --features mmap
- REM SET RUST_LOG=tantivy,test & cargo test --all --verbose --no-default-features --features lz4-compression --features mmap
- REM SET RUST_LOG=tantivy,test & cargo test test_store --verbose --no-default-features --features lz4-compression --features snappy-compression --features brotli-compression --features mmap
- REM SET RUST_BACKTRACE=1 & cargo build --examples

3774
benches/alice.txt Normal file

File diff suppressed because it is too large.

22
benches/analyzer.rs Normal file

@@ -0,0 +1,22 @@
use criterion::{criterion_group, criterion_main, Criterion};
use tantivy::tokenizer::TokenizerManager;
const ALICE_TXT: &str = include_str!("alice.txt");
pub fn criterion_benchmark(c: &mut Criterion) {
let tokenizer_manager = TokenizerManager::default();
let tokenizer = tokenizer_manager.get("default").unwrap();
c.bench_function("default-tokenize-alice", |b| {
b.iter(|| {
let mut word_count = 0;
let mut token_stream = tokenizer.token_stream(ALICE_TXT);
while token_stream.advance() {
word_count += 1;
}
assert_eq!(word_count, 30_731);
})
});
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

100000
benches/hdfs.json Normal file

File diff suppressed because it is too large.

121
benches/index-bench.rs Normal file

@@ -0,0 +1,121 @@
use criterion::{criterion_group, criterion_main, Criterion};
use pprof::criterion::{Output, PProfProfiler};
use tantivy::schema::{INDEXED, STORED, STRING, TEXT};
use tantivy::Index;
const HDFS_LOGS: &str = include_str!("hdfs.json");
const NUM_REPEATS: usize = 2;
pub fn hdfs_index_benchmark(c: &mut Criterion) {
let schema = {
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
schema_builder.add_u64_field("timestamp", INDEXED);
schema_builder.add_text_field("body", TEXT);
schema_builder.add_text_field("severity", STRING);
schema_builder.build()
};
let schema_with_store = {
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
schema_builder.add_u64_field("timestamp", INDEXED | STORED);
schema_builder.add_text_field("body", TEXT | STORED);
schema_builder.add_text_field("severity", STRING | STORED);
schema_builder.build()
};
let dynamic_schema = {
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
schema_builder.add_json_field("json", TEXT);
schema_builder.build()
};
let mut group = c.benchmark_group("index-hdfs");
group.sample_size(20);
group.bench_function("index-hdfs-no-commit", |b| {
b.iter(|| {
let index = Index::create_in_ram(schema.clone());
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split("\n") {
let doc = schema.parse_document(doc_json).unwrap();
index_writer.add_document(doc).unwrap();
}
}
})
});
group.bench_function("index-hdfs-with-commit", |b| {
b.iter(|| {
let index = Index::create_in_ram(schema.clone());
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split("\n") {
let doc = schema.parse_document(doc_json).unwrap();
index_writer.add_document(doc).unwrap();
}
}
index_writer.commit().unwrap();
})
});
group.bench_function("index-hdfs-no-commit-with-docstore", |b| {
b.iter(|| {
let index = Index::create_in_ram(schema_with_store.clone());
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split("\n") {
let doc = schema.parse_document(doc_json).unwrap();
index_writer.add_document(doc).unwrap();
}
}
})
});
group.bench_function("index-hdfs-with-commit-with-docstore", |b| {
b.iter(|| {
let index = Index::create_in_ram(schema_with_store.clone());
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split("\n") {
let doc = schema.parse_document(doc_json).unwrap();
index_writer.add_document(doc).unwrap();
}
}
index_writer.commit().unwrap();
})
});
group.bench_function("index-hdfs-no-commit-json-without-docstore", |b| {
b.iter(|| {
let index = Index::create_in_ram(dynamic_schema.clone());
let json_field = dynamic_schema.get_field("json").unwrap();
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split("\n") {
let json_val: serde_json::Map<String, serde_json::Value> =
serde_json::from_str(doc_json).unwrap();
let doc = tantivy::doc!(json_field=>json_val);
index_writer.add_document(doc).unwrap();
}
}
index_writer.commit().unwrap();
})
});
group.bench_function("index-hdfs-with-commit-json-without-docstore", |b| {
b.iter(|| {
let index = Index::create_in_ram(dynamic_schema.clone());
let json_field = dynamic_schema.get_field("json").unwrap();
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split("\n") {
let json_val: serde_json::Map<String, serde_json::Value> =
serde_json::from_str(doc_json).unwrap();
let doc = tantivy::doc!(json_field=>json_val);
index_writer.add_document(doc).unwrap();
}
}
index_writer.commit().unwrap();
})
});
}
criterion_group! {
name = benches;
config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = hdfs_index_benchmark
}
criterion_main!(benches);

15
bitpacker/Cargo.toml Normal file

@@ -0,0 +1,15 @@
[package]
name = "tantivy-bitpacker"
version = "0.2.0"
edition = "2021"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = []
description = """Tantivy-sub crate: bitpacking"""
repository = "https://github.com/quickwit-oss/tantivy"
keywords = []
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]


@@ -0,0 +1,35 @@
#![feature(test)]
extern crate test;
#[cfg(test)]
mod tests {
use tantivy_bitpacker::BlockedBitpacker;
use test::Bencher;
#[bench]
fn bench_blockedbitp_read(b: &mut Bencher) {
let mut blocked_bitpacker = BlockedBitpacker::new();
for val in 0..=21500 {
blocked_bitpacker.add(val * val);
}
b.iter(|| {
let mut out = 0;
for val in 0..=21500 {
out = blocked_bitpacker.get(val);
}
out
});
}
#[bench]
fn bench_blockedbitp_create(b: &mut Bencher) {
b.iter(|| {
let mut blocked_bitpacker = BlockedBitpacker::new();
for val in 0..=21500 {
blocked_bitpacker.add(val * val);
}
blocked_bitpacker
});
}
}


@@ -1,12 +1,15 @@
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use std::convert::TryInto;
use std::io;
use std::ops::Deref;
pub(crate) struct BitPacker {
pub struct BitPacker {
mini_buffer: u64,
mini_buffer_written: usize,
}
impl Default for BitPacker {
fn default() -> Self {
BitPacker::new()
}
}
impl BitPacker {
pub fn new() -> BitPacker {
BitPacker {
@@ -15,6 +18,7 @@ impl BitPacker {
}
}
#[inline]
pub fn write<TWrite: io::Write>(
&mut self,
val: u64,
@@ -25,14 +29,14 @@ impl BitPacker {
let num_bits = num_bits as usize;
if self.mini_buffer_written + num_bits > 64 {
self.mini_buffer |= val_u64.wrapping_shl(self.mini_buffer_written as u32);
output.write_u64::<LittleEndian>(self.mini_buffer)?;
output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
self.mini_buffer = val_u64.wrapping_shr((64 - self.mini_buffer_written) as u32);
self.mini_buffer_written = self.mini_buffer_written + num_bits - 64;
} else {
self.mini_buffer |= val_u64 << self.mini_buffer_written;
self.mini_buffer_written += num_bits;
if self.mini_buffer_written == 64 {
output.write_u64::<LittleEndian>(self.mini_buffer)?;
output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
self.mini_buffer_written = 0;
self.mini_buffer = 0u64;
}
@@ -43,10 +47,10 @@ impl BitPacker {
pub fn flush<TWrite: io::Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
if self.mini_buffer_written > 0 {
let num_bytes = (self.mini_buffer_written + 7) / 8;
let mut arr: [u8; 8] = [0u8; 8];
LittleEndian::write_u64(&mut arr, self.mini_buffer);
output.write_all(&arr[..num_bytes])?;
let bytes = self.mini_buffer.to_le_bytes();
output.write_all(&bytes[..num_bytes])?;
self.mini_buffer_written = 0;
self.mini_buffer = 0;
}
Ok(())
}
@@ -59,21 +63,14 @@ impl BitPacker {
}
}
#[derive(Clone)]
pub struct BitUnpacker<Data>
where
Data: Deref<Target = [u8]>,
{
#[derive(Clone, Debug, Default)]
pub struct BitUnpacker {
num_bits: u64,
mask: u64,
data: Data,
}
impl<Data> BitUnpacker<Data>
where
Data: Deref<Target = [u8]>,
{
pub fn new(data: Data, num_bits: u8) -> BitUnpacker<Data> {
impl BitUnpacker {
pub fn new(num_bits: u8) -> BitUnpacker {
let mask: u64 = if num_bits == 64 {
!0u64
} else {
@@ -82,27 +79,31 @@ where
BitUnpacker {
num_bits: u64::from(num_bits),
mask,
data,
}
}
pub fn get(&self, idx: u64) -> u64 {
pub fn bit_width(&self) -> u8 {
self.num_bits as u8
}
#[inline]
pub fn get(&self, idx: u64, data: &[u8]) -> u64 {
if self.num_bits == 0 {
return 0u64;
}
let data: &[u8] = &*self.data;
let num_bits = self.num_bits;
let mask = self.mask;
let addr_in_bits = idx * num_bits;
let addr_in_bits = idx * self.num_bits;
let addr = addr_in_bits >> 3;
let bit_shift = addr_in_bits & 7;
debug_assert!(
addr + 8 <= data.len() as u64,
"The fast field field should have been padded with 7 bytes."
);
let val_unshifted_unmasked: u64 = LittleEndian::read_u64(&data[(addr as usize)..]);
let bytes: [u8; 8] = (&data[(addr as usize)..(addr as usize) + 8])
.try_into()
.unwrap();
let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
val_shifted & mask
val_shifted & self.mask
}
}
@@ -110,7 +111,7 @@ where
mod test {
use super::{BitPacker, BitUnpacker};
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>, Vec<u8>) {
let mut data = Vec::new();
let mut bitpacker = BitPacker::new();
let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
@@ -122,14 +123,14 @@ mod test {
}
bitpacker.close(&mut data).unwrap();
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
let bitunpacker = BitUnpacker::new(data, num_bits);
(bitunpacker, vals)
let bitunpacker = BitUnpacker::new(num_bits);
(bitunpacker, vals, data)
}
fn test_bitpacker_util(len: usize, num_bits: u8) {
let (bitunpacker, vals) = create_fastfield_bitpacker(len, num_bits);
let (bitunpacker, vals, data) = create_fastfield_bitpacker(len, num_bits);
for (i, val) in vals.iter().enumerate() {
assert_eq!(bitunpacker.get(i as u64), *val);
assert_eq!(bitunpacker.get(i as u64, &data), *val);
}
}


@@ -0,0 +1,179 @@
use super::bitpacker::BitPacker;
use super::compute_num_bits;
use crate::{minmax, BitUnpacker};
const BLOCK_SIZE: usize = 128;
/// `BlockedBitpacker` compresses data in blocks of
/// 128 elements, while keeping an index on it
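///
/// A minimal, illustrative usage sketch (not part of the original file):
/// ```ignore
/// let mut bitpacker = BlockedBitpacker::new();
/// bitpacker.add(10u64);
/// bitpacker.add(20u64);
/// // values are retrievable before and after a 128-element block is flushed
/// assert_eq!(bitpacker.get(0), 10);
/// assert_eq!(bitpacker.get(1), 20);
/// ```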
#[derive(Debug, Clone)]
pub struct BlockedBitpacker {
// bitpacked blocks
compressed_blocks: Vec<u8>,
// uncompressed data, collected until BLOCK_SIZE
buffer: Vec<u64>,
offset_and_bits: Vec<BlockedBitpackerEntryMetaData>,
}
impl Default for BlockedBitpacker {
fn default() -> Self {
BlockedBitpacker::new()
}
}
/// `BlockedBitpackerEntryMetaData` encodes the
/// offset and bit_width into a u64 bit field
///
/// This saves some space, since 7byte is more
/// than enough and also keeps the access fast
/// because of alignment
#[derive(Debug, Clone, Default)]
struct BlockedBitpackerEntryMetaData {
encoded: u64,
base_value: u64,
}
impl BlockedBitpackerEntryMetaData {
fn new(offset: u64, num_bits: u8, base_value: u64) -> Self {
let encoded = offset | (num_bits as u64) << (64 - 8);
Self {
encoded,
base_value,
}
}
fn offset(&self) -> u64 {
(self.encoded << 8) >> 8
}
fn num_bits(&self) -> u8 {
(self.encoded >> 56) as u8
}
fn base_value(&self) -> u64 {
self.base_value
}
}
#[test]
fn metadata_test() {
let meta = BlockedBitpackerEntryMetaData::new(50000, 6, 40000);
assert_eq!(meta.offset(), 50000);
assert_eq!(meta.num_bits(), 6);
}
fn mem_usage<T>(items: &Vec<T>) -> usize {
items.capacity() * std::mem::size_of::<T>()
}
impl BlockedBitpacker {
pub fn new() -> Self {
let mut compressed_blocks = vec![];
compressed_blocks.resize(8, 0);
Self {
compressed_blocks,
buffer: vec![],
offset_and_bits: vec![],
}
}
/// The memory used (including children)
pub fn mem_usage(&self) -> usize {
std::mem::size_of::<BlockedBitpacker>()
+ self.compressed_blocks.capacity()
+ mem_usage(&self.offset_and_bits)
+ mem_usage(&self.buffer)
}
#[inline]
pub fn add(&mut self, val: u64) {
self.buffer.push(val);
if self.buffer.len() == BLOCK_SIZE as usize {
self.flush();
}
}
pub fn flush(&mut self) {
if let Some((min_value, max_value)) = minmax(self.buffer.iter()) {
let mut bit_packer = BitPacker::new();
let num_bits_block = compute_num_bits(*max_value - min_value);
// todo performance: the padding handling could be done better, e.g. use a slice and
// return num_bytes written from bitpacker
self.compressed_blocks
.resize(self.compressed_blocks.len() - 8, 0); // remove padding for bitpacker
let offset = self.compressed_blocks.len() as u64;
// todo performance: for some bit_width we
// can encode multiple vals into the
// mini_buffer before checking to flush
// (to be done in BitPacker)
for val in self.buffer.iter() {
bit_packer
.write(
*val - min_value,
num_bits_block,
&mut self.compressed_blocks,
)
.expect("cannot write bitpacking to output"); // write to in memory can't fail
}
bit_packer.flush(&mut self.compressed_blocks).unwrap();
self.offset_and_bits
.push(BlockedBitpackerEntryMetaData::new(
offset,
num_bits_block,
*min_value,
));
self.buffer.clear();
self.compressed_blocks
.resize(self.compressed_blocks.len() + 8, 0); // add padding for bitpacker
}
}
#[inline]
pub fn get(&self, idx: usize) -> u64 {
let metadata_pos = idx / BLOCK_SIZE as usize;
let pos_in_block = idx % BLOCK_SIZE as usize;
if let Some(metadata) = self.offset_and_bits.get(metadata_pos) {
let unpacked = BitUnpacker::new(metadata.num_bits()).get(
pos_in_block as u64,
&self.compressed_blocks[metadata.offset() as usize..],
);
unpacked + metadata.base_value()
} else {
self.buffer[pos_in_block]
}
}
pub fn iter(&self) -> impl Iterator<Item = u64> + '_ {
// todo performance: we could decompress a whole block and cache it instead
let bitpacked_elems = self.offset_and_bits.len() * BLOCK_SIZE;
let iter = (0..bitpacked_elems)
.map(move |idx| self.get(idx))
.chain(self.buffer.iter().cloned());
iter
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn blocked_bitpacker_empty() {
let blocked_bitpacker = BlockedBitpacker::new();
assert_eq!(blocked_bitpacker.iter().collect::<Vec<u64>>(), vec![]);
}
#[test]
fn blocked_bitpacker_one() {
let mut blocked_bitpacker = BlockedBitpacker::new();
blocked_bitpacker.add(50000);
assert_eq!(blocked_bitpacker.get(0), 50000);
assert_eq!(blocked_bitpacker.iter().collect::<Vec<u64>>(), vec![50000]);
}
#[test]
fn blocked_bitpacker_test() {
let mut blocked_bitpacker = BlockedBitpacker::new();
for val in 0..21500 {
blocked_bitpacker.add(val);
}
for val in 0..21500 {
assert_eq!(blocked_bitpacker.get(val as usize), val);
}
assert_eq!(blocked_bitpacker.iter().count(), 21500);
assert_eq!(blocked_bitpacker.iter().last().unwrap(), 21499);
}
}

bitpacker/src/lib.rs (new file)

@@ -0,0 +1,80 @@
mod bitpacker;
mod blocked_bitpacker;
pub use crate::bitpacker::{BitPacker, BitUnpacker};
pub use crate::blocked_bitpacker::BlockedBitpacker;
/// Computes the number of bits that will be used for bitpacking.
///
/// In general the target is the minimum number of bits
/// required to express the amplitude given in argument.
///
/// e.g. If the amplitude is 10, we can store all ints on simply 4bits.
///
/// The logic is slightly more convoluted here as for optimization
/// reasons, we want to ensure that a value spawns over at most 8 bytes
/// of aligned bytes.
///
/// Spanning over 9 bytes is possible for instance, if we do
/// bitpacking with an amplitude of 63 bits.
/// In this case, the second int will start on bit
/// 63 (which belongs to byte 7) and ends at byte 15;
/// Hence 9 bytes (from byte 7 to byte 15 included).
///
/// To avoid this, we force the number of bits to 64bits
/// when the result is greater than `64-8 = 56 bits`.
///
/// Note that this only affects rare use cases spawning over
/// a very large range of values. Even in this case, it results
/// in an extra cost of at most 12% compared to the optimal
/// number of bits.
pub fn compute_num_bits(n: u64) -> u8 {
let amplitude = (64u32 - n.leading_zeros()) as u8;
if amplitude <= 64 - 8 {
amplitude
} else {
64
}
}
pub fn minmax<I, T>(mut vals: I) -> Option<(T, T)>
where
I: Iterator<Item = T>,
T: Copy + Ord,
{
if let Some(first_el) = vals.next() {
return Some(vals.fold((first_el, first_el), |(min_val, max_val), el| {
(min_val.min(el), max_val.max(el))
}));
}
None
}
#[test]
fn test_compute_num_bits() {
assert_eq!(compute_num_bits(1), 1u8);
assert_eq!(compute_num_bits(0), 0u8);
assert_eq!(compute_num_bits(2), 2u8);
assert_eq!(compute_num_bits(3), 2u8);
assert_eq!(compute_num_bits(4), 3u8);
assert_eq!(compute_num_bits(255), 8u8);
assert_eq!(compute_num_bits(256), 9u8);
assert_eq!(compute_num_bits(5_000_000_000), 33u8);
}
#[test]
fn test_minmax_empty() {
let vals: Vec<u32> = vec![];
assert_eq!(minmax(vals.into_iter()), None);
}
#[test]
fn test_minmax_one() {
assert_eq!(minmax(vec![1].into_iter()), Some((1, 1)));
}
#[test]
fn test_minmax_two() {
assert_eq!(minmax(vec![1, 2].into_iter()), Some((1, 2)));
assert_eq!(minmax(vec![2, 1].into_iter()), Some((1, 2)));
}


@@ -7,7 +7,7 @@ set -ex
main() {
if [ ! -z $CODECOV ]; then
echo "Codecov"
cargo build --verbose && cargo coverage --verbose && bash <(curl -s https://codecov.io/bash) -s target/kcov
cargo build --verbose && cargo coverage --verbose --all && bash <(curl -s https://codecov.io/bash) -s target/kcov
else
echo "Build"
cross build --target $TARGET
@@ -15,7 +15,8 @@ main() {
return
fi
echo "Test"
cross test --target $TARGET --no-default-features --features mmap -- --test-threads 1
cross test --target $TARGET --no-default-features --features mmap
cross test --target $TARGET --no-default-features --features mmap query-grammar
fi
for example in $(ls examples/*.rs)
do

common/Cargo.toml (new file)

@@ -0,0 +1,17 @@
[package]
name = "tantivy-common"
version = "0.3.0"
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2021"
description = "common traits and utility functions used by multiple tantivy subcrates"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
byteorder = "1.4.3"
ownedbytes = { version="0.3", path="../ownedbytes" }
[dev-dependencies]
proptest = "1.0.0"
rand = "0.8.4"

common/src/bitset.rs (new file)

@@ -0,0 +1,745 @@
use std::convert::TryInto;
use std::io::Write;
use std::{fmt, io, u64};
use ownedbytes::OwnedBytes;
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct TinySet(u64);
impl fmt::Debug for TinySet {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.into_iter().collect::<Vec<u32>>().fmt(f)
}
}
pub struct TinySetIterator(TinySet);
impl Iterator for TinySetIterator {
type Item = u32;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.0.pop_lowest()
}
}
impl IntoIterator for TinySet {
type Item = u32;
type IntoIter = TinySetIterator;
fn into_iter(self) -> Self::IntoIter {
TinySetIterator(self)
}
}
impl TinySet {
pub fn serialize<T: Write>(&self, writer: &mut T) -> io::Result<()> {
writer.write_all(self.0.to_le_bytes().as_ref())
}
pub fn into_bytes(self) -> [u8; 8] {
self.0.to_le_bytes()
}
#[inline]
pub fn deserialize(data: [u8; 8]) -> Self {
let val: u64 = u64::from_le_bytes(data);
TinySet(val)
}
/// Returns an empty `TinySet`.
#[inline]
pub fn empty() -> TinySet {
TinySet(0u64)
}
/// Returns a full `TinySet`.
#[inline]
pub fn full() -> TinySet {
TinySet::empty().complement()
}
pub fn clear(&mut self) {
self.0 = 0u64;
}
/// Returns the complement of the set in `[0, 64[`.
///
/// Careful on making this function public, as it will break the padding handling in the last
/// bucket.
#[inline]
fn complement(self) -> TinySet {
TinySet(!self.0)
}
/// Returns true iff the `TinySet` contains the element `el`.
#[inline]
pub fn contains(self, el: u32) -> bool {
!self.intersect(TinySet::singleton(el)).is_empty()
}
/// Returns the number of elements in the TinySet.
#[inline]
pub fn len(self) -> u32 {
self.0.count_ones()
}
/// Returns the intersection of `self` and `other`
#[inline]
#[must_use]
pub fn intersect(self, other: TinySet) -> TinySet {
TinySet(self.0 & other.0)
}
/// Creates a new `TinySet` containing only one element
/// within `[0; 64[`
#[inline]
pub fn singleton(el: u32) -> TinySet {
TinySet(1u64 << u64::from(el))
}
/// Insert a new element within [0..64)
#[inline]
#[must_use]
pub fn insert(self, el: u32) -> TinySet {
self.union(TinySet::singleton(el))
}
/// Removes an element within [0..64)
#[inline]
#[must_use]
pub fn remove(self, el: u32) -> TinySet {
self.intersect(TinySet::singleton(el).complement())
}
/// Insert a new element within [0..64)
///
/// returns true if the set changed
#[inline]
pub fn insert_mut(&mut self, el: u32) -> bool {
let old = *self;
*self = old.insert(el);
old != *self
}
/// Remove a element within [0..64)
///
/// returns true if the set changed
#[inline]
pub fn remove_mut(&mut self, el: u32) -> bool {
let old = *self;
*self = old.remove(el);
old != *self
}
/// Returns the union of two tinysets
#[inline]
#[must_use]
pub fn union(self, other: TinySet) -> TinySet {
TinySet(self.0 | other.0)
}
/// Returns true iff the `TinySet` is empty.
#[inline]
pub fn is_empty(self) -> bool {
self.0 == 0u64
}
/// Returns the lowest element in the `TinySet`
/// and removes it.
#[inline]
pub fn pop_lowest(&mut self) -> Option<u32> {
if self.is_empty() {
None
} else {
let lowest = self.0.trailing_zeros() as u32;
self.0 ^= TinySet::singleton(lowest).0;
Some(lowest)
}
}
/// Returns a `TinySet` than contains all values up
/// to limit excluded.
///
/// The limit is assumed to be strictly lower than 64.
pub fn range_lower(upper_bound: u32) -> TinySet {
TinySet((1u64 << u64::from(upper_bound % 64u32)) - 1u64)
}
/// Returns a `TinySet` that contains all values greater
/// or equal to the given limit, included. (and up to 63)
///
/// The limit is assumed to be strictly lower than 64.
pub fn range_greater_or_equal(from_included: u32) -> TinySet {
TinySet::range_lower(from_included).complement()
}
}
#[derive(Clone)]
pub struct BitSet {
tinysets: Box<[TinySet]>,
len: u64,
max_value: u32,
}
fn num_buckets(max_val: u32) -> u32 {
(max_val + 63u32) / 64u32
}
impl BitSet {
/// serialize a `BitSet`.
pub fn serialize<T: Write>(&self, writer: &mut T) -> io::Result<()> {
writer.write_all(self.max_value.to_le_bytes().as_ref())?;
for tinyset in self.tinysets.iter().cloned() {
writer.write_all(&tinyset.into_bytes())?;
}
writer.flush()?;
Ok(())
}
/// Create a new `BitSet` that may contain elements
/// within `[0, max_val)`.
pub fn with_max_value(max_value: u32) -> BitSet {
let num_buckets = num_buckets(max_value);
let tinybitsets = vec![TinySet::empty(); num_buckets as usize].into_boxed_slice();
BitSet {
tinysets: tinybitsets,
len: 0,
max_value,
}
}
/// Create a new `BitSet` that may contain elements. Initially all values will be set.
/// within `[0, max_val)`.
pub fn with_max_value_and_full(max_value: u32) -> BitSet {
let num_buckets = num_buckets(max_value);
let mut tinybitsets = vec![TinySet::full(); num_buckets as usize].into_boxed_slice();
// Fix padding
let lower = max_value % 64u32;
if lower != 0 {
tinybitsets[tinybitsets.len() - 1] = TinySet::range_lower(lower);
}
BitSet {
tinysets: tinybitsets,
len: max_value as u64,
max_value,
}
}
/// Removes all elements from the `BitSet`.
pub fn clear(&mut self) {
for tinyset in self.tinysets.iter_mut() {
*tinyset = TinySet::empty();
}
}
/// Intersect with serialized bitset
pub fn intersect_update(&mut self, other: &ReadOnlyBitSet) {
self.intersect_update_with_iter(other.iter_tinysets());
}
/// Intersect with tinysets
fn intersect_update_with_iter(&mut self, other: impl Iterator<Item = TinySet>) {
self.len = 0;
for (left, right) in self.tinysets.iter_mut().zip(other) {
*left = left.intersect(right);
self.len += left.len() as u64;
}
}
/// Returns the number of elements in the `BitSet`.
#[inline]
pub fn len(&self) -> usize {
self.len as usize
}
/// Inserts an element in the `BitSet`
#[inline]
pub fn insert(&mut self, el: u32) {
// we do not check saturated els.
let higher = el / 64u32;
let lower = el % 64u32;
self.len += if self.tinysets[higher as usize].insert_mut(lower) {
1
} else {
0
};
}
/// Inserts an element in the `BitSet`
#[inline]
pub fn remove(&mut self, el: u32) {
// we do not check saturated els.
let higher = el / 64u32;
let lower = el % 64u32;
self.len -= if self.tinysets[higher as usize].remove_mut(lower) {
1
} else {
0
};
}
/// Returns true iff the elements is in the `BitSet`.
#[inline]
pub fn contains(&self, el: u32) -> bool {
self.tinyset(el / 64u32).contains(el % 64)
}
/// Returns the first non-empty `TinySet` associated to a bucket lower
/// or greater than bucket.
///
/// Reminder: the tiny set with the bucket `bucket`, represents the
/// elements from `bucket * 64` to `(bucket+1) * 64`.
pub fn first_non_empty_bucket(&self, bucket: u32) -> Option<u32> {
self.tinysets[bucket as usize..]
.iter()
.cloned()
.position(|tinyset| !tinyset.is_empty())
.map(|delta_bucket| bucket + delta_bucket as u32)
}
#[inline]
pub fn max_value(&self) -> u32 {
self.max_value
}
/// Returns the tiny bitset representing the
/// the set restricted to the number range from
/// `bucket * 64` to `(bucket + 1) * 64`.
pub fn tinyset(&self, bucket: u32) -> TinySet {
self.tinysets[bucket as usize]
}
}
/// Serialized BitSet.
#[derive(Clone)]
pub struct ReadOnlyBitSet {
data: OwnedBytes,
max_value: u32,
}
pub fn intersect_bitsets(left: &ReadOnlyBitSet, other: &ReadOnlyBitSet) -> ReadOnlyBitSet {
assert_eq!(left.max_value(), other.max_value());
assert_eq!(left.data.len(), other.data.len());
let union_tinyset_it = left
.iter_tinysets()
.zip(other.iter_tinysets())
.map(|(left_tinyset, right_tinyset)| left_tinyset.intersect(right_tinyset));
let mut output_dataset: Vec<u8> = Vec::with_capacity(left.data.len());
for tinyset in union_tinyset_it {
output_dataset.extend_from_slice(&tinyset.into_bytes());
}
ReadOnlyBitSet {
data: OwnedBytes::new(output_dataset),
max_value: left.max_value(),
}
}
impl ReadOnlyBitSet {
pub fn open(data: OwnedBytes) -> Self {
let (max_value_data, data) = data.split(4);
assert_eq!(data.len() % 8, 0);
let max_value: u32 = u32::from_le_bytes(max_value_data.as_ref().try_into().unwrap());
ReadOnlyBitSet { data, max_value }
}
/// Number of elements in the bitset.
#[inline]
pub fn len(&self) -> usize {
self.iter_tinysets()
.map(|tinyset| tinyset.len() as usize)
.sum()
}
/// Iterate the tinyset on the fly from serialized data.
#[inline]
fn iter_tinysets(&self) -> impl Iterator<Item = TinySet> + '_ {
self.data.chunks_exact(8).map(move |chunk| {
let tinyset: TinySet = TinySet::deserialize(chunk.try_into().unwrap());
tinyset
})
}
/// Iterate over the positions of the elements.
#[inline]
pub fn iter(&self) -> impl Iterator<Item = u32> + '_ {
self.iter_tinysets()
.enumerate()
.flat_map(move |(chunk_num, tinyset)| {
let chunk_base_val = chunk_num as u32 * 64;
tinyset
.into_iter()
.map(move |val| val + chunk_base_val)
.take_while(move |doc| *doc < self.max_value)
})
}
/// Returns true iff the elements is in the `BitSet`.
#[inline]
pub fn contains(&self, el: u32) -> bool {
let byte_offset = el / 8u32;
let b: u8 = self.data[byte_offset as usize];
let shift = (el % 8) as u8;
b & (1u8 << shift) != 0
}
/// Maximum value the bitset may contain.
/// (Note this is not the maximum value contained in the set.)
///
/// A bitset has an intrinsic capacity.
/// It only stores elements within [0..max_value).
#[inline]
pub fn max_value(&self) -> u32 {
self.max_value
}
/// Number of bytes used in the bitset representation.
pub fn num_bytes(&self) -> usize {
self.data.len()
}
}
impl<'a> From<&'a BitSet> for ReadOnlyBitSet {
fn from(bitset: &'a BitSet) -> ReadOnlyBitSet {
let mut buffer = Vec::with_capacity(bitset.tinysets.len() * 8 + 4);
bitset
.serialize(&mut buffer)
.expect("serializing into a buffer should never fail");
ReadOnlyBitSet::open(OwnedBytes::new(buffer))
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use ownedbytes::OwnedBytes;
use rand::distributions::Bernoulli;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use super::{BitSet, ReadOnlyBitSet, TinySet};
#[test]
fn test_read_serialized_bitset_full_multi() {
for i in 0..1000 {
let bitset = BitSet::with_max_value_and_full(i);
let mut out = vec![];
bitset.serialize(&mut out).unwrap();
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
assert_eq!(bitset.len() as usize, i as usize);
}
}
#[test]
fn test_read_serialized_bitset_full_block() {
let bitset = BitSet::with_max_value_and_full(64);
let mut out = vec![];
bitset.serialize(&mut out).unwrap();
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
assert_eq!(bitset.len() as usize, 64);
}
#[test]
fn test_read_serialized_bitset_full() {
let mut bitset = BitSet::with_max_value_and_full(5);
bitset.remove(3);
let mut out = vec![];
bitset.serialize(&mut out).unwrap();
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
assert_eq!(bitset.len(), 4);
}
#[test]
fn test_bitset_intersect() {
let bitset_serialized = {
let mut bitset = BitSet::with_max_value_and_full(5);
bitset.remove(1);
bitset.remove(3);
let mut out = vec![];
bitset.serialize(&mut out).unwrap();
ReadOnlyBitSet::open(OwnedBytes::new(out))
};
let mut bitset = BitSet::with_max_value_and_full(5);
bitset.remove(1);
bitset.intersect_update(&bitset_serialized);
assert!(bitset.contains(0));
assert!(!bitset.contains(1));
assert!(bitset.contains(2));
assert!(!bitset.contains(3));
assert!(bitset.contains(4));
bitset.intersect_update_with_iter(vec![TinySet::singleton(0)].into_iter());
assert!(bitset.contains(0));
assert!(!bitset.contains(1));
assert!(!bitset.contains(2));
assert!(!bitset.contains(3));
assert!(!bitset.contains(4));
assert_eq!(bitset.len(), 1);
bitset.intersect_update_with_iter(vec![TinySet::singleton(1)].into_iter());
assert!(!bitset.contains(0));
assert!(!bitset.contains(1));
assert!(!bitset.contains(2));
assert!(!bitset.contains(3));
assert!(!bitset.contains(4));
assert_eq!(bitset.len(), 0);
}
#[test]
fn test_read_serialized_bitset_empty() {
let mut bitset = BitSet::with_max_value(5);
bitset.insert(3);
let mut out = vec![];
bitset.serialize(&mut out).unwrap();
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
assert_eq!(bitset.len(), 1);
{
let bitset = BitSet::with_max_value(5);
let mut out = vec![];
bitset.serialize(&mut out).unwrap();
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
assert_eq!(bitset.len(), 0);
}
}
#[test]
fn test_tiny_set_remove() {
{
let mut u = TinySet::empty().insert(63u32).insert(5).remove(63u32);
assert_eq!(u.pop_lowest(), Some(5u32));
assert!(u.pop_lowest().is_none());
}
{
let mut u = TinySet::empty()
.insert(63u32)
.insert(1)
.insert(5)
.remove(63u32);
assert_eq!(u.pop_lowest(), Some(1u32));
assert_eq!(u.pop_lowest(), Some(5u32));
assert!(u.pop_lowest().is_none());
}
{
let mut u = TinySet::empty().insert(1).remove(63u32);
assert_eq!(u.pop_lowest(), Some(1u32));
assert!(u.pop_lowest().is_none());
}
{
let mut u = TinySet::empty().insert(1).remove(1u32);
assert!(u.pop_lowest().is_none());
}
}
#[test]
fn test_tiny_set() {
assert!(TinySet::empty().is_empty());
{
let mut u = TinySet::empty().insert(1u32);
assert_eq!(u.pop_lowest(), Some(1u32));
assert!(u.pop_lowest().is_none())
}
{
let mut u = TinySet::empty().insert(1u32).insert(1u32);
assert_eq!(u.pop_lowest(), Some(1u32));
assert!(u.pop_lowest().is_none())
}
{
let mut u = TinySet::empty().insert(2u32);
assert_eq!(u.pop_lowest(), Some(2u32));
u.insert_mut(1u32);
assert_eq!(u.pop_lowest(), Some(1u32));
assert!(u.pop_lowest().is_none());
}
{
let mut u = TinySet::empty().insert(63u32);
assert_eq!(u.pop_lowest(), Some(63u32));
assert!(u.pop_lowest().is_none());
}
{
let mut u = TinySet::empty().insert(63u32).insert(5);
assert_eq!(u.pop_lowest(), Some(5u32));
assert_eq!(u.pop_lowest(), Some(63u32));
assert!(u.pop_lowest().is_none());
}
{
let original = TinySet::empty().insert(63u32).insert(5);
let after_serialize_deserialize = TinySet::deserialize(original.into_bytes());
assert_eq!(original, after_serialize_deserialize);
}
}
#[test]
fn test_bitset() {
let test_against_hashset = |els: &[u32], max_value: u32| {
let mut hashset: HashSet<u32> = HashSet::new();
let mut bitset = BitSet::with_max_value(max_value);
for &el in els {
assert!(el < max_value);
hashset.insert(el);
bitset.insert(el);
}
for el in 0..max_value {
assert_eq!(hashset.contains(&el), bitset.contains(el));
}
assert_eq!(bitset.max_value(), max_value);
// test deser
let mut data = vec![];
bitset.serialize(&mut data).unwrap();
let ro_bitset = ReadOnlyBitSet::open(OwnedBytes::new(data));
for el in 0..max_value {
assert_eq!(hashset.contains(&el), ro_bitset.contains(el));
}
assert_eq!(ro_bitset.max_value(), max_value);
assert_eq!(ro_bitset.len(), els.len());
};
test_against_hashset(&[], 0);
test_against_hashset(&[], 1);
test_against_hashset(&[0u32], 1);
test_against_hashset(&[0u32], 100);
test_against_hashset(&[1u32, 2u32], 4);
test_against_hashset(&[99u32], 100);
test_against_hashset(&[63u32], 64);
test_against_hashset(&[62u32, 63u32], 64);
}
#[test]
fn test_bitset_num_buckets() {
use super::num_buckets;
assert_eq!(num_buckets(0u32), 0);
assert_eq!(num_buckets(1u32), 1);
assert_eq!(num_buckets(64u32), 1);
assert_eq!(num_buckets(65u32), 2);
assert_eq!(num_buckets(128u32), 2);
assert_eq!(num_buckets(129u32), 3);
}
#[test]
fn test_tinyset_range() {
assert_eq!(
TinySet::range_lower(3).into_iter().collect::<Vec<u32>>(),
[0, 1, 2]
);
assert!(TinySet::range_lower(0).is_empty());
assert_eq!(
TinySet::range_lower(63).into_iter().collect::<Vec<u32>>(),
(0u32..63u32).collect::<Vec<_>>()
);
assert_eq!(
TinySet::range_lower(1).into_iter().collect::<Vec<u32>>(),
[0]
);
assert_eq!(
TinySet::range_lower(2).into_iter().collect::<Vec<u32>>(),
[0, 1]
);
assert_eq!(
TinySet::range_greater_or_equal(3)
.into_iter()
.collect::<Vec<u32>>(),
(3u32..64u32).collect::<Vec<_>>()
);
}
#[test]
fn test_bitset_len() {
let mut bitset = BitSet::with_max_value(1_000);
assert_eq!(bitset.len(), 0);
bitset.insert(3u32);
assert_eq!(bitset.len(), 1);
bitset.insert(103u32);
assert_eq!(bitset.len(), 2);
bitset.insert(3u32);
assert_eq!(bitset.len(), 2);
bitset.insert(103u32);
assert_eq!(bitset.len(), 2);
bitset.insert(104u32);
assert_eq!(bitset.len(), 3);
bitset.remove(105u32);
assert_eq!(bitset.len(), 3);
bitset.remove(104u32);
assert_eq!(bitset.len(), 2);
bitset.remove(3u32);
assert_eq!(bitset.len(), 1);
bitset.remove(103u32);
assert_eq!(bitset.len(), 0);
}
pub fn sample_with_seed(n: u32, ratio: f64, seed_val: u8) -> Vec<u32> {
StdRng::from_seed([seed_val; 32])
.sample_iter(&Bernoulli::new(ratio).unwrap())
.take(n as usize)
.enumerate()
.filter_map(|(val, keep)| if keep { Some(val as u32) } else { None })
.collect()
}
pub fn sample(n: u32, ratio: f64) -> Vec<u32> {
sample_with_seed(n, ratio, 4)
}
#[test]
fn test_bitset_clear() {
let mut bitset = BitSet::with_max_value(1_000);
let els = sample(1_000, 0.01f64);
for &el in &els {
bitset.insert(el);
}
assert!(els.iter().all(|el| bitset.contains(*el)));
bitset.clear();
for el in 0u32..1000u32 {
assert!(!bitset.contains(el));
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use test;
use super::{BitSet, TinySet};
#[bench]
fn bench_tinyset_pop(b: &mut test::Bencher) {
b.iter(|| {
let mut tinyset = TinySet::singleton(test::black_box(31u32));
tinyset.pop_lowest();
tinyset.pop_lowest();
tinyset.pop_lowest();
tinyset.pop_lowest();
tinyset.pop_lowest();
tinyset.pop_lowest();
});
}
#[bench]
fn bench_tinyset_sum(b: &mut test::Bencher) {
let tiny_set = TinySet::empty().insert(10u32).insert(14u32).insert(21u32);
b.iter(|| {
assert_eq!(test::black_box(tiny_set).into_iter().sum::<u32>(), 45u32);
});
}
#[bench]
fn bench_tinyarr_sum(b: &mut test::Bencher) {
let v = [10u32, 14u32, 21u32];
b.iter(|| test::black_box(v).iter().cloned().sum::<u32>());
}
#[bench]
fn bench_bitset_initialize(b: &mut test::Bencher) {
b.iter(|| BitSet::with_max_value(1_000_000));
}
}

common/src/lib.rs (new file)

@@ -0,0 +1,168 @@
#![allow(clippy::len_without_is_empty)]
use std::ops::Deref;
pub use byteorder::LittleEndian as Endianness;
mod bitset;
mod serialize;
mod vint;
mod writer;
pub use bitset::*;
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
pub use vint::{read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt};
pub use writer::{AntiCallToken, CountingWriter, TerminatingWrite};
/// Has length trait
pub trait HasLen {
/// Return length
fn len(&self) -> usize;
/// Returns true iff empty.
fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<T: Deref<Target = [u8]>> HasLen for T {
fn len(&self) -> usize {
self.deref().len()
}
}
const HIGHEST_BIT: u64 = 1 << 63;
/// Maps a `i64` to `u64`
///
/// For simplicity, tantivy internally handles `i64` as `u64`.
/// The mapping is defined by this function.
///
/// Maps `i64` to `u64` so that
/// `-2^63 .. 2^63-1` is mapped
/// to
/// `0 .. 2^64-1`
/// in that order.
///
/// This is more suited than simply casting (`val as u64`)
/// because of bitpacking.
///
/// Imagine a list of `i64` ranging from -10 to 10.
/// When casting negative values, the negative values are projected
/// to values over 2^63, and all values end up requiring 64 bits.
///
/// # See also
/// The [reverse mapping is `u64_to_i64`](./fn.u64_to_i64.html).
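///
/// A quick, illustrative sketch of the monotonicity and round-trip properties:
/// ```ignore
/// // order is preserved by the mapping
/// assert!(i64_to_u64(-1) < i64_to_u64(0));
/// // and the mapping is reversible
/// assert_eq!(u64_to_i64(i64_to_u64(-10)), -10);
/// ```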
#[inline]
pub fn i64_to_u64(val: i64) -> u64 {
(val as u64) ^ HIGHEST_BIT
}
/// Reverse the mapping given by [`i64_to_u64`](./fn.i64_to_u64.html).
#[inline]
pub fn u64_to_i64(val: u64) -> i64 {
(val ^ HIGHEST_BIT) as i64
}
/// Maps a `f64` to `u64`
///
/// For simplicity, tantivy internally handles `f64` as `u64`.
/// The mapping is defined by this function.
///
/// Maps `f64` to `u64` in a monotonic manner, so that bytes lexical order is preserved.
///
/// This is more suited than simply casting (`val as u64`)
/// which would truncate the result
///
/// # Reference
///
/// Daniel Lemire's [blog post](https://lemire.me/blog/2020/12/14/converting-floating-point-numbers-to-integers-while-preserving-order/)
/// explains the mapping in a clear manner.
///
/// # See also
/// The [reverse mapping is `u64_to_f64`](./fn.u64_to_f64.html).
#[inline]
pub fn f64_to_u64(val: f64) -> u64 {
let bits = val.to_bits();
if val.is_sign_positive() {
bits ^ HIGHEST_BIT
} else {
!bits
}
}
/// Reverse the mapping given by [`i64_to_u64`](./fn.i64_to_u64.html).
#[inline]
pub fn u64_to_f64(val: u64) -> f64 {
f64::from_bits(if val & HIGHEST_BIT != 0 {
val ^ HIGHEST_BIT
} else {
!val
})
}
#[cfg(test)]
pub mod test {
use proptest::prelude::*;
use super::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64, BinarySerializable, FixedSize};
fn test_i64_converter_helper(val: i64) {
assert_eq!(u64_to_i64(i64_to_u64(val)), val);
}
fn test_f64_converter_helper(val: f64) {
assert_eq!(u64_to_f64(f64_to_u64(val)), val);
}
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new();
O::default().serialize(&mut buffer).unwrap();
assert_eq!(buffer.len(), O::SIZE_IN_BYTES);
}
proptest! {
#[test]
fn test_f64_converter_monotonicity_proptest((left, right) in (proptest::num::f64::NORMAL, proptest::num::f64::NORMAL)) {
let left_u64 = f64_to_u64(left);
let right_u64 = f64_to_u64(right);
assert_eq!(left_u64 < right_u64, left < right);
}
}
#[test]
fn test_i64_converter() {
assert_eq!(i64_to_u64(i64::MIN), u64::MIN);
assert_eq!(i64_to_u64(i64::MAX), u64::MAX);
test_i64_converter_helper(0i64);
test_i64_converter_helper(i64::MIN);
test_i64_converter_helper(i64::MAX);
for i in -1000i64..1000i64 {
test_i64_converter_helper(i);
}
}
#[test]
fn test_f64_converter() {
test_f64_converter_helper(f64::INFINITY);
test_f64_converter_helper(f64::NEG_INFINITY);
test_f64_converter_helper(0.0);
test_f64_converter_helper(-0.0);
test_f64_converter_helper(1.0);
test_f64_converter_helper(-1.0);
}
#[test]
fn test_f64_order() {
assert!(!(f64_to_u64(f64::NEG_INFINITY)..f64_to_u64(f64::INFINITY))
.contains(&f64_to_u64(f64::NAN))); // nan is not a number
assert!(f64_to_u64(1.5) > f64_to_u64(1.0)); // same exponent, different mantissa
assert!(f64_to_u64(2.0) > f64_to_u64(1.0)); // same mantissa, different exponent
assert!(f64_to_u64(2.0) > f64_to_u64(1.5)); // different exponent and mantissa
assert!(f64_to_u64(1.0) > f64_to_u64(-1.0)); // pos > neg
assert!(f64_to_u64(-1.5) < f64_to_u64(-1.0));
assert!(f64_to_u64(-2.0) < f64_to_u64(1.0));
assert!(f64_to_u64(-2.0) < f64_to_u64(-1.5));
}
}


@@ -1,10 +1,9 @@
use crate::common::Endianness;
use crate::common::VInt;
use std::io::{Read, Write};
use std::{fmt, io};
use byteorder::{ReadBytesExt, WriteBytesExt};
use std::fmt;
use std::io;
use std::io::Read;
use std::io::Write;
use crate::{Endianness, VInt};
/// Trait for a simple binary serialization.
pub trait BinarySerializable: fmt::Debug + Sized {
@@ -14,6 +13,20 @@ pub trait BinarySerializable: fmt::Debug + Sized {
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self>;
}
pub trait DeserializeFrom<T: BinarySerializable> {
fn deserialize(&mut self) -> io::Result<T>;
}
/// Implement deserialize from &[u8] for all types which implement BinarySerializable.
///
/// TryFrom would actually be preferable, but not possible because of the orphan
/// rules (not completely sure if this could be resolved)
impl<T: BinarySerializable> DeserializeFrom<T> for &[u8] {
fn deserialize(&mut self) -> io::Result<T> {
T::deserialize(self)
}
}
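// Usage sketch (illustrative, not part of the original file): any type that
// implements `BinarySerializable` can be read back directly from a byte slice
// through `DeserializeFrom`:
//
//     let mut buffer: Vec<u8> = Vec::new();
//     5u32.serialize(&mut buffer).unwrap();
//     let mut bytes: &[u8] = &buffer;
//     let val: u32 = bytes.deserialize().unwrap();
//     assert_eq!(val, 5);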
/// `FixedSize` marks a `BinarySerializable` as
/// always serializing to the same size.
pub trait FixedSize: BinarySerializable {
@@ -61,6 +74,11 @@ impl<Left: BinarySerializable, Right: BinarySerializable> BinarySerializable for
Ok((Left::deserialize(reader)?, Right::deserialize(reader)?))
}
}
impl<Left: BinarySerializable + FixedSize, Right: BinarySerializable + FixedSize> FixedSize
for (Left, Right)
{
const SIZE_IN_BYTES: usize = Left::SIZE_IN_BYTES + Right::SIZE_IN_BYTES;
}
impl BinarySerializable for u32 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
@@ -89,6 +107,19 @@ impl FixedSize for u64 {
const SIZE_IN_BYTES: usize = 8;
}
impl BinarySerializable for f32 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_f32::<Endianness>(*self)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
reader.read_f32::<Endianness>()
}
}
impl FixedSize for f32 {
const SIZE_IN_BYTES: usize = 4;
}
impl BinarySerializable for i64 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_i64::<Endianness>(*self)
@@ -128,6 +159,28 @@ impl FixedSize for u8 {
const SIZE_IN_BYTES: usize = 1;
}
impl BinarySerializable for bool {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
let val = if *self { 1 } else { 0 };
writer.write_u8(val)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<bool> {
let val = reader.read_u8()?;
match val {
0 => Ok(false),
1 => Ok(true),
_ => Err(io::Error::new(
io::ErrorKind::InvalidData,
"invalid bool value on deserialization, data corrupted",
)),
}
}
}
impl FixedSize for bool {
const SIZE_IN_BYTES: usize = 1;
}
impl BinarySerializable for String {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
let data: &[u8] = self.as_bytes();
@@ -148,9 +201,8 @@ impl BinarySerializable for String {
#[cfg(test)]
pub mod test {
use super::*;
use crate::common::VInt;
use super::{VInt, *};
use crate::serialize::BinarySerializable;
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new();
O::default().serialize(&mut buffer).unwrap();
@@ -177,7 +229,7 @@ pub mod test {
fixed_size_test::<u32>();
assert_eq!(4, serialize_test(3u32));
assert_eq!(4, serialize_test(5u32));
assert_eq!(4, serialize_test(u32::max_value()));
assert_eq!(4, serialize_test(u32::MAX));
}
#[test]
@@ -195,14 +247,16 @@ pub mod test {
fixed_size_test::<u64>();
}
#[test]
fn test_serialize_bool() {
fixed_size_test::<bool>();
}
#[test]
fn test_serialize_string() {
assert_eq!(serialize_test(String::from("")), 1);
assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4);
assert_eq!(
serialize_test(String::from("富士さん見える。")),
1 + 3 * 8
);
assert_eq!(serialize_test(String::from("富士さん見える。")), 1 + 3 * 8);
}
#[test]
@@ -223,6 +277,6 @@ pub mod test {
assert_eq!(serialize_test(VInt(1234u64)), 2);
assert_eq!(serialize_test(VInt(16_383u64)), 2);
assert_eq!(serialize_test(VInt(16_384u64)), 3);
assert_eq!(serialize_test(VInt(u64::max_value())), 10);
assert_eq!(serialize_test(VInt(u64::MAX)), 10);
}
}


@@ -1,16 +1,17 @@
use super::BinarySerializable;
use byteorder::{ByteOrder, LittleEndian};
use std::io;
use std::io::Read;
use std::io::Write;
use std::io::{Read, Write};
use byteorder::{ByteOrder, LittleEndian};
use super::BinarySerializable;
/// Wrapper over a `u64` that serializes as a variable int.
#[derive(Debug, Eq, PartialEq)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct VInt(pub u64);
const STOP_BIT: u8 = 128;
pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
const START_2: u64 = 1 << 7;
const START_3: u64 = 1 << 14;
const START_4: u64 = 1 << 21;
@@ -29,7 +30,7 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
let val = u64::from(val);
const STOP_BIT: u64 = 128u64;
match val {
let (res, num_bytes) = match val {
0..=STOP_1 => (val | STOP_BIT, 1),
START_2..=STOP_2 => (
(val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
@@ -56,7 +57,9 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
| (STOP_BIT << (8 * 4)),
5,
),
}
};
LittleEndian::write_u64(&mut buf[..], res);
&buf[0..num_bytes]
}
/// Returns the number of bytes covered by a
@@ -85,23 +88,26 @@ fn vint_len(data: &[u8]) -> usize {
/// If the buffer does not start by a valid
/// vint payload
pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
let vlen = vint_len(*data);
let (result, vlen) = read_u32_vint_no_advance(*data);
*data = &data[vlen..];
result
}
pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
let vlen = vint_len(data);
let mut result = 0u32;
let mut shift = 0u64;
for &b in &data[..vlen] {
result |= u32::from(b & 127u8) << shift;
shift += 7;
}
*data = &data[vlen..];
result
(result, vlen)
}
/// Write a `u32` as a vint payload.
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
let (val, num_bytes) = serialize_vint_u32(val);
let mut buffer = [0u8; 8];
LittleEndian::write_u64(&mut buffer, val);
writer.write_all(&buffer[..num_bytes])
let mut buf = [0u8; 8];
let data = serialize_vint_u32(val, &mut buf);
writer.write_all(data)
}
impl VInt {
@@ -169,16 +175,13 @@ impl BinarySerializable for VInt {
#[cfg(test)]
mod tests {
use super::serialize_vint_u32;
use super::VInt;
use crate::common::BinarySerializable;
use byteorder::{ByteOrder, LittleEndian};
use super::{serialize_vint_u32, BinarySerializable, VInt};
fn aux_test_vint(val: u64) {
let mut v = [14u8; 10];
let num_bytes = VInt(val).serialize_into(&mut v);
for i in num_bytes..10 {
assert_eq!(v[i], 14u8);
for el in &v[num_bytes..10] {
assert_eq!(el, &14u8);
}
assert!(num_bytes > 0);
if num_bytes < 10 {
@@ -196,7 +199,7 @@ mod tests {
aux_test_vint(0);
aux_test_vint(1);
aux_test_vint(5);
aux_test_vint(u64::max_value());
aux_test_vint(u64::MAX);
for i in 1..9 {
let power_of_128 = 1u64 << (7 * i);
aux_test_vint(power_of_128 - 1u64);
@@ -208,12 +211,10 @@ mod tests {
fn aux_test_serialize_vint_u32(val: u32) {
let mut buffer = [0u8; 10];
let mut buffer2 = [0u8; 10];
let mut buffer2 = [0u8; 8];
let len_vint = VInt(val as u64).serialize_into(&mut buffer);
let (vint, len) = serialize_vint_u32(val);
assert_eq!(len, len_vint, "len wrong for val {}", val);
LittleEndian::write_u64(&mut buffer2, vint);
assert_eq!(&buffer[..len], &buffer2[..len], "array wrong for {}", val);
let res2 = serialize_vint_u32(val, &mut buffer2);
assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
}
#[test]
@@ -227,6 +228,6 @@ mod tests {
aux_test_serialize_vint_u32(power_of_128);
aux_test_serialize_vint_u32(power_of_128 + 1u32);
}
aux_test_serialize_vint_u32(u32::max_value());
aux_test_serialize_vint_u32(u32::MAX);
}
}

common/src/writer.rs (new file)

@@ -0,0 +1,114 @@
use std::io::{self, BufWriter, Write};
pub struct CountingWriter<W> {
underlying: W,
written_bytes: u64,
}
impl<W: Write> CountingWriter<W> {
pub fn wrap(underlying: W) -> CountingWriter<W> {
CountingWriter {
underlying,
written_bytes: 0,
}
}
#[inline]
pub fn written_bytes(&self) -> u64 {
self.written_bytes
}
/// Returns the underlying write object.
/// Note that this method does not trigger any flushing.
#[inline]
pub fn finish(self) -> W {
self.underlying
}
}
impl<W: Write> Write for CountingWriter<W> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let written_size = self.underlying.write(buf)?;
self.written_bytes += written_size as u64;
Ok(written_size)
}
#[inline]
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
self.underlying.write_all(buf)?;
self.written_bytes += buf.len() as u64;
Ok(())
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
self.underlying.flush()
}
}
impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
#[inline]
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.underlying.terminate_ref(token)
}
}
/// Struct used to prevent from calling
/// [`terminate_ref`](trait.TerminatingWrite.html#tymethod.terminate_ref) directly
///
/// The point is that while the type is public, it cannot be built by anyone
/// outside of this module.
pub struct AntiCallToken(());
/// Trait used to indicate when no more write need to be done on a writer
pub trait TerminatingWrite: Write + Send {
/// Indicate that the writer will no longer be used. Internally call terminate_ref.
fn terminate(mut self) -> io::Result<()>
where Self: Sized {
self.terminate_ref(AntiCallToken(()))
}
/// You should implement this function to define custom behavior.
/// This function should flush any buffer it may hold.
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()>;
}
impl<W: TerminatingWrite + ?Sized> TerminatingWrite for Box<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.as_mut().terminate_ref(token)
}
}
impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
fn terminate_ref(&mut self, a: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.get_mut().terminate_ref(a)
}
}
impl<'a> TerminatingWrite for &'a mut Vec<u8> {
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
#[cfg(test)]
mod test {
use std::io::Write;
use super::CountingWriter;
#[test]
fn test_counting_writer() {
let buffer: Vec<u8> = vec![];
let mut counting_writer = CountingWriter::wrap(buffer);
let bytes = (0u8..10u8).collect::<Vec<u8>>();
counting_writer.write_all(&bytes).unwrap();
let len = counting_writer.written_bytes();
let buffer_restituted: Vec<u8> = counting_writer.finish();
assert_eq!(len, 10u64);
assert_eq!(buffer_restituted.len(), 10);
}
}

(binary image assets added — contents not shown)

@@ -0,0 +1,8 @@
<svg width="518" height="112" viewBox="0 0 518 112" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M56 112C86.9279 112 112 86.9279 112 56C112 25.0721 86.9279 0 56 0C25.0721 0 0 25.0721 0 56C0 86.9279 25.0721 112 56 112Z" fill="#0DBD8B"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M45.7615 26.093C45.7615 23.8325 47.5977 22.0001 49.8629 22.0001C65.2154 22.0001 77.6611 34.4199 77.6611 49.7406C77.6611 52.001 75.8248 53.8335 73.5597 53.8335C71.2945 53.8335 69.4583 52.001 69.4583 49.7406C69.4583 38.9408 60.6851 30.1859 49.8629 30.1859C47.5977 30.1859 45.7615 28.3534 45.7615 26.093Z" fill="white"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M85.8986 45.6477C88.1637 45.6477 89.9999 47.4801 89.9999 49.7406C89.9999 65.0612 77.5543 77.4811 62.2017 77.4811C59.9366 77.4811 58.1003 75.6486 58.1003 73.3882C58.1003 71.1277 59.9366 69.2953 62.2017 69.2953C73.024 69.2953 81.7972 60.5403 81.7972 49.7406C81.7972 47.4801 83.6334 45.6477 85.8986 45.6477Z" fill="white"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M66.3031 85.907C66.3031 88.1675 64.4668 89.9999 62.2017 89.9999C46.8492 89.9999 34.4035 77.58 34.4035 62.2594C34.4035 59.9989 36.2398 58.1665 38.5049 58.1665C40.77 58.1665 42.6063 59.9989 42.6063 62.2594C42.6063 73.0592 51.3795 81.8141 62.2017 81.8141C64.4668 81.8141 66.3031 83.6466 66.3031 85.907Z" fill="white"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M26.1014 66.3523C23.8363 66.3523 22.0001 64.5199 22.0001 62.2594C22 46.9388 34.4457 34.5189 49.7983 34.5189C52.0634 34.5189 53.8997 36.3514 53.8997 38.6118C53.8997 40.8723 52.0634 42.7047 49.7983 42.7047C38.976 42.7047 30.2028 51.4597 30.2028 62.2594C30.2028 64.5199 28.3666 66.3523 26.1014 66.3523Z" fill="white"/>
<path d="M197 63.5H157.5C157.967 67.6333 159.467 70.9333 162 73.4C164.533 75.8 167.867 77 172 77C174.733 77 177.2 76.3333 179.4 75C181.6 73.6667 183.167 71.8667 184.1 69.6H196.1C194.5 74.8667 191.5 79.1333 187.1 82.4C182.767 85.6 177.633 87.2 171.7 87.2C163.967 87.2 157.7 84.6333 152.9 79.5C148.167 74.3667 145.8 67.8667 145.8 60C145.8 52.3333 148.2 45.9 153 40.7C157.8 35.5 164 32.9 171.6 32.9C179.2 32.9 185.333 35.4667 190 40.6C194.733 45.6667 197.1 52.0667 197.1 59.8L197 63.5ZM171.6 42.6C167.867 42.6 164.767 43.7 162.3 45.9C159.833 48.1 158.3 51.0333 157.7 54.7H185.3C184.767 51.0333 183.3 48.1 180.9 45.9C178.5 43.7 175.4 42.6 171.6 42.6ZM205.289 70.5V11H217.189V70.7C217.189 73.3667 218.656 74.7 221.589 74.7L223.689 74.6V85.9C222.556 86.1 221.356 86.2 220.089 86.2C214.956 86.2 211.189 84.9 208.789 82.3C206.456 79.7 205.289 75.7667 205.289 70.5ZM279.109 63.5H239.609C240.076 67.6333 241.576 70.9333 244.109 73.4C246.643 75.8 249.976 77 254.109 77C256.843 77 259.309 76.3333 261.509 75C263.709 73.6667 265.276 71.8667 266.209 69.6H278.209C276.609 74.8667 273.609 79.1333 269.209 82.4C264.876 85.6 259.743 87.2 253.809 87.2C246.076 87.2 239.809 84.6333 235.009 79.5C230.276 74.3667 227.909 67.8667 227.909 60C227.909 52.3333 230.309 45.9 235.109 40.7C239.909 35.5 246.109 32.9 253.709 32.9C261.309 32.9 267.443 35.4667 272.109 40.6C276.843 45.6667 279.209 52.0667 279.209 59.8L279.109 63.5ZM253.709 42.6C249.976 42.6 246.876 43.7 244.409 45.9C241.943 48.1 240.409 51.0333 239.809 54.7H267.409C266.876 51.0333 265.409 48.1 263.009 45.9C260.609 43.7 257.509 42.6 253.709 42.6ZM332.798 56.2V86H320.898V54.9C320.898 47.0333 317.632 43.1 311.098 43.1C307.565 43.1 304.732 44.2333 302.598 46.5C300.532 48.7667 299.498 51.8667 299.498 55.8V86H287.598V34.1H298.598V41C299.865 38.6667 301.798 36.7333 304.398 35.2C306.998 33.6667 310.232 32.9 314.098 32.9C321.298 32.9 326.498 35.6333 329.698 41.1C334.098 35.6333 339.965 32.9 347.298 32.9C353.365 32.9 358.032 34.8 361.298 38.6C364.565 42.3333 366.198 47.2667 366.198 53.4V86H354.298V54.9C354.298 47.0333 351.032 43.1 344.498 43.1C340.898 43.1 338.032 44.2667 335.898 46.6C333.832 48.8667 332.798 52.0667 332.798 56.2ZM425.379 63.5H385.879C386.346 67.6333 387.846 70.9333 390.379 73.4C392.912 75.8 396.246 77 400.379 77C403.112 77 405.579 76.3333 407.779 75C409.979 73.6667 411.546 71.8667 412.479 69.6H424.479C422.879 74.8667 419.879 79.1333 415.479 82.4C411.146 85.6 406.012 87.2 400.079 87.2C392.346 87.2 386.079 84.6333 381.279 79.5C376.546 74.3667 374.179 67.8667 374.179 60C374.179 52.3333 376.579 45.9 381.379 40.7C386.179 35.5 392.379 32.9 399.979 32.9C407.579 32.9 413.712 35.4667 418.379 40.6C423.112 45.6667 425.479 52.0667 425.479 59.8L425.379 63.5ZM399.979 42.6C396.246 42.6 393.146 43.7 390.679 45.9C388.212 48.1 386.679 51.0333 386.079 54.7H413.679C413.146 51.0333 411.679 48.1 409.279 45.9C406.879 43.7 403.779 42.6 399.979 42.6ZM444.868 34.1V41C446.068 38.7333 448.035 36.8333 450.768 35.3C453.568 33.7 456.935 32.9 460.868 32.9C467.001 32.9 471.735 34.7667 475.068 38.5C478.468 42.2333 480.168 47.2 480.168 53.4V86H468.268V54.9C468.268 51.2333 467.401 48.3667 465.668 46.3C464.001 44.1667 461.435 43.1 457.968 43.1C454.168 43.1 451.168 44.2333 448.968 46.5C446.835 48.7667 445.768 51.9 445.768 55.9V86H433.868V34.1H444.868ZM514.922 75.4V85.7C513.455 86.1 511.389 86.3 508.722 86.3C498.589 86.3 493.522 81.2 493.522 71V43.6H485.622V34.1H493.522V20.6H505.422V34.1H515.122V43.6H505.422V69.8C505.422 73.8667 507.355 75.9 511.222 75.9L514.922 75.4Z" fill="black"/>
</svg>

(additional binary image assets added — contents not shown)

@@ -1,12 +1,11 @@
# Summary
[Avant Propos](./avant-propos.md)
- [Segments](./basis.md)
- [Defining your schema](./schema.md)
- [Facetting](./facetting.md)
- [Index Sorting](./index_sorting.md)
- [Innerworkings](./innerworkings.md)
- [Inverted index](./inverted_index.md)
- [Best practise](./inverted_index.md)


@@ -3,7 +3,7 @@
> Tantivy is a **search** engine **library** for Rust.
If you are familiar with Lucene, it's an excellent approximation to consider tantivy as Lucene for rust. tantivy is heavily inspired by Lucene's design and
they both have the same scope and targetted use cases.
they both have the same scope and targeted use cases.
If you are not familiar with Lucene, let's break down our little tagline.
@@ -31,4 +31,4 @@ relevancy, collapsing, highlighting, spatial search.
index from a different format.
Tantivy exposes a lot of low level API to do all of these things.


@@ -11,7 +11,7 @@ directory shipped with tantivy is the `MmapDirectory`.
While this design has some downsides, this greatly simplifies the source code of
tantivy. Caching is also entirely delegated to the OS.
`tantivy` works entirely (or almost) by directly reading the datastructures as they are layed on disk. As a result, the act of opening an indexing does not involve loading different datastructures from the disk into random access memory : starting a process, opening an index, and performing your first query can typically be done in a matter of milliseconds.
`tantivy` works entirely (or almost) by directly reading the datastructures as they are laid on disk. As a result, the act of opening an indexing does not involve loading different datastructures from the disk into random access memory : starting a process, opening an index, and performing your first query can typically be done in a matter of milliseconds.
This is an interesting property for a command line search engine, or for some multi-tenant log search engine : spawning a new process for each new query can be a perfectly sensible solution in some use case.
@@ -22,7 +22,6 @@ Of course this is crucial to reduce IO, and ensure that as much of our index can
Also, whenever possible its data is accessed sequentially. Of course, this is an amazing property when tantivy needs to access the data from your spinning hard disk, but this is also
critical for performance, if your data is read from and an `SSD` or even already in your pagecache.
## Segments, and the log method
That kind of compact layout comes at one cost: it prevents our datastructures from being dynamic.
@@ -53,11 +52,7 @@ to get tantivy to fit your use case:
*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated to segment `D-7`.
# Merging
## Merging
As you index more and more data, your index will accumulate more and more segments.
Having a lot of small segments is not really optimal. There is a bit of redundancy in having
@@ -66,11 +61,7 @@ all these term dictionary. Also when searching, we will need to do term lookups
That's where merging or compacting comes into place. Tantivy will continuously consider merge
opportunities and start merging segments in the background.
# Indexing throughput, number of indexing threads
## Indexing throughput, number of indexing threads
[^1]: This may eventually change.


@@ -1,3 +1,3 @@
# Examples
- [Basic search](/examples/basic_search.html)
- [Basic search](/examples/basic_search.html)

doc/src/index_sorting.md (new file)

@@ -0,0 +1,62 @@
- [Index Sorting](#index-sorting)
- [Why Sorting](#why-sorting)
- [Compression](#compression)
- [Top-N Optimization](#top-n-optimization)
- [Pruning](#pruning)
- [Other](#other)
- [Usage](#usage)
# Index Sorting
Tantivy allows you to sort the index according to a property.
## Why Sorting
Presorting an index has several advantages:
### Compression
When data is sorted, it is easier to compress. E.g. the number sequence [5, 2, 3, 1, 4] would be sorted to [1, 2, 3, 4, 5].
With delta encoding, the unsorted list becomes [5, -3, 1, -2, 3], whereas the sorted list becomes [1, 1, 1, 1, 1], which compresses much better.
The compression ratio is mainly improved for the fast field of the sorted property; everything else is likely unaffected.
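As a small, self-contained illustration (plain Rust, unrelated to tantivy's actual codecs), delta encoding turns the sorted sequence into small uniform deltas that bitpack into very few bits:

```rust
// Illustration only: delta-encode a sequence (each element minus its predecessor).
fn delta_encode(vals: &[i64]) -> Vec<i64> {
    let mut prev = 0;
    vals.iter()
        .map(|&v| {
            let delta = v - prev;
            prev = v;
            delta
        })
        .collect()
}

fn main() {
    assert_eq!(delta_encode(&[5, 2, 3, 1, 4]), vec![5, -3, 1, -2, 3]);
    assert_eq!(delta_encode(&[1, 2, 3, 4, 5]), vec![1, 1, 1, 1, 1]);
}
```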
### Top-N Optimization
When data is presorted by a field and search queries request sorting by the same field, we can leverage the natural order of the documents.
E.g. if the data is sorted by timestamp and we want the top-n newest docs containing a term, we can simply leverage the natural order of the docids.
Note: Tantivy 0.16 does not do this optimization yet.
### Pruning
Let's say we want all documents matching the filter `>= 2010-08-11`. When the data is sorted, we could do a lookup in the fast field to find the matching docid range and use that range as the filter.
Note: Tantivy 0.16 does not do this optimization yet.
### Other?
In principle there are many algorithms possible that exploit the monotonically increasing nature. (aggregations maybe?)
## Usage
Index sorting can be configured by setting [`sort_by_field`](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/core/index_meta.rs#L238) on `IndexSettings` and passing it to an `IndexBuilder`. As of Tantivy 0.16, only fast fields are allowed to be used.
```rust
let settings = IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
order: Order::Desc,
}),
..Default::default()
};
let mut index_builder = Index::builder().schema(schema);
index_builder = index_builder.settings(settings);
let index = index_builder.create_in_ram().unwrap();
```
## Implementation details
Sorting an index is applied in the serialization step. In general there are two serialization steps: [Finishing a single segment](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/segment_writer.rs#L338) and [merging multiple segments](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/merger.rs#L1073).
In both cases we generate a docid mapping reflecting the sort. This mapping is used when serializing the different components (doc store, fastfields, posting list, normfield, facets).

doc/src/json.md (new file)

@@ -0,0 +1,130 @@
# Json
As of tantivy 0.17, tantivy supports a json object type.
This type can be used to allow for a schema-less search index.
When indexing a json object, we "flatten" the JSON. This operation emits terms that represent a triplet `(json_path, value_type, value)`.
For instance, if `user` is a json field, the following document:
```json
{
"user": {
"name": "Paul Masurel",
"address": {
"city": "Tokyo",
"country": "Japan"
},
"created_at": "2018-11-12T23:20:50.52Z"
}
}
```
emits the following tokens:
- ("name", Text, "Paul")
- ("name", Text, "Masurel")
- ("address.city", Text, "Tokyo")
- ("address.country", Text, "Japan")
- ("created_at", Date, 15420648505)
## Bytes-encoding and lexicographical sort
Like any other terms, these triplets are encoded into a binary format as follows.
- `json_path`: the json path is a sequence of "segments". In the example above, `address.city`
is just a debug representation of the json path `["address", "city"]`.
It is represented by separating segments with the unicode char `\x01` and terminating the path with `\x00`.
- `value type`: One byte represents the `Value` type.
- `value`: The value representation is just the regular Value representation.
This representation is designed to align the natural sort of Terms with the lexicographical sort
of their binary representation (Tantivy's dictionary (whether fst or sstable) is sorted and does prefix encoding).
In the example above, the terms will be sorted as
- ("address.city", Text, "Tokyo")
- ("address.country", Text, "Japan")
- ("name", Text, "Masurel")
- ("name", Text, "Paul")
- ("created_at", Date, 15420648505)
As seen in "pitfalls", we may end up having to search for a value for a same path in several different fields. Putting the field code after the path makes it maximizes compression opportunities but also increases the chances for the two terms to end up in the actual same term dictionary block.
## Pitfalls, limitations and corner cases
Json gives very little information about the type of the literals it stores.
All numeric types end up mapped as a "Number" and there are no types for dates.
At indexing time, tantivy will try to interpret numbers and strings as different types, following a
priority order.
Numbers will be interpreted as u64, i64 and f64, in that order.
Strings will be interpreted as RFC 3339 dates or simple strings.
The first working type is picked and is the only term that is emitted for indexing.
Note this interpretation happens on a per-document basis, and there is no effort to try to sniff
a consistent field type at the scale of a segment.
On the query parser side on the other hand, we may end up emitting more than one type.
For instance, we do not even know whether the value is a number or a string.
So the query
```rust
my_path.my_segment:233
```
Will be interpreted as
```rust
(my_path.my_segment, String, 233) or (my_path.my_segment, u64, 233)
```
Likewise, we need to emit two tokens if the query contains an RFC 3339 date.
Indeed the date could have been actually a single token inside the text of a document at ingestion time. Generally speaking, we will always at least emit a string token in query parsing, and sometimes more.
If one more json field is defined, things get even more complicated.
## Default json field
If the schema contains a text field called "text" and a json field called `json_dynamic` that is set as a default field:
`text:hello` could reasonably be interpreted as targeting the text field, or as targeting the json field `json_dynamic` with the json_path "text".
If there is such an ambiguity, we decide to search only in the "text" field: `text:hello`.
In other words, the parser will not search in default json fields if there is a schema hit.
This is a product decision.
The user can still target the JSON field by specifying its name explicitly:
`json_dynamic.text:hello`.
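The new `examples/json_field.rs` in this PR exercises exactly this behavior, with `event_type` as a regular text field and `attributes` as a default json field. A condensed sketch of the relevant assertions, assuming the index, searcher and fields are set up as in that example:
```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::{Index, Searcher};

// `event_type` is a regular text field and `attributes` a json field, both
// passed as default fields, exactly as in examples/json_field.rs.
fn check_default_field_resolution(
    index: &Index,
    searcher: &Searcher,
    event_type: Field,
    attributes: Field,
) -> tantivy::Result<()> {
    let query_parser = QueryParser::for_index(index, vec![event_type, attributes]);

    // "event_type" collides with a schema field, so the json sub-field
    // `attributes.event_type` is not searched for this query.
    let query = query_parser.parse_query("event_type:holiday-sale")?;
    assert_eq!(searcher.search(&*query, &TopDocs::with_limit(2))?.len(), 0);

    // Spelling out the full path still reaches the json field.
    let query = query_parser.parse_query("attributes.event_type:holiday-sale")?;
    assert_eq!(searcher.search(&*query, &TopDocs::with_limit(2))?.len(), 1);
    Ok(())
}
```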
## Range queries are not supported
Json fields do not support range queries.
## Arrays do not work like nested objects
If a json object contains an array, a search query might return more documents
than expected.
Let's take an example.
```json
{
"cart_id": 3234234 ,
"cart": [
{"product_type": "sneakers", "attributes": {"color": "white"} },
{"product_type": "t-shirt", "attributes": {"color": "red"}},
]
}
```
Despite the array structure, a document in tantivy is a bag of terms.
The query:
```rust
cart.product_type:sneakers AND cart.attributes.color:red
```
actually matches the document above.

130
examples/aggregation.rs Normal file
View File

@@ -0,0 +1,130 @@
// # Aggregation example
//
// This example shows how you can use built-in aggregations.
// We will use range buckets and compute the average in each bucket.
//
use serde_json::Value;
use tantivy::aggregation::agg_req::{
Aggregation, Aggregations, BucketAggregation, BucketAggregationType, MetricAggregation,
RangeAggregation,
};
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::metric::AverageAggregation;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::TermQuery;
use tantivy::schema::{self, Cardinality, IndexRecordOption, Schema, TextFieldIndexing};
use tantivy::{doc, Index, Term};
fn main() -> tantivy::Result<()> {
let mut schema_builder = Schema::builder();
let text_fieldtype = schema::TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
)
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let score_fieldtype =
crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
let highscore_field = schema_builder.add_f64_field("highscore", score_fieldtype.clone());
let price_field = schema_builder.add_f64_field("price", score_fieldtype.clone());
let schema = schema_builder.build();
// # Indexing documents
//
// Let's index a bunch of documents for this example.
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer(50_000_000)?;
// writing the segment
index_writer.add_document(doc!(
text_field => "cool",
highscore_field => 1f64,
price_field => 0f64,
))?;
index_writer.add_document(doc!(
text_field => "cool",
highscore_field => 3f64,
price_field => 1f64,
))?;
index_writer.add_document(doc!(
text_field => "cool",
highscore_field => 5f64,
price_field => 1f64,
))?;
index_writer.add_document(doc!(
text_field => "nohit",
highscore_field => 6f64,
price_field => 2f64,
))?;
index_writer.add_document(doc!(
text_field => "cool",
highscore_field => 7f64,
price_field => 2f64,
))?;
index_writer.commit()?;
index_writer.add_document(doc!(
text_field => "cool",
highscore_field => 11f64,
price_field => 10f64,
))?;
index_writer.add_document(doc!(
text_field => "cool",
highscore_field => 14f64,
price_field => 15f64,
))?;
index_writer.add_document(doc!(
text_field => "cool",
highscore_field => 15f64,
price_field => 20f64,
))?;
index_writer.commit()?;
let reader = index.reader()?;
let text_field = reader.searcher().schema().get_field("text").unwrap();
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let sub_agg_req_1: Aggregations = vec![(
"average_price".to_string(),
Aggregation::Metric(MetricAggregation::Average(
AverageAggregation::from_field_name("price".to_string()),
)),
)]
.into_iter()
.collect();
let agg_req_1: Aggregations = vec![(
"score_ranges".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "highscore".to_string(),
ranges: vec![
(-1f64..9f64).into(),
(9f64..14f64).into(),
(14f64..20f64).into(),
],
..Default::default()
}),
sub_aggregation: sub_agg_req_1.clone(),
}),
)]
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
let res: Value = serde_json::to_value(&agg_res)?;
println!("{}", serde_json::to_string_pretty(&res)?);
Ok(())
}

View File

@@ -5,26 +5,23 @@
//
// We will :
// - define our schema
// = create an index in a directory
// - index few documents in our index
// - search for the best document matchings "sea whale"
// - retrieve the best document original content.
// - create an index in a directory
// - index a few documents into our index
// - search for the best document matching a basic query
// - retrieve the best document's original content.
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::Index;
use tantivy::ReloadPolicy;
use tempdir::TempDir;
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;
fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the
// sake of this example
let index_path = TempDir::new("tantivy_example_dir")?;
let index_path = TempDir::new()?;
// # Defining the schema
//
@@ -33,7 +30,7 @@ fn main() -> tantivy::Result<()> {
// and for each field, its type and "the way it should
// be indexed".
// first we need to define a schema ...
// First we need to define a schema ...
let mut schema_builder = Schema::builder();
// Our first field is title.
@@ -48,7 +45,7 @@ fn main() -> tantivy::Result<()> {
//
// `STORED` means that the field will also be saved
// in a compressed, row-oriented key-value store.
// This store is useful to reconstruct the
// This store is useful for reconstructing the
// documents that were selected during the search phase.
schema_builder.add_text_field("title", TEXT | STORED);
@@ -57,8 +54,7 @@ fn main() -> tantivy::Result<()> {
// need to be able to be able to retrieve it
// for our application.
//
// We can make our index lighter and
// by omitting `STORED` flag.
// We can make our index lighter by omitting the `STORED` flag.
schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build();
@@ -71,13 +67,13 @@ fn main() -> tantivy::Result<()> {
// with our schema in the directory.
let index = Index::create_in_dir(&index_path, schema.clone())?;
// To insert document we need an index writer.
// To insert a document we will need an index writer.
// There must be only one writer at a time.
// This single `IndexWriter` is already
// multithreaded.
//
// Here we give tantivy a budget of `50MB`.
// Using a bigger heap for the indexer may increase
// Using a bigger memory_arena for the indexer may increase
// throughput, but 50 MB is already plenty.
let mut index_writer = index.writer(50_000_000)?;
@@ -95,12 +91,12 @@ fn main() -> tantivy::Result<()> {
old_man_doc.add_text(title, "The Old Man and the Sea");
old_man_doc.add_text(
body,
"He was an old man who fished alone in a skiff in the Gulf Stream and \
he had gone eighty-four days now without taking a fish.",
"He was an old man who fished alone in a skiff in the Gulf Stream and he had gone \
eighty-four days now without taking a fish.",
);
// ... and add it to the `IndexWriter`.
index_writer.add_document(old_man_doc);
index_writer.add_document(old_man_doc)?;
// For convenience, tantivy also comes with a macro to
// reduce the boilerplate above.
@@ -114,19 +110,7 @@ fn main() -> tantivy::Result<()> {
fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool"
));
index_writer.add_document(doc!(
title => "Of Mice and Men",
body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
bank and runs deep and green. The water is warm too, for it has slipped twinkling \
over the yellow sands in the sunlight before reaching the narrow pool. On one \
side of the river the golden foothill slopes curve up to the strong and rocky \
Gabilan Mountains, but on the valley side the water is lined with trees—willows \
fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool"
));
))?;
// Multivalued field just need to be repeated.
index_writer.add_document(doc!(
@@ -136,7 +120,7 @@ fn main() -> tantivy::Result<()> {
enterprise which you have regarded with such evil forebodings. I arrived here \
yesterday, and my first task is to assure my dear sister of my welfare and \
increasing confidence in the success of my undertaking."
));
))?;
// This is an example, so we will only index 3 documents
// here. You can check out tantivy's tutorial to index
@@ -149,8 +133,8 @@ fn main() -> tantivy::Result<()> {
// At this point our documents are not searchable.
//
//
// We need to call .commit() explicitly to force the
// index_writer to finish processing the documents in the queue,
// We need to call `.commit()` explicitly to force the
// `index_writer` to finish processing the documents in the queue,
// flush the current index to the disk, and advertise
// the existence of new documents.
//
@@ -162,14 +146,14 @@ fn main() -> tantivy::Result<()> {
// persistently indexed.
//
// In the scenario of a crash or a power failure,
// tantivy behaves as if has rolled back to its last
// tantivy behaves as if it has rolled back to its last
// commit.
// # Searching
//
// ### Searcher
//
// A reader is required to get search the index.
// A reader is required first in order to search an index.
// It acts as a `Searcher` pool that reloads itself,
// depending on a `ReloadPolicy`.
//
@@ -185,7 +169,7 @@ fn main() -> tantivy::Result<()> {
// We now need to acquire a searcher.
//
// A searcher points to snapshotted, immutable version of the index.
// A searcher points to a snapshotted, immutable version of the index.
//
// Some search experience might require more than
// one query. Using the same searcher ensures that all of these queries will run on the
@@ -205,7 +189,7 @@ fn main() -> tantivy::Result<()> {
// in both title and body.
let query_parser = QueryParser::for_index(&index, vec![title, body]);
// QueryParser may fail if the query is not in the right
// `QueryParser` may fail if the query is not in the right
// format. For user facing applications, this can be a problem.
// A ticket has been opened regarding this problem.
let query = query_parser.parse_query("sea whale")?;
@@ -221,7 +205,7 @@ fn main() -> tantivy::Result<()> {
//
// We are not interested in all of the documents but
// only in the top 10. Keeping track of our top 10 best documents
// is the role of the TopDocs.
// is the role of the `TopDocs` collector.
// We can now perform our query.
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

View File

@@ -9,15 +9,11 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::fastfield::FastFieldReader;
use tantivy::fastfield::{DynamicFastFieldReader, FastFieldReader};
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::SegmentReader;
use tantivy::{Index, TantivyError};
use tantivy::schema::{Field, Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, Score, SegmentReader};
#[derive(Default)]
struct Stats {
@@ -75,16 +71,7 @@ impl Collector for StatsCollector {
_segment_local_id: u32,
segment_reader: &SegmentReader,
) -> tantivy::Result<StatsSegmentCollector> {
let fast_field_reader = segment_reader
.fast_fields()
.u64(self.field)
.ok_or_else(|| {
let field_name = segment_reader.schema().get_field_name(self.field);
TantivyError::SchemaError(format!(
"Field {:?} is not a u64 fast field.",
field_name
))
})?;
let fast_field_reader = segment_reader.fast_fields().u64(self.field)?;
Ok(StatsSegmentCollector {
fast_field_reader,
stats: Stats::default(),
@@ -98,26 +85,24 @@ impl Collector for StatsCollector {
fn merge_fruits(&self, segment_stats: Vec<Option<Stats>>) -> tantivy::Result<Option<Stats>> {
let mut stats = Stats::default();
for segment_stats_opt in segment_stats {
if let Some(segment_stats) = segment_stats_opt {
stats.count += segment_stats.count;
stats.sum += segment_stats.sum;
stats.squared_sum += segment_stats.squared_sum;
}
for segment_stats in segment_stats.into_iter().flatten() {
stats.count += segment_stats.count;
stats.sum += segment_stats.sum;
stats.squared_sum += segment_stats.squared_sum;
}
Ok(stats.non_zero_count())
}
}
struct StatsSegmentCollector {
fast_field_reader: FastFieldReader<u64>,
fast_field_reader: DynamicFastFieldReader<u64>,
stats: Stats,
}
impl SegmentCollector for StatsSegmentCollector {
type Fruit = Option<Stats>;
fn collect(&mut self, doc: u32, _score: f32) {
fn collect(&mut self, doc: u32, _score: Score) {
let value = self.fast_field_reader.get(doc) as f64;
self.stats.count += 1;
self.stats.sum += value;
@@ -151,7 +136,7 @@ fn main() -> tantivy::Result<()> {
//
// Lets index a bunch of fake documents for the sake of
// this example.
let index = Index::create_in_ram(schema.clone());
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer(50_000_000)?;
index_writer.add_document(doc!(
@@ -159,23 +144,23 @@ fn main() -> tantivy::Result<()> {
product_description => "While it is ok for short distance travel, this broom \
was designed quiditch. It will up your game.",
price => 30_200u64
));
))?;
index_writer.add_document(doc!(
product_name => "Turbulobroom",
product_description => "You might have heard of this broom before : it is the sponsor of the Wales team.\
You'll enjoy its sharp turns, and rapid acceleration",
price => 29_240u64
));
))?;
index_writer.add_document(doc!(
product_name => "Broomio",
product_description => "Great value for the price. This broom is a market favorite",
price => 21_240u64
));
))?;
index_writer.add_document(doc!(
product_name => "Whack a Mole",
product_description => "Prime quality bat.",
price => 5_200u64
));
))?;
index_writer.commit()?;
let reader = index.reader()?;

View File

@@ -2,14 +2,11 @@
//
// In this example, we'll see how to define a tokenizer pipeline
// by aligning a bunch of `TokenFilter`.
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::NgramTokenizer;
use tantivy::Index;
use tantivy::{doc, Index};
fn main() -> tantivy::Result<()> {
// # Defining the schema
@@ -53,7 +50,7 @@ fn main() -> tantivy::Result<()> {
// for your unit tests... Or this example.
let index = Index::create_in_ram(schema.clone());
// here we are registering our custome tokenizer
// here we are registering our custom tokenizer
// this will store tokens of 3 characters each
index
.tokenizers()
@@ -65,13 +62,13 @@ fn main() -> tantivy::Result<()> {
// multithreaded.
//
// Here we use a buffer of 50MB per thread. Using a bigger
// heap for the indexer can increase its throughput.
// memory arena for the indexer can increase its throughput.
let mut index_writer = index.writer(50_000_000)?;
index_writer.add_document(doc!(
title => "The Old Man and the Sea",
body => "He was an old man who fished alone in a skiff in the Gulf Stream and \
he had gone eighty-four days now without taking a fish."
));
))?;
index_writer.add_document(doc!(
title => "Of Mice and Men",
body => r#"A few miles south of Soledad, the Salinas River drops in close to the hillside
@@ -82,14 +79,14 @@ fn main() -> tantivy::Result<()> {
fresh and green with every spring, carrying in their lower leaf junctures the
debris of the winters flooding; and sycamores with mottled, white, recumbent
limbs and branches that arch over the pool"#
));
))?;
index_writer.add_document(doc!(
title => "Frankenstein",
body => r#"You will rejoice to hear that no disaster has accompanied the commencement of an
enterprise which you have regarded with such evil forebodings. I arrived here
yesterday, and my first task is to assure my dear sister of my welfare and
increasing confidence in the success of my undertaking."#
));
))?;
index_writer.commit()?;
let reader = index.reader()?;

View File

@@ -0,0 +1,69 @@
// # DateTime field example
//
// This example shows how the DateTime field can be used
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Cardinality, DateOptions, Schema, Value, INDEXED, STORED, STRING};
use tantivy::Index;
fn main() -> tantivy::Result<()> {
// # Defining the schema
let mut schema_builder = Schema::builder();
let opts = DateOptions::from(INDEXED)
.set_stored()
.set_fast(Cardinality::SingleValue)
.set_precision(tantivy::DatePrecision::Seconds);
let occurred_at = schema_builder.add_date_field("occurred_at", opts);
let event_type = schema_builder.add_text_field("event", STRING | STORED);
let schema = schema_builder.build();
// # Indexing documents
let index = Index::create_in_ram(schema.clone());
let mut index_writer = index.writer(50_000_000)?;
let doc = schema.parse_document(
r#"{
"occurred_at": "2022-06-22T12:53:50.53Z",
"event": "pull-request"
}"#,
)?;
index_writer.add_document(doc)?;
let doc = schema.parse_document(
r#"{
"occurred_at": "2022-06-22T13:00:00.22Z",
"event": "comment"
}"#,
)?;
index_writer.add_document(doc)?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
// # Default fields: event_type
let query_parser = QueryParser::for_index(&index, vec![event_type]);
{
let query = query_parser.parse_query("event:comment")?;
let count_docs = searcher.search(&*query, &TopDocs::with_limit(5))?;
assert_eq!(count_docs.len(), 1);
}
{
let query = query_parser
.parse_query(r#"occurred_at:[2022-06-22T12:58:00Z TO 2022-06-23T00:00:00Z}"#)?;
let count_docs = searcher.search(&*query, &TopDocs::with_limit(4))?;
assert_eq!(count_docs.len(), 1);
for (_score, doc_address) in count_docs {
let retrieved_doc = searcher.doc(doc_address)?;
assert!(matches!(
retrieved_doc.get_first(occurred_at),
Some(Value::Date(_))
));
assert_eq!(
schema.to_json(&retrieved_doc),
r#"{"event":["comment"],"occurred_at":["2022-06-22T13:00:00.22Z"]}"#
);
}
}
Ok(())
}

View File

@@ -8,13 +8,10 @@
//
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::Index;
use tantivy::IndexReader;
use tantivy::{doc, Index, IndexReader};
// A simple helper function to fetch a single document
// given its id from our index.
@@ -59,8 +56,9 @@ fn main() -> tantivy::Result<()> {
// If it is `text`, let's make sure to keep it `raw` and let's avoid
// running any text processing on it.
// This is done by associating this field to the tokenizer named `raw`.
// Rather than building our [`TextOptions`](//docs.rs/tantivy/~0/tantivy/schema/struct.TextOptions.html) manually,
// We use the `STRING` shortcut. `STRING` stands for indexed (without term frequency or positions)
// Rather than building our
// [`TextOptions`](//docs.rs/tantivy/~0/tantivy/schema/struct.TextOptions.html) manually, We
// use the `STRING` shortcut. `STRING` stands for indexed (without term frequency or positions)
// and untokenized.
//
// Because we also want to be able to see this `id` in our returned documents,
@@ -79,21 +77,21 @@ fn main() -> tantivy::Result<()> {
index_writer.add_document(doc!(
isbn => "978-0099908401",
title => "The old Man and the see"
));
))?;
index_writer.add_document(doc!(
isbn => "978-0140177398",
title => "Of Mice and Men",
));
))?;
index_writer.add_document(doc!(
title => "Frankentein", //< Oops there is a typo here.
isbn => "978-9176370711",
));
))?;
index_writer.commit()?;
let reader = index.reader()?;
let frankenstein_isbn = Term::from_field_text(isbn, "978-9176370711");
// Oops our frankenstein doc seems mispelled
// Oops our frankenstein doc seems misspelled
let frankenstein_doc_misspelled = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
assert_eq!(
schema.to_json(&frankenstein_doc_misspelled),
@@ -125,7 +123,7 @@ fn main() -> tantivy::Result<()> {
index_writer.add_document(doc!(
title => "Frankenstein",
isbn => "978-9176370711",
));
))?;
// You are guaranteed that your clients will only observe your index in
// the state it was in after a commit.

View File

@@ -12,67 +12,101 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::FacetCollector;
use tantivy::query::AllQuery;
use tantivy::query::{AllQuery, TermQuery};
use tantivy::schema::*;
use tantivy::Index;
use tantivy::{doc, Index};
fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the
// sake of this example
let index_path = TempDir::new("tantivy_facet_example_dir")?;
// Let's create a temporary directory for the sake of this example
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("name", TEXT | STORED);
// this is our faceted field
schema_builder.add_facet_field("tags");
let name = schema_builder.add_text_field("felin_name", TEXT | STORED);
// this is our faceted field: its scientific classification
let classification = schema_builder.add_facet_field("classification", FacetOptions::default());
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let index = Index::create_in_dir(&index_path, schema.clone())?;
let mut index_writer = index.writer(50_000_000)?;
let name = schema.get_field("name").unwrap();
let tags = schema.get_field("tags").unwrap();
let mut index_writer = index.writer(30_000_000)?;
// For convenience, tantivy also comes with a macro to
// reduce the boilerplate above.
index_writer.add_document(doc!(
name => "the ditch",
tags => Facet::from("/pools/north")
));
name => "Cat",
classification => Facet::from("/Felidae/Felinae/Felis")
))?;
index_writer.add_document(doc!(
name => "little stacey",
tags => Facet::from("/pools/south")
));
name => "Canada lynx",
classification => Facet::from("/Felidae/Felinae/Lynx")
))?;
index_writer.add_document(doc!(
name => "Cheetah",
classification => Facet::from("/Felidae/Felinae/Acinonyx")
))?;
index_writer.add_document(doc!(
name => "Tiger",
classification => Facet::from("/Felidae/Pantherinae/Panthera")
))?;
index_writer.add_document(doc!(
name => "Lion",
classification => Facet::from("/Felidae/Pantherinae/Panthera")
))?;
index_writer.add_document(doc!(
name => "Jaguar",
classification => Facet::from("/Felidae/Pantherinae/Panthera")
))?;
index_writer.add_document(doc!(
name => "Sunda clouded leopard",
classification => Facet::from("/Felidae/Pantherinae/Neofelis")
))?;
index_writer.add_document(doc!(
name => "Fossa",
classification => Facet::from("/Eupleridae/Cryptoprocta")
))?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
{
let mut facet_collector = FacetCollector::for_field(classification);
facet_collector.add_facet("/Felidae");
let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
// This lists all of the facet counts, right below "/Felidae".
let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae").collect();
assert_eq!(
facets,
vec![
(&Facet::from("/Felidae/Felinae"), 3),
(&Facet::from("/Felidae/Pantherinae"), 4),
]
);
}
let mut facet_collector = FacetCollector::for_field(tags);
facet_collector.add_facet("/pools");
// Facets are also searchable.
//
// For instance a common UI pattern is to allow the user someone to click on a facet link
// (e.g: `Pantherinae`) to drill down and filter the current result set with this subfacet.
//
// The search would then look as follows.
let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
// This lists all of the facet counts
let facets: Vec<(&Facet, u64)> = facet_counts.get("/pools").collect();
assert_eq!(
facets,
vec![
(&Facet::from("/pools/north"), 1),
(&Facet::from("/pools/south"), 1),
]
);
// Check the reference doc for different ways to create a `Facet` object.
{
let facet = Facet::from("/Felidae/Pantherinae");
let facet_term = Term::from_facet(classification, &facet);
let facet_term_query = TermQuery::new(facet_term, IndexRecordOption::Basic);
let mut facet_collector = FacetCollector::for_field(classification);
facet_collector.add_facet("/Felidae/Pantherinae");
let facet_counts = searcher.search(&facet_term_query, &facet_collector)?;
let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae/Pantherinae").collect();
assert_eq!(
facets,
vec![
(&Facet::from("/Felidae/Pantherinae/Neofelis"), 1),
(&Facet::from("/Felidae/Pantherinae/Panthera"), 3),
]
);
}
Ok(())
}
use tempdir::TempDir;

View File

@@ -0,0 +1,98 @@
use std::collections::HashSet;
use tantivy::collector::TopDocs;
use tantivy::query::BooleanQuery;
use tantivy::schema::*;
use tantivy::{doc, DocId, Index, Score, SegmentReader};
fn main() -> tantivy::Result<()> {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", STORED);
let ingredient = schema_builder.add_facet_field("ingredient", FacetOptions::default());
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer(30_000_000)?;
index_writer.add_document(doc!(
title => "Fried egg",
ingredient => Facet::from("/ingredient/egg"),
ingredient => Facet::from("/ingredient/oil"),
))?;
index_writer.add_document(doc!(
title => "Scrambled egg",
ingredient => Facet::from("/ingredient/egg"),
ingredient => Facet::from("/ingredient/butter"),
ingredient => Facet::from("/ingredient/milk"),
ingredient => Facet::from("/ingredient/salt"),
))?;
index_writer.add_document(doc!(
title => "Egg rolls",
ingredient => Facet::from("/ingredient/egg"),
ingredient => Facet::from("/ingredient/garlic"),
ingredient => Facet::from("/ingredient/salt"),
ingredient => Facet::from("/ingredient/oil"),
ingredient => Facet::from("/ingredient/tortilla-wrap"),
ingredient => Facet::from("/ingredient/mushroom"),
))?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
{
let facets = vec![
Facet::from("/ingredient/egg"),
Facet::from("/ingredient/oil"),
Facet::from("/ingredient/garlic"),
Facet::from("/ingredient/mushroom"),
];
let query = BooleanQuery::new_multiterms_query(
facets
.iter()
.map(|key| Term::from_facet(ingredient, key))
.collect(),
);
let top_docs_by_custom_score =
TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
let ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
let facet_dict = ingredient_reader.facet_dict();
let query_ords: HashSet<u64> = facets
.iter()
.filter_map(|key| facet_dict.term_ord(key.encoded_str()).unwrap())
.collect();
let mut facet_ords_buffer: Vec<u64> = Vec::with_capacity(20);
move |doc: DocId, original_score: Score| {
ingredient_reader.facet_ords(doc, &mut facet_ords_buffer);
let missing_ingredients = facet_ords_buffer
.iter()
.filter(|ord| !query_ords.contains(ord))
.count();
let tweak = 1.0 / 4_f32.powi(missing_ingredients as i32);
original_score * tweak
}
});
let top_docs = searcher.search(&query, &top_docs_by_custom_score)?;
let titles: Vec<String> = top_docs
.iter()
.map(|(_, doc_id)| {
searcher
.doc(*doc_id)
.unwrap()
.get_first(title)
.unwrap()
.as_text()
.unwrap()
.to_owned()
})
.collect();
assert_eq!(titles, vec!["Fried egg", "Egg rolls"]);
}
Ok(())
}

View File

@@ -2,16 +2,12 @@
//
// Below is an example of creating an indexed integer field in your schema
// You can use RangeQuery to get a Count of all occurrences in a given range.
#[macro_use]
extern crate tantivy;
use tantivy::collector::Count;
use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INDEXED};
use tantivy::Index;
use tantivy::Result;
use tantivy::{doc, Index, Result};
fn run() -> Result<()> {
fn main() -> Result<()> {
// For the sake of simplicity, this schema will only have 1 field
let mut schema_builder = Schema::builder();
@@ -23,7 +19,7 @@ fn run() -> Result<()> {
{
let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
for year in 1950u64..2019u64 {
index_writer.add_document(doc!(year_field => year));
index_writer.add_document(doc!(year_field => year))?;
}
index_writer.commit()?;
// The index will be a range of years
@@ -37,7 +33,3 @@ fn run() -> Result<()> {
assert_eq!(num_60s_books, 10);
Ok(())
}
fn main() {
run().unwrap()
}

View File

@@ -1,4 +1,4 @@
// # Iterating docs and positioms.
// # Iterating docs and positions.
//
// At its core of tantivy, relies on a data structure
// called an inverted index.
@@ -9,11 +9,8 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::schema::*;
use tantivy::Index;
use tantivy::{DocId, DocSet, Postings};
use tantivy::{doc, DocSet, Index, Postings, TERMINATED};
fn main() -> tantivy::Result<()> {
// We first create a schema for the sake of the
@@ -25,12 +22,12 @@ fn main() -> tantivy::Result<()> {
let title = schema_builder.add_text_field("title", TEXT | STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 50_000_000)?;
index_writer.add_document(doc!(title => "The Old Man and the Sea"));
index_writer.add_document(doc!(title => "Of Mice and Men"));
index_writer.add_document(doc!(title => "The modern Promotheus"));
index_writer.add_document(doc!(title => "The Old Man and the Sea"))?;
index_writer.add_document(doc!(title => "Of Mice and Men"))?;
index_writer.add_document(doc!(title => "The modern Promotheus"))?;
index_writer.commit()?;
let reader = index.reader()?;
@@ -48,29 +45,28 @@ fn main() -> tantivy::Result<()> {
// Inverted index stands for the combination of
// - the term dictionary
// - the inverted lists associated to each terms and their positions
let inverted_index = segment_reader.inverted_index(title);
let inverted_index = segment_reader.inverted_index(title)?;
// A `Term` is a text token associated with a field.
// Let's go through all docs containing the term `title:the` and access their position
let term_the = Term::from_field_text(title, "the");
// This segment posting object is like a cursor over the documents matching the term.
// The `IndexRecordOption` arguments tells tantivy we will be interested in both term frequencies
// and positions.
// The `IndexRecordOption` arguments tells tantivy we will be interested in both term
// frequencies and positions.
//
// If you don't need all this information, you may get better performance by decompressing less
// information.
// If you don't need all this information, you may get better performance by decompressing
// less information.
if let Some(mut segment_postings) =
inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)
inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)?
{
// this buffer will be used to request for positions
let mut positions: Vec<u32> = Vec::with_capacity(100);
while segment_postings.advance() {
// the number of time the term appears in the document.
let doc_id: DocId = segment_postings.doc(); //< do not try to access this before calling advance once.
let mut doc_id = segment_postings.doc();
while doc_id != TERMINATED {
// This MAY contains deleted documents as well.
if segment_reader.is_deleted(doc_id) {
doc_id = segment_postings.advance();
continue;
}
@@ -89,6 +85,7 @@ fn main() -> tantivy::Result<()> {
// Doc 2: TermFreq 1: [0]
// ```
println!("Doc {}: TermFreq {}: {:?}", doc_id, term_freq, positions);
doc_id = segment_postings.advance();
}
}
}
@@ -109,22 +106,27 @@ fn main() -> tantivy::Result<()> {
// Inverted index stands for the combination of
// - the term dictionary
// - the inverted lists associated to each terms and their positions
let inverted_index = segment_reader.inverted_index(title);
let inverted_index = segment_reader.inverted_index(title)?;
// This segment posting object is like a cursor over the documents matching the term.
// The `IndexRecordOption` arguments tells tantivy we will be interested in both term frequencies
// and positions.
// The `IndexRecordOption` arguments tells tantivy we will be interested in both term
// frequencies and positions.
//
// If you don't need all this information, you may get better performance by decompressing less
// information.
// If you don't need all this information, you may get better performance by decompressing
// less information.
if let Some(mut block_segment_postings) =
inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)
inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)?
{
while block_segment_postings.advance() {
loop {
let docs = block_segment_postings.docs();
if docs.is_empty() {
break;
}
// Once again these docs MAY contains deleted documents as well.
let docs = block_segment_postings.docs();
// Prints `Docs [0, 2].`
println!("Docs {:?}", docs);
block_segment_postings.advance();
}
}
}

105
examples/json_field.rs Normal file
View File

@@ -0,0 +1,105 @@
// # Json field example
//
// This example shows how the json field can be used
// to make tantivy partially schemaless by setting it as
// default query parser field.
use tantivy::collector::{Count, TopDocs};
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, STORED, STRING, TEXT};
use tantivy::Index;
fn main() -> tantivy::Result<()> {
// # Defining the schema
let mut schema_builder = Schema::builder();
schema_builder.add_date_field("timestamp", FAST | STORED);
let event_type = schema_builder.add_text_field("event_type", STRING | STORED);
let attributes = schema_builder.add_json_field("attributes", STORED | TEXT);
let schema = schema_builder.build();
// # Indexing documents
let index = Index::create_in_ram(schema.clone());
let mut index_writer = index.writer(50_000_000)?;
let doc = schema.parse_document(
r#"{
"timestamp": "2022-02-22T23:20:50.53Z",
"event_type": "click",
"attributes": {
"target": "submit-button",
"cart": {"product_id": 103},
"description": "the best vacuum cleaner ever"
}
}"#,
)?;
index_writer.add_document(doc)?;
let doc = schema.parse_document(
r#"{
"timestamp": "2022-02-22T23:20:51.53Z",
"event_type": "click",
"attributes": {
"target": "submit-button",
"cart": {"product_id": 133},
"description": "das keyboard",
"event_type": "holiday-sale"
}
}"#,
)?;
index_writer.add_document(doc)?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
// # Default fields: event_type and attributes
// By setting `attributes` as a default field, the "attributes." prefix can be omitted, e.g. "target"
// instead of "attributes.target"
let query_parser = QueryParser::for_index(&index, vec![event_type, attributes]);
{
let query = query_parser.parse_query("target:submit-button")?;
let count_docs = searcher.search(&*query, &TopDocs::with_limit(2))?;
assert_eq!(count_docs.len(), 2);
}
{
let query = query_parser.parse_query("target:submit")?;
let count_docs = searcher.search(&*query, &TopDocs::with_limit(2))?;
assert_eq!(count_docs.len(), 2);
}
{
let query = query_parser.parse_query("cart.product_id:103")?;
let count_docs = searcher.search(&*query, &Count)?;
assert_eq!(count_docs, 1);
}
{
let query = query_parser.parse_query("click AND cart.product_id:133")?;
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
assert_eq!(hits.len(), 1);
}
{
// The sub-fields in the json field marked as default field still need to be explicitly
// addressed
let query = query_parser.parse_query("click AND 133")?;
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
assert_eq!(hits.len(), 0);
}
{
// Default json fields are ignored if they collide with the schema
let query = query_parser.parse_query("event_type:holiday-sale")?;
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
assert_eq!(hits.len(), 0);
}
// # Query via full attribute path
{
// This only searches in our schema's `event_type` field
let query = query_parser.parse_query("event_type:click")?;
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
assert_eq!(hits.len(), 2);
}
{
// Default json fields can still be accessed by full path
let query = query_parser.parse_query("attributes.event_type:holiday-sale")?;
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
assert_eq!(hits.len(), 1);
}
Ok(())
}

View File

@@ -25,14 +25,12 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::Opstamp;
use tantivy::{Index, IndexWriter};
use tantivy::{doc, Index, IndexWriter, Opstamp, TantivyError};
fn main() -> tantivy::Result<()> {
// # Defining the schema
@@ -49,10 +47,9 @@ fn main() -> tantivy::Result<()> {
thread::spawn(move || {
// we index 100 times the document... for the sake of the example.
for i in 0..100 {
let opstamp = {
// A read lock is sufficient here.
let index_writer_rlock = index_writer_clone_1.read().unwrap();
index_writer_rlock.add_document(
let opstamp = index_writer_clone_1
.read().unwrap() //< A read lock is sufficient here.
.add_document(
doc!(
title => "Of Mice and Men",
body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
@@ -63,11 +60,11 @@ fn main() -> tantivy::Result<()> {
fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool"
))
};
))?;
println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
thread::sleep(Duration::from_millis(20));
}
Result::<(), TantivyError>::Ok(())
});
// # Second indexing thread.
@@ -83,19 +80,21 @@ fn main() -> tantivy::Result<()> {
index_writer_rlock.add_document(doc!(
title => "Manufacturing consent",
body => "Some great book description..."
))
))?
};
println!("add doc {} from thread 2 - opstamp {}", i, opstamp);
thread::sleep(Duration::from_millis(10));
}
Result::<(), TantivyError>::Ok(())
});
// # In the main thread, we commit 10 times, once every 500ms.
for _ in 0..10 {
let opstamp: Opstamp = {
// Committing or rollbacking on the other hand requires write lock. This will block other threads.
// Committing or rollbacking on the other hand requires write lock. This will block
// other threads.
let mut index_writer_wlock = index_writer.write().unwrap();
index_writer_wlock.commit().unwrap()
index_writer_wlock.commit()?
};
println!("committed with opstamp {}", opstamp);
thread::sleep(Duration::from_millis(500));

View File

@@ -0,0 +1,135 @@
// # Pre-tokenized text example
//
// This example shows how to use pre-tokenized text. Sometimes you might
// want to index and search through text which is already split into
// tokens by some external tool.
//
// In this example we will:
// - use tantivy tokenizer to create tokens and load them directly into tantivy,
// - import tokenized text straight from json,
// - perform a search on documents with pre-tokenized text
use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, Tokenizer};
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;
fn pre_tokenize_text(text: &str) -> Vec<Token> {
let mut token_stream = SimpleTokenizer.token_stream(text);
let mut tokens = vec![];
while token_stream.advance() {
tokens.push(token_stream.token().clone());
}
tokens
}
fn main() -> tantivy::Result<()> {
let index_path = TempDir::new()?;
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("title", TEXT | STORED);
schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_dir(&index_path, schema.clone())?;
let mut index_writer = index.writer(50_000_000)?;
// We can create a document manually, by setting the fields
// one by one in a Document object.
let title = schema.get_field("title").unwrap();
let body = schema.get_field("body").unwrap();
let title_text = "The Old Man and the Sea";
let body_text = "He was an old man who fished alone in a skiff in the Gulf Stream";
// Content of our first document
// We create `PreTokenizedString` which contains original text and vector of tokens
let title_tok = PreTokenizedString {
text: String::from(title_text),
tokens: pre_tokenize_text(title_text),
};
println!(
"Original text: \"{}\" and tokens: {:?}",
title_tok.text, title_tok.tokens
);
let body_tok = PreTokenizedString {
text: String::from(body_text),
tokens: pre_tokenize_text(body_text),
};
// Now lets create a document and add our `PreTokenizedString`
let old_man_doc = doc!(title => title_tok, body => body_tok);
// ... now let's just add it to the IndexWriter
index_writer.add_document(old_man_doc)?;
// Pretokenized text can also be fed as JSON
let short_man_json = r#"{
"title":[{
"text":"The Old Man",
"tokens":[
{"offset_from":0,"offset_to":3,"position":0,"text":"The","position_length":1},
{"offset_from":4,"offset_to":7,"position":1,"text":"Old","position_length":1},
{"offset_from":8,"offset_to":11,"position":2,"text":"Man","position_length":1}
]
}]
}"#;
let short_man_doc = schema.parse_document(short_man_json)?;
index_writer.add_document(short_man_doc)?;
// Let's commit changes
index_writer.commit()?;
// ... and now is the time to query our index
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.try_into()?;
let searcher = reader.searcher();
// We want to get documents with token "Man", we will use TermQuery to do it
// Using PreTokenizedString means the tokens are stored as-is, avoiding stemming
// and lowercasing, which preserves full words in their original form
let query = TermQuery::new(
Term::from_field_text(title, "Man"),
IndexRecordOption::Basic,
);
let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count))?;
assert_eq!(count, 2);
// Now let's print out the results.
// Note that the tokens are not stored along with the original text
// in the document store
for (_score, doc_address) in top_docs {
let retrieved_doc = searcher.doc(doc_address)?;
println!("Document: {}", schema.to_json(&retrieved_doc));
}
// Contrary to the previous query, when we search for the "man" term we
// should get no results, as it's not one of the indexed tokens. SimpleTokenizer
// only splits text on whitespace / punctuation.
let query = TermQuery::new(
Term::from_field_text(title, "man"),
IndexRecordOption::Basic,
);
let (_top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count))?;
assert_eq!(count, 0);
Ok(())
}

View File

@@ -7,19 +7,16 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::Index;
use tantivy::{Snippet, SnippetGenerator};
use tempdir::TempDir;
use tantivy::{doc, Index, Snippet, SnippetGenerator};
use tempfile::TempDir;
fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the
// sake of this example
let index_path = TempDir::new("tantivy_example_dir")?;
let index_path = TempDir::new()?;
// # Defining the schema
let mut schema_builder = Schema::builder();
@@ -28,7 +25,7 @@ fn main() -> tantivy::Result<()> {
let schema = schema_builder.build();
// # Indexing documents
let index = Index::create_in_dir(&index_path, schema.clone())?;
let index = Index::create_in_dir(&index_path, schema)?;
let mut index_writer = index.writer(50_000_000)?;
@@ -43,7 +40,7 @@ fn main() -> tantivy::Result<()> {
fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool"
));
))?;
// ...
index_writer.commit()?;
@@ -60,7 +57,10 @@ fn main() -> tantivy::Result<()> {
let doc = searcher.doc(doc_address)?;
let snippet = snippet_generator.snippet_from_doc(&doc);
println!("Document score {}:", score);
println!("title: {}", doc.get_first(title).unwrap().text().unwrap());
println!(
"title: {}",
doc.get_first(title).unwrap().as_text().unwrap()
);
println!("snippet: {}", snippet.to_html());
println!("custom highlighting: {}", highlight(snippet));
}
@@ -72,14 +72,14 @@ fn highlight(snippet: Snippet) -> String {
let mut result = String::new();
let mut start_from = 0;
for (start, end) in snippet.highlighted().iter().map(|h| h.bounds()) {
result.push_str(&snippet.fragments()[start_from..start]);
for fragment_range in snippet.highlighted() {
result.push_str(&snippet.fragment()[start_from..fragment_range.start]);
result.push_str(" --> ");
result.push_str(&snippet.fragments()[start..end]);
result.push_str(&snippet.fragment()[fragment_range.clone()]);
result.push_str(" <-- ");
start_from = end;
start_from = fragment_range.end;
}
result.push_str(&snippet.fragments()[start_from..]);
result.push_str(&snippet.fragment()[start_from..]);
result
}

View File

@@ -11,13 +11,11 @@
// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::*;
use tantivy::Index;
use tantivy::{doc, Index};
fn main() -> tantivy::Result<()> {
// this example assumes you understand the content in `basic_search`
@@ -52,7 +50,7 @@ fn main() -> tantivy::Result<()> {
// This tokenizer lowers all of the text (to help with stop word matching)
// then removes all instances of `the` and `and` from the corpus
let tokenizer = SimpleTokenizer
let tokenizer = TextAnalyzer::from(SimpleTokenizer)
.filter(LowerCaser)
.filter(StopWordFilter::remove(vec![
"the".to_string(),
@@ -70,7 +68,7 @@ fn main() -> tantivy::Result<()> {
title => "The Old Man and the Sea",
body => "He was an old man who fished alone in a skiff in the Gulf Stream and \
he had gone eighty-four days now without taking a fish."
));
))?;
index_writer.add_document(doc!(
title => "Of Mice and Men",
@@ -82,7 +80,7 @@ fn main() -> tantivy::Result<()> {
fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool"
));
))?;
index_writer.add_document(doc!(
title => "Frankenstein",
@@ -90,7 +88,7 @@ fn main() -> tantivy::Result<()> {
enterprise which you have regarded with such evil forebodings. I arrived here \
yesterday, and my first task is to assure my dear sister of my welfare and \
increasing confidence in the success of my undertaking."
));
))?;
index_writer.commit()?;

220
examples/warmer.rs Normal file
View File

@@ -0,0 +1,220 @@
use std::cmp::Reverse;
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, RwLock, Weak};
use tantivy::collector::TopDocs;
use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser;
use tantivy::schema::{Field, Schema, FAST, TEXT};
use tantivy::{
doc, DocAddress, DocId, Index, IndexReader, Opstamp, Searcher, SearcherGeneration, SegmentId,
SegmentReader, Warmer,
};
// This example shows how warmers can be used to
// load values from an external source using the Warmer API.
//
// In this example, we assume an e-commerce search engine.
type ProductId = u64;
/// Price
type Price = u32;
pub trait PriceFetcher: Send + Sync + 'static {
fn fetch_prices(&self, product_ids: &[ProductId]) -> Vec<Price>;
}
struct DynamicPriceColumn {
field: Field,
price_cache: RwLock<HashMap<(SegmentId, Option<Opstamp>), Arc<Vec<Price>>>>,
price_fetcher: Box<dyn PriceFetcher>,
}
impl DynamicPriceColumn {
pub fn with_product_id_field<T: PriceFetcher>(field: Field, price_fetcher: T) -> Self {
DynamicPriceColumn {
field,
price_cache: Default::default(),
price_fetcher: Box::new(price_fetcher),
}
}
pub fn price_for_segment(&self, segment_reader: &SegmentReader) -> Option<Arc<Vec<Price>>> {
let segment_key = (segment_reader.segment_id(), segment_reader.delete_opstamp());
self.price_cache.read().unwrap().get(&segment_key).cloned()
}
}
impl Warmer for DynamicPriceColumn {
fn warm(&self, searcher: &Searcher) -> tantivy::Result<()> {
for segment in searcher.segment_readers() {
let key = (segment.segment_id(), segment.delete_opstamp());
let product_id_reader = segment.fast_fields().u64(self.field)?;
let product_ids: Vec<ProductId> = segment
.doc_ids_alive()
.map(|doc| product_id_reader.get(doc))
.collect();
let mut prices_it = self.price_fetcher.fetch_prices(&product_ids).into_iter();
let mut price_vals: Vec<Price> = Vec::new();
for doc in 0..segment.max_doc() {
if segment.is_deleted(doc) {
price_vals.push(0);
} else {
price_vals.push(prices_it.next().unwrap())
}
}
self.price_cache
.write()
.unwrap()
.insert(key, Arc::new(price_vals));
}
Ok(())
}
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) {
let live_segment_id_and_delete_ops: HashSet<(SegmentId, Option<Opstamp>)> =
live_generations
.iter()
.flat_map(|gen| gen.segments())
.map(|(&segment_id, &opstamp)| (segment_id, opstamp))
.collect();
let mut price_cache_wrt = self.price_cache.write().unwrap();
// let price_cache = std::mem::take(&mut *price_cache_wrt);
// Drain would be nicer here.
*price_cache_wrt = std::mem::take(&mut *price_cache_wrt)
.into_iter()
.filter(|(seg_id_and_op, _)| !live_segment_id_and_delete_ops.contains(seg_id_and_op))
.collect();
}
}
/// For the sake of this example, the table is just an editable HashMap behind a RwLock.
/// This map represents a map (ProductId -> Price)
///
/// In practice, it could be fetching things from an external service, like a SQL table.
#[derive(Default, Clone)]
pub struct ExternalPriceTable {
prices: Arc<RwLock<HashMap<ProductId, Price>>>,
}
impl ExternalPriceTable {
pub fn update_price(&self, product_id: ProductId, price: Price) {
let mut prices_wrt = self.prices.write().unwrap();
prices_wrt.insert(product_id, price);
}
}
impl PriceFetcher for ExternalPriceTable {
fn fetch_prices(&self, product_ids: &[ProductId]) -> Vec<Price> {
let prices_read = self.prices.read().unwrap();
product_ids
.iter()
.map(|product_id| prices_read.get(product_id).cloned().unwrap_or(0))
.collect()
}
}
fn main() -> tantivy::Result<()> {
// Declaring our schema.
let mut schema_builder = Schema::builder();
// The product id is assumed to be a primary id for our external price source.
let product_id = schema_builder.add_u64_field("product_id", FAST);
let text = schema_builder.add_text_field("text", TEXT);
let schema: Schema = schema_builder.build();
let price_table = ExternalPriceTable::default();
let price_dynamic_column = Arc::new(DynamicPriceColumn::with_product_id_field(
product_id,
price_table.clone(),
));
price_table.update_price(OLIVE_OIL, 12);
price_table.update_price(GLOVES, 13);
price_table.update_price(SNEAKERS, 80);
const OLIVE_OIL: ProductId = 323423;
const GLOVES: ProductId = 3966623;
const SNEAKERS: ProductId = 23222;
let index = Index::create_in_ram(schema);
let mut writer = index.writer_with_num_threads(1, 10_000_000)?;
writer.add_document(doc!(product_id=>OLIVE_OIL, text=>"cooking olive oil from greece"))?;
writer.add_document(doc!(product_id=>GLOVES, text=>"kitchen gloves, perfect for cooking"))?;
writer.add_document(doc!(product_id=>SNEAKERS, text=>"uber sweet sneakers"))?;
writer.commit()?;
let warmers: Vec<Weak<dyn Warmer>> = vec![Arc::downgrade(
&(price_dynamic_column.clone() as Arc<dyn Warmer>),
)];
let reader: IndexReader = index.reader_builder().warmers(warmers).try_into()?;
reader.reload()?;
let query_parser = QueryParser::for_index(&index, vec![text]);
let query = query_parser.parse_query("cooking")?;
let searcher = reader.searcher();
let score_by_price = move |segment_reader: &SegmentReader| {
let price = price_dynamic_column
.price_for_segment(segment_reader)
.unwrap();
move |doc_id: DocId| Reverse(price[doc_id as usize])
};
let most_expensive_first = TopDocs::with_limit(10).custom_score(score_by_price);
let hits = searcher.search(&query, &most_expensive_first)?;
assert_eq!(
&hits,
&[
(
Reverse(12u32),
DocAddress {
segment_ord: 0,
doc_id: 0u32
}
),
(
Reverse(13u32),
DocAddress {
segment_ord: 0,
doc_id: 1u32
}
),
]
);
// Olive oil just got more expensive!
price_table.update_price(OLIVE_OIL, 15);
// The price update is directly reflected upon `reload`.
//
// Be careful here though!...
// You may have spotted that we are still using the same `Searcher`.
//
// It is up to the `Warmer` implementer to decide how
// to control this behavior.
reader.reload()?;
let hits_with_new_prices = searcher.search(&query, &most_expensive_first)?;
assert_eq!(
&hits_with_new_prices,
&[
(
Reverse(13u32),
DocAddress {
segment_ord: 0,
doc_id: 1u32
}
),
(
Reverse(15u32),
DocAddress {
segment_ord: 0,
doc_id: 0u32
}
),
]
);
Ok(())
}

View File

@@ -1,4 +1,3 @@
use tantivy;
use tantivy::schema::*;
// # Document from json
@@ -22,7 +21,7 @@ fn main() -> tantivy::Result<()> {
}"#;
// We can parse our document
let _mice_and_men_doc = schema.parse_document(&mice_and_men_doc_json)?;
let _mice_and_men_doc = schema.parse_document(mice_and_men_doc_json)?;
// Multi-valued field are allowed, they are
// expressed in JSON by an array.
@@ -31,7 +30,7 @@ fn main() -> tantivy::Result<()> {
"title": ["Frankenstein", "The Modern Prometheus"],
"year": 1818
}"#;
let _frankenstein_doc = schema.parse_document(&frankenstein_json)?;
let _frankenstein_doc = schema.parse_document(frankenstein_json)?;
// Note that the schema is saved in your index directory.
//

View File

@@ -0,0 +1,26 @@
[package]
name = "fastfield_codecs"
version = "0.2.0"
authors = ["Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2021"
description = "Fast field codecs used by tantivy"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
common = { version = "0.3", path = "../common/", package = "tantivy-common" }
tantivy-bitpacker = { version="0.2", path = "../bitpacker/" }
ownedbytes = { version = "0.3.0", path = "../ownedbytes" }
prettytable-rs = {version="0.9.0", optional= true}
rand = {version="0.8.3", optional= true}
[dev-dependencies]
more-asserts = "0.3.0"
proptest = "1.0.0"
rand = "0.8.3"
[features]
bin = ["prettytable-rs", "rand"]
default = ["bin"]

View File

@@ -0,0 +1,68 @@
# Fast Field Codecs
This crate contains various fast field codecs, used to compress/decompress fast field data in tantivy.
## Contributing
Contributing is pretty straightforward. Since the bitpacked codec is the simplest compressor, you can use it as a reference.
A codec needs to implement 2 traits:
- A reader implementing `FastFieldCodecReader` to read the codec.
- A serializer implementing `FastFieldCodecSerializer` for compression estimation and codec name + id.
### Tests
Once the traits are implemented, test and benchmark integration is pretty easy (see `test_with_codec_data_sets` and `bench.rs`).
Make sure to add the codec to `main.rs`, which tests the compression ratio and estimation against different data sets. You can run it with:
```
cargo run --features bin
```
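For orientation, here is roughly how a codec is exercised end to end. The method names (`serialize`, `open_from_bytes`, `get_val`) mirror the benchmark code in this PR and may shift slightly as the reader/serializer traits evolve.
```rust
use fastfield_codecs::bitpacked::BitpackedCodec;
use fastfield_codecs::FastFieldCodec;
use ownedbytes::OwnedBytes;

fn main() {
    let data: Vec<u64> = (100..10_100u64).collect();
    let data: &[u64] = &data;

    // Compress the column into a byte buffer...
    let mut bytes = vec![];
    BitpackedCodec::serialize(&mut bytes, &data).unwrap();

    // ...then reopen it and read values back by position.
    let reader = BitpackedCodec::open_from_bytes(OwnedBytes::new(bytes)).unwrap();
    assert_eq!(reader.get_val(3), data[3]);
}
```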
### TODO
- Add real-world data sets to the comparison
- Add a codec to cover sparse data sets
### Codec Comparison
```
+----------------------------------+-------------------+------------------------+
| | Compression Ratio | Compression Estimation |
+----------------------------------+-------------------+------------------------+
| Autoincrement | | |
+----------------------------------+-------------------+------------------------+
| LinearInterpol | 0.000039572664 | 0.000004396963 |
+----------------------------------+-------------------+------------------------+
| MultiLinearInterpol | 0.1477348 | 0.17275847 |
+----------------------------------+-------------------+------------------------+
| Bitpacked | 0.28126493 | 0.28125 |
+----------------------------------+-------------------+------------------------+
| Monotonically increasing concave | | |
+----------------------------------+-------------------+------------------------+
| LinearInterpol | 0.25003937 | 0.26562938 |
+----------------------------------+-------------------+------------------------+
| MultiLinearInterpol | 0.190665 | 0.1883836 |
+----------------------------------+-------------------+------------------------+
| Bitpacked | 0.31251436 | 0.3125 |
+----------------------------------+-------------------+------------------------+
| Monotonically increasing convex | | |
+----------------------------------+-------------------+------------------------+
| LinearInterpol | 0.25003937 | 0.28125438 |
+----------------------------------+-------------------+------------------------+
| MultiLinearInterpol | 0.18676 | 0.2040086 |
+----------------------------------+-------------------+------------------------+
| Bitpacked | 0.31251436 | 0.3125 |
+----------------------------------+-------------------+------------------------+
| Almost monotonically increasing | | |
+----------------------------------+-------------------+------------------------+
| LinearInterpol | 0.14066513 | 0.1562544 |
+----------------------------------+-------------------+------------------------+
| MultiLinearInterpol | 0.16335973 | 0.17275847 |
+----------------------------------+-------------------+------------------------+
| Bitpacked | 0.28126493 | 0.28125 |
+----------------------------------+-------------------+------------------------+
```

View File

@@ -0,0 +1,90 @@
#![feature(test)]
extern crate test;
#[cfg(test)]
mod tests {
use fastfield_codecs::bitpacked::BitpackedCodec;
use fastfield_codecs::blockwise_linear::BlockwiseLinearCodec;
use fastfield_codecs::linear::LinearCodec;
use fastfield_codecs::*;
fn get_data() -> Vec<u64> {
let mut data: Vec<_> = (100..55000_u64)
.map(|num| num + rand::random::<u8>() as u64)
.collect();
data.push(99_000);
data.insert(1000, 2000);
data.insert(2000, 100);
data.insert(3000, 4100);
data.insert(4000, 100);
data.insert(5000, 800);
data
}
fn value_iter() -> impl Iterator<Item = u64> {
0..20_000
}
fn bench_get<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
let mut bytes = vec![];
Codec::serialize(&mut bytes, &data).unwrap();
let reader = Codec::open_from_bytes(OwnedBytes::new(bytes)).unwrap();
b.iter(|| {
let mut sum = 0u64;
for pos in value_iter() {
let val = reader.get_val(pos as u64);
debug_assert_eq!(data[pos as usize], val);
sum = sum.wrapping_add(val);
}
sum
});
}
fn bench_create<S: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
let mut bytes = vec![];
b.iter(|| {
S::serialize(&mut bytes, &data).unwrap();
});
}
use ownedbytes::OwnedBytes;
use test::Bencher;
#[bench]
fn bench_fastfield_bitpack_create(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_create::<BitpackedCodec>(b, &data);
}
#[bench]
fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_create::<LinearCodec>(b, &data);
}
#[bench]
fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_create::<BlockwiseLinearCodec>(b, &data);
}
#[bench]
fn bench_fastfield_bitpack_get(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_get::<BitpackedCodec>(b, &data);
}
#[bench]
fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_get::<LinearCodec>(b, &data);
}
#[bench]
fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_get::<BlockwiseLinearCodec>(b, &data);
}
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
let min_value = data.iter().cloned().min().unwrap_or(0);
let max_value = data.iter().cloned().max().unwrap_or(0);
FastFieldStats {
min_value,
max_value,
num_vals: data.len() as u64,
}
}
}

View File

@@ -0,0 +1,186 @@
use std::io::{self, Write};
use common::BinarySerializable;
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::{FastFieldCodec, FastFieldCodecType, FastFieldDataAccess};
/// Reader for the bitpacked fast field codec.
#[derive(Clone)]
pub struct BitpackedReader {
data: OwnedBytes,
bit_unpacker: BitUnpacker,
min_value_u64: u64,
max_value_u64: u64,
num_vals: u64,
}
impl FastFieldDataAccess for BitpackedReader {
#[inline]
fn get_val(&self, doc: u64) -> u64 {
self.min_value_u64 + self.bit_unpacker.get(doc, &self.data)
}
#[inline]
fn min_value(&self) -> u64 {
self.min_value_u64
}
#[inline]
fn max_value(&self) -> u64 {
self.max_value_u64
}
#[inline]
fn num_vals(&self) -> u64 {
self.num_vals
}
}
pub struct BitpackedSerializerLegacy<'a, W: 'a + Write> {
bit_packer: BitPacker,
write: &'a mut W,
min_value: u64,
num_vals: u64,
amplitude: u64,
num_bits: u8,
}
impl<'a, W: Write> BitpackedSerializerLegacy<'a, W> {
/// Creates a new fast field serializer.
///
/// The serializer in fact encodes the values by bitpacking
/// `(val - min_value)`.
///
/// It requires a `min_value` and a `max_value` to compute
/// the minimum number of bits required to encode the values.
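///
/// Illustrative example (numbers made up): with `min_value = 100` and `max_value = 107`,
/// the amplitude is 7, so `compute_num_bits(7) = 3` bits are used per value.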
pub fn open(
write: &'a mut W,
min_value: u64,
max_value: u64,
) -> io::Result<BitpackedSerializerLegacy<'a, W>> {
assert!(min_value <= max_value);
let amplitude = max_value - min_value;
let num_bits = compute_num_bits(amplitude);
let bit_packer = BitPacker::new();
Ok(BitpackedSerializerLegacy {
bit_packer,
write,
min_value,
num_vals: 0,
amplitude,
num_bits,
})
}
/// Pushes a new value to the currently open u64 fast field.
#[inline]
pub fn add_val(&mut self, val: u64) -> io::Result<()> {
let val_to_write: u64 = val - self.min_value;
self.bit_packer
.write(val_to_write, self.num_bits, &mut self.write)?;
self.num_vals += 1;
Ok(())
}
pub fn close_field(mut self) -> io::Result<()> {
self.bit_packer.close(&mut self.write)?;
self.min_value.serialize(&mut self.write)?;
self.amplitude.serialize(&mut self.write)?;
self.num_vals.serialize(&mut self.write)?;
Ok(())
}
}
pub struct BitpackedCodec;
impl FastFieldCodec for BitpackedCodec {
/// The CODEC_TYPE is an enum value used for serialization.
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Bitpacked;
type Reader = BitpackedReader;
/// Opens a fast field given a file.
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
let footer_offset = bytes.len() - 24;
let (data, mut footer) = bytes.split(footer_offset);
let min_value = u64::deserialize(&mut footer)?;
let amplitude = u64::deserialize(&mut footer)?;
let num_vals = u64::deserialize(&mut footer)?;
let max_value = min_value + amplitude;
let num_bits = compute_num_bits(amplitude);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(BitpackedReader {
data,
bit_unpacker,
min_value_u64: min_value,
max_value_u64: max_value,
num_vals,
})
}
/// Serializes the data by bitpacking.
///
/// The serializer in fact encodes the values by bitpacking
/// `(val - min_value)`.
///
/// It requires a `min_value` and a `max_value` to compute
/// the minimum number of bits required to encode the values.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &dyn FastFieldDataAccess,
) -> io::Result<()> {
let mut serializer = BitpackedSerializerLegacy::open(
write,
fastfield_accessor.min_value(),
fastfield_accessor.max_value(),
)?;
for val in fastfield_accessor.iter() {
serializer.add_val(val)?;
}
serializer.close_field()?;
Ok(())
}
fn is_applicable(_fastfield_accessor: &impl FastFieldDataAccess) -> bool {
true
}
fn estimate(fastfield_accessor: &impl FastFieldDataAccess) -> f32 {
let amplitude = fastfield_accessor.max_value() - fastfield_accessor.min_value();
let num_bits = compute_num_bits(amplitude);
let num_bits_uncompressed = 64;
num_bits as f32 / num_bits_uncompressed as f32
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) {
crate::tests::create_and_validate::<BitpackedCodec>(data, name);
}
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
create_and_validate(&data, name);
}
}
#[test]
fn bitpacked_fast_field_rand() {
for _ in 0..500 {
let mut data = (0..1 + rand::random::<u8>() as usize)
.map(|_| rand::random::<i64>() as u64 / 2)
.collect::<Vec<_>>();
create_and_validate(&data, "rand");
data.reverse();
create_and_validate(&data, "rand");
}
}
}

View File

@@ -0,0 +1,439 @@
//! The BlockwiseLinear codec uses linear interpolation to guess values and stores the
//! deviation from that guess, working in blocks of 512 values.
//!
//! With a CHUNK_SIZE of 512 and 29 bytes of metadata per block, the metadata overhead is
//! 232 / 512 ≈ 0.45 bits per element. The additional space required per element in a block is
//! determined by the maximum deviation of the linear interpolation estimate within that block.
//!
//! E.g. if the maximum deviation within a block is 12, every element in that block costs 4 bits.
//!
//! Size per block:
//! Num Elements * num_bits(Maximum Deviation from Interpolation) + 29 bytes of metadata
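//!
//! Worked example (illustrative numbers): a full block of 512 values with a maximum deviation
//! of 12 needs `compute_num_bits(12) = 4` bits per value, i.e. 512 * 4 bits + 29 bytes of
//! metadata = 256 + 29 = 285 bytes for the block, or roughly 4.45 bits per value.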
use std::io::{self, Read, Write};
use std::ops::Sub;
use common::{BinarySerializable, CountingWriter, DeserializeFrom};
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::linear::{get_calculated_value, get_slope};
use crate::{FastFieldCodec, FastFieldCodecType, FastFieldDataAccess};
const CHUNK_SIZE: u64 = 512;
/// Reader for the blockwise linear codec.
#[derive(Clone)]
pub struct BlockwiseLinearReader {
data: OwnedBytes,
pub footer: BlockwiseLinearFooter,
}
#[derive(Clone, Debug, Default)]
struct Function {
// The offset in the data is required, because we have different bit_widths per block
data_start_offset: u64,
// start_pos in the block will be CHUNK_SIZE * BLOCK_NUM
start_pos: u64,
// only used during serialization, 0 after deserialization
end_pos: u64,
// only used during serialization, 0 after deserialization
value_start_pos: u64,
// only used during serialization, 0 after deserialization
value_end_pos: u64,
slope: f32,
// The offset so that all values are positive when writing them
positive_val_offset: u64,
num_bits: u8,
bit_unpacker: BitUnpacker,
}
impl Function {
fn calc_slope(&mut self) {
let num_vals = self.end_pos - self.start_pos;
self.slope = get_slope(self.value_start_pos, self.value_end_pos, num_vals);
}
// Split the interpolation into two functions: truncate `self` and return the second half.
fn split(&mut self, split_pos: u64, split_pos_value: u64) -> Function {
let mut new_function = Function {
start_pos: split_pos,
end_pos: self.end_pos,
value_start_pos: split_pos_value,
value_end_pos: self.value_end_pos,
..Default::default()
};
new_function.calc_slope();
self.end_pos = split_pos;
self.value_end_pos = split_pos_value;
self.calc_slope();
new_function
}
}
impl BinarySerializable for Function {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
self.data_start_offset.serialize(write)?;
self.value_start_pos.serialize(write)?;
self.positive_val_offset.serialize(write)?;
self.slope.serialize(write)?;
self.num_bits.serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Function> {
let data_start_offset = u64::deserialize(reader)?;
let value_start_pos = u64::deserialize(reader)?;
let offset = u64::deserialize(reader)?;
let slope = f32::deserialize(reader)?;
let num_bits = u8::deserialize(reader)?;
let interpolation = Function {
data_start_offset,
value_start_pos,
positive_val_offset: offset,
num_bits,
bit_unpacker: BitUnpacker::new(num_bits),
slope,
..Default::default()
};
Ok(interpolation)
}
}
#[derive(Clone, Debug)]
pub struct BlockwiseLinearFooter {
pub num_vals: u64,
pub min_value: u64,
pub max_value: u64,
interpolations: Vec<Function>,
}
impl BinarySerializable for BlockwiseLinearFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
let mut out = vec![];
self.num_vals.serialize(&mut out)?;
self.min_value.serialize(&mut out)?;
self.max_value.serialize(&mut out)?;
self.interpolations.serialize(&mut out)?;
write.write_all(&out)?;
(out.len() as u32).serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<BlockwiseLinearFooter> {
let mut footer = BlockwiseLinearFooter {
num_vals: u64::deserialize(reader)?,
min_value: u64::deserialize(reader)?,
max_value: u64::deserialize(reader)?,
interpolations: Vec::<Function>::deserialize(reader)?,
};
for (num, interpol) in footer.interpolations.iter_mut().enumerate() {
interpol.start_pos = CHUNK_SIZE * num as u64;
}
Ok(footer)
}
}
#[inline]
fn get_interpolation_position(doc: u64) -> usize {
let index = doc / CHUNK_SIZE;
index as usize
}
#[inline]
fn get_interpolation_function(doc: u64, interpolations: &[Function]) -> &Function {
&interpolations[get_interpolation_position(doc)]
}
impl FastFieldDataAccess for BlockwiseLinearReader {
#[inline]
fn get_val(&self, idx: u64) -> u64 {
let interpolation = get_interpolation_function(idx, &self.footer.interpolations);
let in_block_idx = idx - interpolation.start_pos;
let calculated_value = get_calculated_value(
interpolation.value_start_pos,
in_block_idx,
interpolation.slope,
);
let diff = interpolation.bit_unpacker.get(
in_block_idx,
&self.data[interpolation.data_start_offset as usize..],
);
(calculated_value + diff) - interpolation.positive_val_offset
}
#[inline]
fn min_value(&self) -> u64 {
self.footer.min_value
}
#[inline]
fn max_value(&self) -> u64 {
self.footer.max_value
}
#[inline]
fn num_vals(&self) -> u64 {
self.footer.num_vals
}
}
/// Same as the `LinearCodec`, but working on chunks of CHUNK_SIZE elements.
pub struct BlockwiseLinearCodec;
impl FastFieldCodec for BlockwiseLinearCodec {
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::BlockwiseLinear;
type Reader = BlockwiseLinearReader;
/// Opens a fast field given a file.
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
let footer_offset = bytes.len() - 4 - footer_len as usize;
let (data, mut footer) = bytes.split(footer_offset);
let footer = BlockwiseLinearFooter::deserialize(&mut footer)?;
Ok(BlockwiseLinearReader { data, footer })
}
/// Creates a new fast field serializer.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &dyn FastFieldDataAccess,
) -> io::Result<()> {
assert!(fastfield_accessor.min_value() <= fastfield_accessor.max_value());
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
let mut first_function = Function {
end_pos: fastfield_accessor.num_vals(),
value_start_pos: first_val,
value_end_pos: last_val,
..Default::default()
};
first_function.calc_slope();
let mut interpolations = vec![first_function];
// Since we potentially apply multiple passes over the data, the data is cached.
// Multiple iterations can be expensive (a merge with index sorting can add a lot of
// overhead per iteration).
let data = fastfield_accessor.iter().collect::<Vec<_>>();
// Let's split this into chunks of CHUNK_SIZE.
for data_pos in (0..data.len() as u64).step_by(CHUNK_SIZE as usize).skip(1) {
let new_fun = {
let current_interpolation = interpolations.last_mut().unwrap();
current_interpolation.split(data_pos, data[data_pos as usize])
};
interpolations.push(new_fun);
}
// calculate offset and max (-> numbits) for each function
for interpolation in &mut interpolations {
let mut offset = 0;
let mut rel_positive_max = 0;
for (pos, actual_value) in data
[interpolation.start_pos as usize..interpolation.end_pos as usize]
.iter()
.cloned()
.enumerate()
{
let calculated_value = get_calculated_value(
interpolation.value_start_pos,
pos as u64,
interpolation.slope,
);
if calculated_value > actual_value {
// negative value we need to apply an offset
// we ignore negative values in the max value calculation, because negative
// values will be offset to 0
offset = offset.max(calculated_value - actual_value);
} else {
// positive value, no offset required
rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
}
}
interpolation.positive_val_offset = offset;
interpolation.num_bits = compute_num_bits(rel_positive_max + offset);
}
let mut bit_packer = BitPacker::new();
let write = &mut CountingWriter::wrap(write);
for interpolation in &mut interpolations {
interpolation.data_start_offset = write.written_bytes();
let num_bits = interpolation.num_bits;
for (pos, actual_value) in data
[interpolation.start_pos as usize..interpolation.end_pos as usize]
.iter()
.cloned()
.enumerate()
{
let calculated_value = get_calculated_value(
interpolation.value_start_pos,
pos as u64,
interpolation.slope,
);
let diff = (actual_value + interpolation.positive_val_offset) - calculated_value;
bit_packer.write(diff, num_bits, write)?;
}
bit_packer.flush(write)?;
}
bit_packer.close(write)?;
let footer = BlockwiseLinearFooter {
num_vals: fastfield_accessor.num_vals(),
min_value: fastfield_accessor.min_value(),
max_value: fastfield_accessor.max_value(),
interpolations,
};
footer.serialize(write)?;
Ok(())
}
fn is_applicable(fastfield_accessor: &impl FastFieldDataAccess) -> bool {
if fastfield_accessor.num_vals() < 5_000 {
return false;
}
// On serialization the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add it to the max value.
// If this doesn't overflow, the algorithm should be fine.
let theorethical_maximum_offset =
fastfield_accessor.max_value() - fastfield_accessor.min_value();
if fastfield_accessor
.max_value()
.checked_add(theorethical_maximum_offset)
.is_none()
{
return false;
}
true
}
/// Estimation for linear interpolation is hard, because you don't know
/// where the local maxima of the deviation from the calculated value are and
/// the offset is also unknown.
fn estimate(fastfield_accessor: &impl FastFieldDataAccess) -> f32 {
let first_val_in_first_block = fastfield_accessor.get_val(0);
let last_elem_in_first_chunk = CHUNK_SIZE.min(fastfield_accessor.num_vals());
let last_val_in_first_block =
fastfield_accessor.get_val(last_elem_in_first_chunk as u64 - 1);
let slope = get_slope(
first_val_in_first_block,
last_val_in_first_block,
fastfield_accessor.num_vals(),
);
// let's sample at 0%, 5%, 10% .. 95%, 100%, but for the first block only
let sample_positions = (0..20)
.map(|pos| (last_elem_in_first_chunk as f32 / 100.0 * pos as f32 * 5.0) as usize)
.collect::<Vec<_>>();
let max_distance = sample_positions
.iter()
.map(|pos| {
let calculated_value =
get_calculated_value(first_val_in_first_block, *pos as u64, slope);
let actual_value = fastfield_accessor.get_val(*pos as u64);
distance(calculated_value, actual_value)
})
.max()
.unwrap();
// Estimate one block and extrapolate the cost to all blocks.
// The assumption is that we don't have the actual max_distance from the samples, but we are
// within a 50% threshold of it.
// It is multiplied by 2 because for log-like data the line deviates as much above as below,
// so the offset would equal max_distance.
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * fastfield_accessor.num_vals() as u64
// function metadata per block
+ 29 * (fastfield_accessor.num_vals() / CHUNK_SIZE);
let num_bits_uncompressed = 64 * fastfield_accessor.num_vals();
num_bits as f32 / num_bits_uncompressed as f32
}
}
fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
if x < y {
y - x
} else {
x - y
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
crate::tests::create_and_validate::<BlockwiseLinearCodec>(data, name)
}
const HIGHEST_BIT: u64 = 1 << 63;
pub fn i64_to_u64(val: i64) -> u64 {
(val as u64) ^ HIGHEST_BIT
}
#[test]
fn test_compression_i64() {
let data = (i64::MAX - 600_000..=i64::MAX - 550_000)
.map(i64_to_u64)
.collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large i64");
assert!(actual_compression < 0.2);
assert!(estimate < 0.20);
assert!(estimate > 0.15);
assert!(actual_compression > 0.01);
}
#[test]
fn test_compression() {
let data = (10..=6_000_u64).collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large");
assert!(actual_compression < 0.2);
assert!(estimate < 0.20);
assert!(estimate > 0.15);
assert!(actual_compression > 0.01);
}
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
create_and_validate(&data, name);
}
}
#[test]
fn test_simple() {
let data = (10..=20_u64).collect::<Vec<_>>();
create_and_validate(&data, "simple monotonically");
}
#[test]
fn border_cases_1() {
let data = (0..1024).collect::<Vec<_>>();
create_and_validate(&data, "border case");
}
#[test]
fn border_case_2() {
let data = (0..1025).collect::<Vec<_>>();
create_and_validate(&data, "border case");
}
#[test]
fn rand() {
for _ in 0..10 {
let mut data = (5_000..20_000)
.map(|_| rand::random::<u32>() as u64)
.collect::<Vec<_>>();
let _ = create_and_validate(&data, "random");
data.reverse();
create_and_validate(&data, "random");
}
}
}

289
fastfield_codecs/src/lib.rs Normal file
View File

@@ -0,0 +1,289 @@
#[cfg(test)]
#[macro_use]
extern crate more_asserts;
use std::io;
use std::io::Write;
use common::BinarySerializable;
use ownedbytes::OwnedBytes;
pub mod bitpacked;
pub mod blockwise_linear;
pub mod linear;
pub trait FastFieldDataAccess {
fn get_val(&self, doc: u64) -> u64;
fn min_value(&self) -> u64;
fn max_value(&self) -> u64;
fn num_vals(&self) -> u64;
/// Returns an iterator over the data.
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = u64> + 'a> {
Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
}
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
#[repr(u8)]
pub enum FastFieldCodecType {
Bitpacked = 1,
Linear = 2,
BlockwiseLinear = 3,
Gcd = 4,
}
impl BinarySerializable for FastFieldCodecType {
fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
self.to_code().serialize(wrt)
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let code = u8::deserialize(reader)?;
let codec_type: Self = Self::from_code(code).ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidData,
format!("Unknown codec code `{code}`."),
)
})?;
Ok(codec_type)
}
}
impl FastFieldCodecType {
pub fn to_code(self) -> u8 {
self as u8
}
pub fn from_code(code: u8) -> Option<Self> {
match code {
1 => Some(Self::Bitpacked),
2 => Some(Self::Linear),
3 => Some(Self::BlockwiseLinear),
4 => Some(Self::Gcd),
_ => None,
}
}
}
/// The `FastFieldCodec` trait is implemented by all fast field codec variants
/// and is used to decide which one to choose.
pub trait FastFieldCodec {
/// A codec needs to provide a unique codec type, which is
/// used for debugging and de/serialization.
const CODEC_TYPE: FastFieldCodecType;
type Reader: FastFieldDataAccess;
/// Reads the metadata and returns the CodecReader
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader>;
/// Serializes the data using the serializer into write.
///
/// The `fastfield_accessor` iterator should be preferred over random access via `get_val`
/// for performance reasons.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &dyn FastFieldDataAccess,
) -> io::Result<()>;
/// Checks whether the codec is able to compress the data.
fn is_applicable(fastfield_accessor: &impl FastFieldDataAccess) -> bool;
/// Returns an estimate of the compression ratio.
/// The baseline is uncompressed 64bit data.
///
/// It could make sense to also return a value representing
/// computational complexity.
fn estimate(fastfield_accessor: &impl FastFieldDataAccess) -> f32;
}
#[derive(Debug, Clone)]
/// Statistics are used in codec detection and stored in the fast field footer.
pub struct FastFieldStats {
pub min_value: u64,
pub max_value: u64,
pub num_vals: u64,
}
impl<'a> FastFieldDataAccess for &'a [u64] {
fn get_val(&self, position: u64) -> u64 {
self[position as usize]
}
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
Box::new((self as &[u64]).iter().cloned())
}
fn min_value(&self) -> u64 {
self.iter().min().unwrap_or(0)
}
fn max_value(&self) -> u64 {
self.iter().max().unwrap_or(0)
}
fn num_vals(&self) -> u64 {
self.len() as u64
}
}
impl FastFieldDataAccess for Vec<u64> {
fn get_val(&self, position: u64) -> u64 {
self[position as usize]
}
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
Box::new((self as &[u64]).iter().cloned())
}
fn min_value(&self) -> u64 {
self.iter().min().unwrap_or(0)
}
fn max_value(&self) -> u64 {
self.iter().max().unwrap_or(0)
}
fn num_vals(&self) -> u64 {
self.len() as u64
}
}
#[cfg(test)]
mod tests {
use proptest::arbitrary::any;
use proptest::proptest;
use crate::bitpacked::BitpackedCodec;
use crate::blockwise_linear::BlockwiseLinearCodec;
use crate::linear::LinearCodec;
pub fn create_and_validate<Codec: FastFieldCodec>(data: &[u64], name: &str) -> (f32, f32) {
if !Codec::is_applicable(&data) {
return (f32::MAX, 0.0);
}
let estimation = Codec::estimate(&data);
let mut out: Vec<u8> = Vec::new();
Codec::serialize(&mut out, &data).unwrap();
let actual_compression = out.len() as f32 / (data.len() as f32 * 8.0);
let reader = Codec::open_from_bytes(OwnedBytes::new(out)).unwrap();
assert_eq!(reader.num_vals(), data.len() as u64);
for (doc, orig_val) in data.iter().enumerate() {
let val = reader.get_val(doc as u64);
if val != *orig_val {
panic!(
"val {val:?} does not match orig_val {orig_val:?}, in data set {name}, data \
{data:?}",
);
}
}
(estimation, actual_compression)
}
proptest! {
#[test]
fn test_proptest_small(data in proptest::collection::vec(any::<u64>(), 1..10)) {
create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
}
#[test]
fn test_proptest_large(data in proptest::collection::vec(any::<u64>(), 1..6000)) {
create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
}
}
pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
let mut data_and_names = vec![];
let data = (10..=20_u64).collect::<Vec<_>>();
data_and_names.push((data, "simple monotonically increasing"));
data_and_names.push((
vec![5, 6, 7, 8, 9, 10, 99, 100],
"offset in linear interpol",
));
data_and_names.push((vec![5, 50, 3, 13, 1, 1000, 35], "rand small"));
data_and_names.push((vec![10], "single value"));
data_and_names
}
fn test_codec<C: FastFieldCodec>() {
let codec_name = format!("{:?}", C::CODEC_TYPE);
for (data, dataset_name) in get_codec_test_data_sets() {
let (estimate, actual) = crate::tests::create_and_validate::<C>(&data, dataset_name);
let result = if estimate == f32::MAX {
"Disabled".to_string()
} else {
format!("Estimate `{estimate}` Actual `{actual}`")
};
println!("Codec {codec_name}, DataSet {dataset_name}, {result}");
}
}
#[test]
fn test_codec_bitpacking() {
test_codec::<BitpackedCodec>();
}
#[test]
fn test_codec_interpolation() {
test_codec::<LinearCodec>();
}
#[test]
fn test_codec_multi_interpolation() {
test_codec::<BlockwiseLinearCodec>();
}
use super::*;
#[test]
fn estimation_good_interpolation_case() {
let data = (10..=20000_u64).collect::<Vec<_>>();
let linear_interpol_estimation = LinearCodec::estimate(&data);
assert_le!(linear_interpol_estimation, 0.01);
let multi_linear_interpol_estimation = BlockwiseLinearCodec::estimate(&data);
assert_le!(multi_linear_interpol_estimation, 0.2);
assert_le!(linear_interpol_estimation, multi_linear_interpol_estimation);
let bitpacked_estimation = BitpackedCodec::estimate(&data);
assert_le!(linear_interpol_estimation, bitpacked_estimation);
}
#[test]
fn estimation_test_bad_interpolation_case() {
let data = vec![200, 10, 10, 10, 10, 1000, 20];
let linear_interpol_estimation = LinearCodec::estimate(&data);
assert_le!(linear_interpol_estimation, 0.32);
let bitpacked_estimation = BitpackedCodec::estimate(&data);
assert_le!(bitpacked_estimation, linear_interpol_estimation);
}
#[test]
fn estimation_test_bad_interpolation_case_monotonically_increasing() {
let mut data = (200..=20000_u64).collect::<Vec<_>>();
data.push(1_000_000);
// In this case linear interpolation is in fact not worse than bitpacking,
// but the estimator adds a threshold, which leads to a worse estimate.
let linear_interpol_estimation = LinearCodec::estimate(&data);
assert_le!(linear_interpol_estimation, 0.35);
let bitpacked_estimation = BitpackedCodec::estimate(&data);
assert_le!(bitpacked_estimation, 0.32);
assert_le!(bitpacked_estimation, linear_interpol_estimation);
}
#[test]
fn test_fast_field_codec_type_to_code() {
let mut count_codec = 0;
for code in 0..=255 {
if let Some(codec_type) = FastFieldCodecType::from_code(code) {
assert_eq!(codec_type.to_code(), code);
count_codec += 1;
}
}
assert_eq!(count_codec, 4);
}
}

View File

@@ -0,0 +1,347 @@
use std::io::{self, Read, Write};
use std::ops::Sub;
use common::{BinarySerializable, FixedSize};
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::{FastFieldCodec, FastFieldCodecType, FastFieldDataAccess};
/// Reader for the linear interpolation codec.
#[derive(Clone)]
pub struct LinearReader {
data: OwnedBytes,
bit_unpacker: BitUnpacker,
pub footer: LinearFooter,
pub slope: f32,
}
#[derive(Clone, Debug)]
pub struct LinearFooter {
pub relative_max_value: u64,
pub offset: u64,
pub first_val: u64,
pub last_val: u64,
pub num_vals: u64,
pub min_value: u64,
pub max_value: u64,
}
impl BinarySerializable for LinearFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
self.relative_max_value.serialize(write)?;
self.offset.serialize(write)?;
self.first_val.serialize(write)?;
self.last_val.serialize(write)?;
self.num_vals.serialize(write)?;
self.min_value.serialize(write)?;
self.max_value.serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearFooter> {
Ok(LinearFooter {
relative_max_value: u64::deserialize(reader)?,
offset: u64::deserialize(reader)?,
first_val: u64::deserialize(reader)?,
last_val: u64::deserialize(reader)?,
num_vals: u64::deserialize(reader)?,
min_value: u64::deserialize(reader)?,
max_value: u64::deserialize(reader)?,
})
}
}
impl FixedSize for LinearFooter {
const SIZE_IN_BYTES: usize = 56;
}
impl FastFieldDataAccess for LinearReader {
#[inline]
fn get_val(&self, doc: u64) -> u64 {
let calculated_value = get_calculated_value(self.footer.first_val, doc, self.slope);
(calculated_value + self.bit_unpacker.get(doc, &self.data)) - self.footer.offset
}
#[inline]
fn min_value(&self) -> u64 {
self.footer.min_value
}
#[inline]
fn max_value(&self) -> u64 {
self.footer.max_value
}
#[inline]
fn num_vals(&self) -> u64 {
self.footer.num_vals
}
}
/// Fast field codec which tries to guess values by linear interpolation
/// and stores the difference bitpacked.
pub struct LinearCodec;
#[inline]
pub(crate) fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
if num_vals <= 1 {
return 0.0;
}
// We calculate the slope with f64 high precision and use the result in lower precision f32
// This is done in order to handle estimations for very large values like i64::MAX
let diff = diff(last_val, first_val);
(diff / (num_vals - 1) as f64) as f32
}
/// Delay the cast, to improve precision for very large u64 values.
///
/// Since i64 is mapped monotonically into u64 space, 0i64 maps to 1 << 63 (just above
/// i64::MAX as u64). So very large u64 values are not uncommon.
///
/// ```rust
/// let val1 = i64::MAX;
/// let val2 = i64::MAX - 100;
/// assert_eq!(val1 - val2, 100);
/// assert_eq!(val1 as f64 - val2 as f64, 0.0);
/// ```
fn diff(val1: u64, val2: u64) -> f64 {
if val1 >= val2 {
(val1 - val2) as f64
} else {
(val2 - val1) as f64 * -1.0
}
}
#[inline]
pub fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
if slope < 0.0 {
first_val - (pos as f32 * -slope) as u64
} else {
first_val + (pos as f32 * slope) as u64
}
}
impl FastFieldCodec for LinearCodec {
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Linear;
type Reader = LinearReader;
/// Opens a fast field given a file.
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
let footer_offset = bytes.len() - LinearFooter::SIZE_IN_BYTES;
let (data, mut footer) = bytes.split(footer_offset);
let footer = LinearFooter::deserialize(&mut footer)?;
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
let num_bits = compute_num_bits(footer.relative_max_value);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(LinearReader {
data,
bit_unpacker,
footer,
slope,
})
}
/// Creates a new fast field serializer.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &dyn FastFieldDataAccess,
) -> io::Result<()> {
assert!(fastfield_accessor.min_value() <= fastfield_accessor.max_value());
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
let slope = get_slope(first_val, last_val, fastfield_accessor.num_vals());
// calculate offset to ensure all values are positive
let mut offset = 0;
let mut rel_positive_max = 0;
for (pos, actual_value) in fastfield_accessor.iter().enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
if calculated_value > actual_value {
// negative value we need to apply an offset
// we ignore negative values in the max value calculation, because negative values
// will be offset to 0
offset = offset.max(calculated_value - actual_value);
} else {
// positive value, no offset required
rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
}
}
// rel_positive_max will be adjusted by offset
let relative_max_value = rel_positive_max + offset;
let num_bits = compute_num_bits(relative_max_value);
let mut bit_packer = BitPacker::new();
for (pos, val) in fastfield_accessor.iter().enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
let diff = (val + offset) - calculated_value;
bit_packer.write(diff, num_bits, write)?;
}
bit_packer.close(write)?;
let footer = LinearFooter {
relative_max_value,
offset,
first_val,
last_val,
num_vals: fastfield_accessor.num_vals(),
min_value: fastfield_accessor.min_value(),
max_value: fastfield_accessor.max_value(),
};
footer.serialize(write)?;
Ok(())
}
fn is_applicable(fastfield_accessor: &impl FastFieldDataAccess) -> bool {
if fastfield_accessor.num_vals() < 3 {
return false; // disable compressor for this case
}
// On serialization the offset is added to the actual value.
// We need to make sure this won't run into overflow issues.
// For this we take the maximum theoretical offset and add it to the max value.
// If this doesn't overflow, the algorithm should be fine.
let theorethical_maximum_offset =
fastfield_accessor.max_value() - fastfield_accessor.min_value();
if fastfield_accessor
.max_value()
.checked_add(theorethical_maximum_offset)
.is_none()
{
return false;
}
true
}
/// Estimation for linear interpolation is hard, because you don't know
/// where the local maxima of the deviation from the calculated value are and
/// the offset to shift all values to >= 0 is also unknown.
fn estimate(fastfield_accessor: &impl FastFieldDataAccess) -> f32 {
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
let slope = get_slope(first_val, last_val, fastfield_accessor.num_vals());
// let's sample at 0%, 5%, 10% .. 95%, 100%
let num_vals = fastfield_accessor.num_vals() as f32 / 100.0;
let sample_positions = (0..20)
.map(|pos| (num_vals * pos as f32 * 5.0) as usize)
.collect::<Vec<_>>();
let max_distance = sample_positions
.iter()
.map(|pos| {
let calculated_value = get_calculated_value(first_val, *pos as u64, slope);
let actual_value = fastfield_accessor.get_val(*pos as u64);
distance(calculated_value, actual_value)
})
.max()
.unwrap_or(0);
// The assumption is that we don't have the actual max_distance from the samples, but we are
// within a 50% threshold of it.
// It is multiplied by 2 because for log-like data the line deviates as much above as below,
// so the offset would equal max_distance.
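// Example with made-up numbers: a sampled max_distance of 100 is padded to 150 (+50%) and
// doubled to 300 to account for the offset, so num_bits is computed for an amplitude of 300
// (9 bits per value).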
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64
* fastfield_accessor.num_vals()
+ LinearFooter::SIZE_IN_BYTES as u64;
let num_bits_uncompressed = 64 * fastfield_accessor.num_vals();
num_bits as f32 / num_bits_uncompressed as f32
}
}
#[inline]
fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
if x < y {
y - x
} else {
x - y
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
crate::tests::create_and_validate::<LinearCodec>(data, name)
}
#[test]
fn get_calculated_value_test() {
// pos slope
assert_eq!(get_calculated_value(100, 10, 5.0), 150);
// neg slope
assert_eq!(get_calculated_value(100, 10, -5.0), 50);
// pos slope, very high values
assert_eq!(
get_calculated_value(i64::MAX as u64, 10, 5.0),
i64::MAX as u64 + 50
);
// neg slope, very high values
assert_eq!(
get_calculated_value(i64::MAX as u64, 10, -5.0),
i64::MAX as u64 - 50
);
}
#[test]
fn test_compression() {
let data = (10..=6_000_u64).collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large");
assert!(actual_compression < 0.01);
assert!(estimate < 0.01);
}
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
create_and_validate(&data, name);
}
}
#[test]
fn linear_interpol_fast_field_test_large_amplitude() {
let data = vec![
i64::MAX as u64 / 2,
i64::MAX as u64 / 3,
i64::MAX as u64 / 2,
];
create_and_validate(&data, "large amplitude");
}
#[test]
fn linear_interpol_fast_concave_data() {
let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
create_and_validate(&data, "concave data");
}
#[test]
fn linear_interpol_fast_convex_data() {
let data = vec![0, 40, 60, 70, 75, 77];
create_and_validate(&data, "convex data");
}
#[test]
fn linear_interpol_fast_field_test_simple() {
let data = (10..=20_u64).collect::<Vec<_>>();
create_and_validate(&data, "simple monotonically");
}
#[test]
fn linear_interpol_fast_field_rand() {
for _ in 0..5000 {
let mut data = (0..50).map(|_| rand::random::<u64>()).collect::<Vec<_>>();
create_and_validate(&data, "random");
data.reverse();
create_and_validate(&data, "random");
}
}
}

View File

@@ -0,0 +1,115 @@
#[macro_use]
extern crate prettytable;
use fastfield_codecs::blockwise_linear::BlockwiseLinearCodec;
use fastfield_codecs::linear::LinearCodec;
use fastfield_codecs::{FastFieldCodec, FastFieldCodecType, FastFieldStats};
use prettytable::{Cell, Row, Table};
fn main() {
let mut table = Table::new();
// Add the header row
table.add_row(row!["", "Compression Ratio", "Compression Estimation"]);
for (data, data_set_name) in get_codec_test_data_sets() {
let mut results = vec![];
let res = serialize_with_codec::<LinearCodec>(&data);
results.push(res);
let res = serialize_with_codec::<BlockwiseLinearCodec>(&data);
results.push(res);
let res = serialize_with_codec::<fastfield_codecs::bitpacked::BitpackedCodec>(&data);
results.push(res);
// let best_estimation_codec = results
//.iter()
//.min_by(|res1, res2| res1.partial_cmp(&res2).unwrap())
//.unwrap();
let best_compression_ratio_codec = results
.iter()
.min_by(|res1, res2| res1.partial_cmp(res2).unwrap())
.cloned()
.unwrap();
table.add_row(Row::new(vec![Cell::new(data_set_name).style_spec("Bbb")]));
for (is_applicable, est, comp, codec_type) in results {
let (est_cell, ratio_cell) = if !is_applicable {
("Codec Disabled".to_string(), "".to_string())
} else {
(est.to_string(), comp.to_string())
};
let style = if comp == best_compression_ratio_codec.1 {
"Fb"
} else {
""
};
table.add_row(Row::new(vec![
Cell::new(&format!("{codec_type:?}")).style_spec("bFg"),
Cell::new(&ratio_cell).style_spec(style),
Cell::new(&est_cell).style_spec(""),
]));
}
}
table.printstd();
}
pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
let mut data_and_names = vec![];
let data = (1000..=200_000_u64).collect::<Vec<_>>();
data_and_names.push((data, "Autoincrement"));
let mut current_cumulative = 0;
let data = (1..=200_000_u64)
.map(|num| {
let num = (num as f32 + num as f32).log10() as u64;
current_cumulative += num;
current_cumulative
})
.collect::<Vec<_>>();
// let data = (1..=200000_u64).map(|num| num + num).collect::<Vec<_>>();
data_and_names.push((data, "Monotonically increasing concave"));
let mut current_cumulative = 0;
let data = (1..=200_000_u64)
.map(|num| {
let num = (200_000.0 - num as f32).log10() as u64;
current_cumulative += num;
current_cumulative
})
.collect::<Vec<_>>();
data_and_names.push((data, "Monotonically increasing convex"));
let data = (1000..=200_000_u64)
.map(|num| num + rand::random::<u8>() as u64)
.collect::<Vec<_>>();
data_and_names.push((data, "Almost monotonically increasing"));
data_and_names
}
pub fn serialize_with_codec<C: FastFieldCodec>(
data: &[u64],
) -> (bool, f32, f32, FastFieldCodecType) {
let is_applicable = C::is_applicable(&data);
if !is_applicable {
return (false, 0.0, 0.0, C::CODEC_TYPE);
}
let estimation = C::estimate(&data);
let mut out = vec![];
C::serialize(&mut out, &data).unwrap();
let actual_compression = out.len() as f32 / (data.len() * 8) as f32;
(true, estimation, actual_compression, C::CODEC_TYPE)
}
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
let min_value = data.iter().cloned().min().unwrap_or(0);
let max_value = data.iter().cloned().max().unwrap_or(0);
FastFieldStats {
min_value,
max_value,
num_vals: data.len() as u64,
}
}

11
ownedbytes/Cargo.toml Normal file
View File

@@ -0,0 +1,11 @@
[package]
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
name = "ownedbytes"
version = "0.3.0"
edition = "2021"
description = "Expose data as static slice"
license = "MIT"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
stable_deref_trait = "1.2.0"

343
ownedbytes/src/lib.rs Normal file
View File

@@ -0,0 +1,343 @@
use std::convert::TryInto;
use std::ops::{Deref, Range};
use std::sync::Arc;
use std::{fmt, io, mem};
use stable_deref_trait::StableDeref;
/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a static slice.
///
/// The backing object is required to be `StableDeref`.
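///
/// A minimal usage sketch (a `Vec<u8>` is `StableDeref` and can serve as the backing object):
///
/// ```
/// let bytes = ownedbytes::OwnedBytes::new(vec![1u8, 2, 3]);
/// assert_eq!(bytes.as_slice(), &[1, 2, 3]);
/// assert_eq!(bytes.len(), 3);
/// ```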
#[derive(Clone)]
pub struct OwnedBytes {
data: &'static [u8],
box_stable_deref: Arc<dyn Deref<Target = [u8]> + Sync + Send>,
}
impl OwnedBytes {
/// Creates an empty `OwnedBytes`.
pub fn empty() -> OwnedBytes {
OwnedBytes::new(&[][..])
}
/// Creates an `OwnedBytes` instance given a `StableDeref` object.
pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
data_holder: T,
) -> OwnedBytes {
let box_stable_deref = Arc::new(data_holder);
let bytes: &[u8] = box_stable_deref.as_ref();
let data = unsafe { mem::transmute::<_, &'static [u8]>(bytes.deref()) };
OwnedBytes {
data,
box_stable_deref,
}
}
/// Creates an `OwnedBytes` that is just a view over a slice of the data.
#[must_use]
#[inline]
pub fn slice(&self, range: Range<usize>) -> Self {
OwnedBytes {
data: &self.data[range],
box_stable_deref: self.box_stable_deref.clone(),
}
}
/// Returns the underlying slice of data.
/// `Deref` and `AsRef` are also available.
#[inline]
pub fn as_slice(&self) -> &[u8] {
self.data
}
/// Returns the len of the slice.
#[inline]
pub fn len(&self) -> usize {
self.data.len()
}
/// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
///
/// Left will hold `split_len` bytes.
///
/// This operation is cheap and does not require to copy any memory.
/// On the other hand, both `left` and `right` retain a handle over
/// the entire slice of memory. In other words, the memory will only
/// be released when both left and right are dropped.
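///
/// Usage sketch (mirrors `test_owned_bytes_split` in the tests below):
///
/// ```
/// let (left, right) = ownedbytes::OwnedBytes::new(b"abcdefghi".as_ref()).split(3);
/// assert_eq!(left.as_slice(), b"abc");
/// assert_eq!(right.as_slice(), b"defghi");
/// ```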
#[inline]
#[must_use]
pub fn split(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
let right_box_stable_deref = self.box_stable_deref.clone();
let left = OwnedBytes {
data: &self.data[..split_len],
box_stable_deref: self.box_stable_deref,
};
let right = OwnedBytes {
data: &self.data[split_len..],
box_stable_deref: right_box_stable_deref,
};
(left, right)
}
/// Splits off the right part of the `OwnedBytes` at the given offset and returns it.
///
/// `self` is truncated to the first `split_len` bytes.
pub fn split_off(&mut self, split_len: usize) -> OwnedBytes {
let right_box_stable_deref = self.box_stable_deref.clone();
let right_piece = OwnedBytes {
data: &self.data[split_len..],
box_stable_deref: right_box_stable_deref,
};
self.data = &self.data[..split_len];
right_piece
}
/// Returns true iff this `OwnedBytes` is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.as_slice().is_empty()
}
/// Drops the leftmost `advance_len` bytes.
#[inline]
pub fn advance(&mut self, advance_len: usize) {
self.data = &self.data[advance_len..]
}
/// Reads a `u8` from the `OwnedBytes` and advances by one byte.
#[inline]
pub fn read_u8(&mut self) -> u8 {
assert!(!self.is_empty());
let byte = self.as_slice()[0];
self.advance(1);
byte
}
/// Reads a `u64` encoded as little-endian from the `OwnedBytes` and advances by 8 bytes.
#[inline]
pub fn read_u64(&mut self) -> u64 {
assert!(self.len() > 7);
let octlet: [u8; 8] = self.as_slice()[..8].try_into().unwrap();
self.advance(8);
u64::from_le_bytes(octlet)
}
}
impl fmt::Debug for OwnedBytes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// We truncate the bytes in order to make sure the debug string
// is not too long.
let bytes_truncated: &[u8] = if self.len() > 10 {
&self.as_slice()[..10]
} else {
self.as_slice()
};
write!(f, "OwnedBytes({:?}, len={})", bytes_truncated, self.len())
}
}
impl PartialEq for OwnedBytes {
fn eq(&self, other: &OwnedBytes) -> bool {
self.as_slice() == other.as_slice()
}
}
impl Eq for OwnedBytes {}
impl PartialEq<[u8]> for OwnedBytes {
fn eq(&self, other: &[u8]) -> bool {
self.as_slice() == other
}
}
impl PartialEq<str> for OwnedBytes {
fn eq(&self, other: &str) -> bool {
self.as_slice() == other.as_bytes()
}
}
impl<'a, T: ?Sized> PartialEq<&'a T> for OwnedBytes
where OwnedBytes: PartialEq<T>
{
fn eq(&self, other: &&'a T) -> bool {
*self == **other
}
}
impl Deref for OwnedBytes {
type Target = [u8];
#[inline]
fn deref(&self) -> &Self::Target {
self.as_slice()
}
}
impl io::Read for OwnedBytes {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let read_len = {
let data = self.as_slice();
if data.len() >= buf.len() {
let buf_len = buf.len();
buf.copy_from_slice(&data[..buf_len]);
buf.len()
} else {
let data_len = data.len();
buf[..data_len].copy_from_slice(data);
data_len
}
};
self.advance(read_len);
Ok(read_len)
}
#[inline]
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
let read_len = {
let data = self.as_slice();
buf.extend(data);
data.len()
};
self.advance(read_len);
Ok(read_len)
}
#[inline]
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
let read_len = self.read(buf)?;
if read_len != buf.len() {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"failed to fill whole buffer",
));
}
Ok(())
}
}
impl AsRef<[u8]> for OwnedBytes {
#[inline]
fn as_ref(&self) -> &[u8] {
self.as_slice()
}
}
#[cfg(test)]
mod tests {
use std::io::{self, Read};
use super::OwnedBytes;
#[test]
fn test_owned_bytes_debug() {
let short_bytes = OwnedBytes::new(b"abcd".as_ref());
assert_eq!(
format!("{:?}", short_bytes),
"OwnedBytes([97, 98, 99, 100], len=4)"
);
let long_bytes = OwnedBytes::new(b"abcdefghijklmnopq".as_ref());
assert_eq!(
format!("{:?}", long_bytes),
"OwnedBytes([97, 98, 99, 100, 101, 102, 103, 104, 105, 106], len=17)"
);
}
#[test]
fn test_owned_bytes_read() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcdefghiklmnopqrstuvwxyz".as_ref());
{
let mut buf = [0u8; 5];
bytes.read_exact(&mut buf[..]).unwrap();
assert_eq!(&buf, b"abcde");
assert_eq!(bytes.as_slice(), b"fghiklmnopqrstuvwxyz")
}
{
let mut buf = [0u8; 2];
bytes.read_exact(&mut buf[..]).unwrap();
assert_eq!(&buf, b"fg");
assert_eq!(bytes.as_slice(), b"hiklmnopqrstuvwxyz")
}
Ok(())
}
#[test]
fn test_owned_bytes_read_right_at_the_end() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = [0u8; 5];
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
assert_eq!(&buf, b"abcde");
assert_eq!(bytes.as_slice(), b"");
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
assert_eq!(&buf, b"abcde");
Ok(())
}
#[test]
fn test_owned_bytes_read_incomplete() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = [0u8; 7];
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
assert_eq!(&buf[..5], b"abcde");
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
Ok(())
}
#[test]
fn test_owned_bytes_read_to_end() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = Vec::new();
bytes.read_to_end(&mut buf)?;
assert_eq!(buf.as_slice(), b"abcde".as_ref());
Ok(())
}
#[test]
fn test_owned_bytes_read_u8() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"\xFF".as_ref());
assert_eq!(bytes.read_u8(), 255);
assert_eq!(bytes.len(), 0);
Ok(())
}
#[test]
fn test_owned_bytes_read_u64() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"\0\xFF\xFF\xFF\xFF\xFF\xFF\xFF".as_ref());
assert_eq!(bytes.read_u64(), u64::MAX - 255);
assert_eq!(bytes.len(), 0);
Ok(())
}
#[test]
fn test_owned_bytes_split() {
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
let (left, right) = bytes.split(3);
assert_eq!(left.as_slice(), b"abc");
assert_eq!(right.as_slice(), b"defghi");
}
#[test]
fn test_owned_bytes_split_boundary() {
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
{
let (left, right) = bytes.clone().split(0);
assert_eq!(left.as_slice(), b"");
assert_eq!(right.as_slice(), b"abcdefghi");
}
{
let (left, right) = bytes.split(9);
assert_eq!(left.as_slice(), b"abcdefghi");
assert_eq!(right.as_slice(), b"");
}
}
#[test]
fn test_split_off() {
let mut data = OwnedBytes::new(b"abcdef".as_ref());
assert_eq!(data, "abcdef");
assert_eq!(data.split_off(2), "cdef");
assert_eq!(data, "ab");
assert_eq!(data.split_off(1), "b");
assert_eq!(data, "a");
}
}

17
query-grammar/Cargo.toml Normal file
View File

@@ -0,0 +1,17 @@
[package]
name = "tantivy-query-grammar"
version = "0.18.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
description = """Search engine library"""
homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2021"
[dependencies]
combine = {version="4", default-features=false, features=[] }
once_cell = "1.7.2"
regex ={ version = "1.5.4", default-features = false, features = ["std", "unicode"] }

3
query-grammar/README.md Normal file
View File

@@ -0,0 +1,3 @@
# Tantivy Query Grammar
This crate is used by tantivy to parse queries.

17
query-grammar/src/lib.rs Normal file
View File

@@ -0,0 +1,17 @@
#![allow(clippy::derive_partial_eq_without_eq)]
mod occur;
mod query_grammar;
mod user_input_ast;
use combine::parser::Parser;
pub use crate::occur::Occur;
use crate::query_grammar::parse_to_ast;
pub use crate::user_input_ast::{UserInputAst, UserInputBound, UserInputLeaf, UserInputLiteral};
pub struct Error;
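/// Parses a user query string into a `UserInputAst`.
///
/// Hedged usage sketch (the query string is made up for illustration):
///
/// ```
/// assert!(tantivy_query_grammar::parse_query("title:diary").is_ok());
/// ```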
pub fn parse_query(query: &str) -> Result<UserInputAst, Error> {
let (user_input_ast, _remaining) = parse_to_ast().parse(query).map_err(|_| Error)?;
Ok(user_input_ast)
}

View File

@@ -0,0 +1,72 @@
use std::fmt;
use std::fmt::Write;
/// Defines whether a term in a query must be present,
/// should be present or must not be present.
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
pub enum Occur {
/// For a given document to be considered for scoring,
/// at least one of the terms with the Should or the Must
/// Occur constraint must be within the document.
Should,
/// Documents without the term are excluded from the search.
Must,
/// Documents that contain the term are excluded from the
/// search.
MustNot,
}
impl Occur {
/// Returns the one-char prefix symbol for this `Occur`.
/// - `Should` => '?'
/// - `Must` => '+'
/// - `MustNot` => '-'
fn to_char(self) -> char {
match self {
Occur::Should => '?',
Occur::Must => '+',
Occur::MustNot => '-',
}
}
/// Compose two occur values.
pub fn compose(left: Occur, right: Occur) -> Occur {
match (left, right) {
(Occur::Should, _) => right,
(Occur::Must, Occur::MustNot) => Occur::MustNot,
(Occur::Must, _) => Occur::Must,
(Occur::MustNot, Occur::MustNot) => Occur::Must,
(Occur::MustNot, _) => Occur::MustNot,
}
}
}
impl fmt::Display for Occur {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_char(self.to_char())
}
}
#[cfg(test)]
mod test {
use crate::Occur;
#[test]
fn test_occur_compose() {
assert_eq!(Occur::compose(Occur::Should, Occur::Should), Occur::Should);
assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
assert_eq!(
Occur::compose(Occur::Should, Occur::MustNot),
Occur::MustNot
);
assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
assert_eq!(Occur::compose(Occur::Must, Occur::Must), Occur::Must);
assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
assert_eq!(
Occur::compose(Occur::MustNot, Occur::Should),
Occur::MustNot
);
assert_eq!(Occur::compose(Occur::MustNot, Occur::Must), Occur::MustNot);
assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
}
}

View File

@@ -0,0 +1,752 @@
use combine::error::StringStreamError;
use combine::parser::char::{char, digit, space, spaces, string};
use combine::parser::combinator::recognize;
use combine::parser::range::{take_while, take_while1};
use combine::parser::repeat::escaped;
use combine::parser::Parser;
use combine::{
attempt, choice, eof, many, many1, one_of, optional, parser, satisfy, skip_many1, value,
};
use once_cell::sync::Lazy;
use regex::Regex;
use super::user_input_ast::{UserInputAst, UserInputBound, UserInputLeaf, UserInputLiteral};
use crate::Occur;
// Note: the '-' char is only forbidden at the beginning of a field name; it would be clearer
// to add it to the special characters.
const SPECIAL_CHARS: &[char] = &[
'+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '!', '\\', '*', ' ',
];
const ESCAPED_SPECIAL_CHARS_PATTERN: &str = r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|!|\\|\*|\s)"#;
/// Parses a field name.
/// A field name must have at least one character and be followed by a colon.
/// All characters are allowed, including the special characters in `SPECIAL_CHARS`, but these
/// need to be escaped with a backslash character '\'.
fn field_name<'a>() -> impl Parser<&'a str, Output = String> {
static ESCAPED_SPECIAL_CHARS_RE: Lazy<Regex> =
Lazy::new(|| Regex::new(ESCAPED_SPECIAL_CHARS_PATTERN).unwrap());
recognize::<String, _, _>(escaped(
(
take_while1(|c| !SPECIAL_CHARS.contains(&c) && c != '-'),
take_while(|c| !SPECIAL_CHARS.contains(&c)),
),
'\\',
satisfy(|_| true), /* if the next character is not a special char, the \ will be treated
* as the \ character. */
))
.skip(char(':'))
.map(|s| ESCAPED_SPECIAL_CHARS_RE.replace_all(&s, "$1").to_string())
.and_then(|s: String| match s.is_empty() {
true => Err(StringStreamError::UnexpectedParse),
_ => Ok(s),
})
}
fn word<'a>() -> impl Parser<&'a str, Output = String> {
(
satisfy(|c: char| {
!c.is_whitespace()
&& !['-', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
}),
many(satisfy(|c: char| {
!c.is_whitespace() && ![':', '^', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
})),
)
.map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
.and_then(|s: String| match s.as_str() {
"OR" | "AND " | "NOT" => Err(StringStreamError::UnexpectedParse),
_ => Ok(s),
})
}
/// Parses a date time according to rfc3339
/// 2015-08-02T18:54:42+02
/// 2021-04-13T19:46:26.266051969+00:00
///
/// NOTE: also accepts 999999-99-99T99:99:99.266051969+99:99
/// We delegate rejecting such invalid dates to the logical AST computation code
/// which invokes time::OffsetDateTime::parse(..., &Rfc3339) on the value to actually parse
/// it (instead of merely extracting the datetime value as string as done here).
fn date_time<'a>() -> impl Parser<&'a str, Output = String> {
let two_digits = || recognize::<String, _, _>((digit(), digit()));
// Parses a time zone
// -06:30
// Z
let time_zone = {
let utc = recognize::<String, _, _>(char('Z'));
let offset = recognize((
choice([char('-'), char('+')]),
two_digits(),
char(':'),
two_digits(),
));
utc.or(offset)
};
// Parses a date
// 2010-01-30
let date = {
recognize::<String, _, _>((
many1::<String, _, _>(digit()),
char('-'),
two_digits(),
char('-'),
two_digits(),
))
};
// Parses a time
// 12:30:02
// 19:46:26.266051969
let time = {
recognize::<String, _, _>((
two_digits(),
char(':'),
two_digits(),
char(':'),
two_digits(),
optional((char('.'), many1::<String, _, _>(digit()))),
time_zone,
))
};
recognize((date, char('T'), time))
}
fn term_val<'a>() -> impl Parser<&'a str, Output = String> {
let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"'));
negative_number().or(phrase.or(word()))
}
fn term_query<'a>() -> impl Parser<&'a str, Output = UserInputLiteral> {
(field_name(), term_val(), slop_val()).map(|(field_name, phrase, slop)| UserInputLiteral {
field_name: Some(field_name),
phrase,
slop,
})
}
fn slop_val<'a>() -> impl Parser<&'a str, Output = u32> {
let slop =
(char('~'), many1(digit())).and_then(|(_, slop): (_, String)| match slop.parse::<u32>() {
Ok(d) => Ok(d),
_ => Err(StringStreamError::UnexpectedParse),
});
optional(slop).map(|slop| match slop {
Some(d) => d,
_ => 0,
})
}
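// Usage sketch (illustrative, consistent with `test_slop` below): slop_val() maps an optional
// "~<digits>" suffix to a u32 and defaults to 0 when absent, so `"a b"~3` carries slop == 3
// while a bare phrase carries slop == 0.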
fn literal<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
let term_default_field = (term_val(), slop_val()).map(|(phrase, slop)| UserInputLiteral {
field_name: None,
phrase,
slop,
});
attempt(term_query())
.or(term_default_field)
.map(UserInputLeaf::from)
}
fn negative_number<'a>() -> impl Parser<&'a str, Output = String> {
(
char('-'),
many1(digit()),
optional((char('.'), many1(digit()))),
)
.map(|(s1, s2, s3): (char, String, Option<(char, String)>)| {
if let Some(('.', s3)) = s3 {
format!("{}{}.{}", s1, s2, s3)
} else {
format!("{}{}", s1, s2)
}
})
}
fn spaces1<'a>() -> impl Parser<&'a str, Output = ()> {
skip_many1(space())
}
/// Function that parses a range out of a Stream
/// Supports ranges like:
/// [5 TO 10], {5 TO 10}, [* TO 10], [10 TO *], {10 TO *], >5, <=10
/// [a TO *], [a TO c], [abc TO bcd}
fn range<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
let range_term_val = || {
attempt(date_time())
.or(word())
.or(negative_number())
.or(char('*').with(value("*".to_string())))
};
// check for unbounded range in the form of <5, <=10, >5, >=5
let elastic_unbounded_range = (
choice([
attempt(string(">=")),
attempt(string("<=")),
attempt(string("<")),
attempt(string(">")),
])
.skip(spaces()),
range_term_val(),
)
.map(
|(comparison_sign, bound): (&str, String)| match comparison_sign {
">=" => (UserInputBound::Inclusive(bound), UserInputBound::Unbounded),
"<=" => (UserInputBound::Unbounded, UserInputBound::Inclusive(bound)),
"<" => (UserInputBound::Unbounded, UserInputBound::Exclusive(bound)),
">" => (UserInputBound::Exclusive(bound), UserInputBound::Unbounded),
// default case
_ => (UserInputBound::Unbounded, UserInputBound::Unbounded),
},
);
let lower_bound = (one_of("{[".chars()), range_term_val()).map(
|(boundary_char, lower_bound): (char, String)| {
if lower_bound == "*" {
UserInputBound::Unbounded
} else if boundary_char == '{' {
UserInputBound::Exclusive(lower_bound)
} else {
UserInputBound::Inclusive(lower_bound)
}
},
);
let upper_bound = (range_term_val(), one_of("}]".chars())).map(
|(higher_bound, boundary_char): (String, char)| {
if higher_bound == "*" {
UserInputBound::Unbounded
} else if boundary_char == '}' {
UserInputBound::Exclusive(higher_bound)
} else {
UserInputBound::Inclusive(higher_bound)
}
},
);
// return only lower and upper
let lower_to_upper = (
lower_bound.skip((spaces(), string("TO"), spaces())),
upper_bound,
);
(
optional(field_name()).skip(spaces()),
// try the elastic syntax first; if it matches, one side of the range is unbounded
attempt(elastic_unbounded_range).or(lower_to_upper),
)
.map(|(field, (lower, upper))|
// Construct the leaf from extracted field (optional)
// and bounds
UserInputLeaf::Range {
field,
lower,
upper
})
}
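// Usage sketch (illustrative, matching `test_range_parser` below): the elastic and bracket
// syntaxes map to the same bounds, e.g.
//   range().parse("weight: >=71.2")     -> lower: Inclusive("71.2"), upper: Unbounded
//   range().parse("weight:[71.2 TO *}") -> lower: Inclusive("71.2"), upper: Unbounded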
fn negate(expr: UserInputAst) -> UserInputAst {
expr.unary(Occur::MustNot)
}
fn leaf<'a>() -> impl Parser<&'a str, Output = UserInputAst> {
parser(|input| {
char('(')
.with(ast())
.skip(char(')'))
.or(char('*').map(|_| UserInputAst::from(UserInputLeaf::All)))
.or(attempt(
string("NOT").skip(spaces1()).with(leaf()).map(negate),
))
.or(attempt(range().map(UserInputAst::from)))
.or(literal().map(UserInputAst::from))
.parse_stream(input)
.into_result()
})
}
fn occur_symbol<'a>() -> impl Parser<&'a str, Output = Occur> {
char('-')
.map(|_| Occur::MustNot)
.or(char('+').map(|_| Occur::Must))
}
fn occur_leaf<'a>() -> impl Parser<&'a str, Output = (Option<Occur>, UserInputAst)> {
(optional(occur_symbol()), boosted_leaf())
}
fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
(many1(digit()), optional((char('.'), many1(digit())))).map(
|(int_part, decimal_part_opt): (String, Option<(char, String)>)| {
let mut float_str = int_part;
if let Some((chr, decimal_str)) = decimal_part_opt {
float_str.push(chr);
float_str.push_str(&decimal_str);
}
float_str.parse::<f64>().unwrap()
},
)
}
fn boost<'a>() -> impl Parser<&'a str, Output = f64> {
(char('^'), positive_float_number()).map(|(_, boost)| boost)
}
fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAst> {
(leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
Some(boost) if (boost - 1.0).abs() > f64::EPSILON => {
UserInputAst::Boost(Box::new(leaf), boost)
}
_ => leaf,
})
}
#[derive(Clone, Copy)]
enum BinaryOperand {
Or,
And,
}
fn binary_operand<'a>() -> impl Parser<&'a str, Output = BinaryOperand> {
string("AND")
.with(value(BinaryOperand::And))
.or(string("OR").with(value(BinaryOperand::Or)))
}
fn aggregate_binary_expressions(
left: UserInputAst,
others: Vec<(BinaryOperand, UserInputAst)>,
) -> UserInputAst {
let mut dnf: Vec<Vec<UserInputAst>> = vec![vec![left]];
for (operator, operand_ast) in others {
match operator {
BinaryOperand::And => {
if let Some(last) = dnf.last_mut() {
last.push(operand_ast);
}
}
BinaryOperand::Or => {
dnf.push(vec![operand_ast]);
}
}
}
if dnf.len() == 1 {
UserInputAst::and(dnf.into_iter().next().unwrap()) //< safe
} else {
let conjunctions = dnf.into_iter().map(UserInputAst::and).collect();
UserInputAst::or(conjunctions)
}
}
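// Worked example (illustrative): for "a OR b AND c" this is called with left = a and
// others = [(Or, b), (And, c)]. An AND extends the last conjunction and an OR starts a new
// one, so dnf = [[a], [b, c]], which renders as (?"a" ?(+"b" +"c")) -- exactly what
// `test_parse_query_to_ast_binary_op` below asserts.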
fn operand_leaf<'a>() -> impl Parser<&'a str, Output = (BinaryOperand, UserInputAst)> {
(
binary_operand().skip(spaces()),
boosted_leaf().skip(spaces()),
)
}
pub fn ast<'a>() -> impl Parser<&'a str, Output = UserInputAst> {
let boolean_expr = (boosted_leaf().skip(spaces()), many1(operand_leaf()))
.map(|(left, right)| aggregate_binary_expressions(left, right));
let whitespace_separated_leaves = many1(occur_leaf().skip(spaces().silent())).map(
|subqueries: Vec<(Option<Occur>, UserInputAst)>| {
if subqueries.len() == 1 {
let (occur_opt, ast) = subqueries.into_iter().next().unwrap();
match occur_opt.unwrap_or(Occur::Should) {
Occur::Must | Occur::Should => ast,
Occur::MustNot => UserInputAst::Clause(vec![(Some(Occur::MustNot), ast)]),
}
} else {
UserInputAst::Clause(subqueries.into_iter().collect())
}
},
);
let expr = attempt(boolean_expr).or(whitespace_separated_leaves);
spaces().with(expr).skip(spaces())
}
pub fn parse_to_ast<'a>() -> impl Parser<&'a str, Output = UserInputAst> {
spaces()
.with(optional(ast()).skip(eof()))
.map(|opt_ast| opt_ast.unwrap_or_else(UserInputAst::empty_query))
}
#[cfg(test)]
mod test {
type TestParseResult = Result<(), StringStreamError>;
use combine::parser::Parser;
use super::*;
pub fn nearly_equals(a: f64, b: f64) -> bool {
(a - b).abs() < 0.0005 * (a + b).abs()
}
fn assert_nearly_equals(expected: f64, val: f64) {
assert!(
nearly_equals(val, expected),
"Got {}, expected {}.",
val,
expected
);
}
#[test]
fn test_occur_symbol() -> TestParseResult {
assert_eq!(super::occur_symbol().parse("-")?, (Occur::MustNot, ""));
assert_eq!(super::occur_symbol().parse("+")?, (Occur::Must, ""));
Ok(())
}
#[test]
fn test_positive_float_number() {
fn valid_parse(float_str: &str, expected_val: f64, expected_remaining: &str) {
let (val, remaining) = positive_float_number().parse(float_str).unwrap();
assert_eq!(remaining, expected_remaining);
assert_nearly_equals(val, expected_val);
}
fn error_parse(float_str: &str) {
assert!(positive_float_number().parse(float_str).is_err());
}
valid_parse("1.0", 1.0, "");
valid_parse("1", 1.0, "");
valid_parse("0.234234 aaa", 0.234234f64, " aaa");
error_parse(".3332");
error_parse("1.");
error_parse("-1.");
}
#[test]
fn test_date_time() {
let (val, remaining) = date_time()
.parse("2015-08-02T18:54:42+02:30")
.expect("cannot parse date");
assert_eq!(val, "2015-08-02T18:54:42+02:30");
assert_eq!(remaining, "");
assert!(date_time().parse("2015-08-02T18:54:42+02").is_err());
let (val, remaining) = date_time()
.parse("2021-04-13T19:46:26.266051969+00:00")
.expect("cannot parse fractional date");
assert_eq!(val, "2021-04-13T19:46:26.266051969+00:00");
assert_eq!(remaining, "");
}
fn test_parse_query_to_ast_helper(query: &str, expected: &str) {
let query = parse_to_ast().parse(query).unwrap().0;
let query_str = format!("{:?}", query);
assert_eq!(query_str, expected);
}
fn test_is_parse_err(query: &str) {
assert!(parse_to_ast().parse(query).is_err());
}
#[test]
fn test_parse_empty_to_ast() {
test_parse_query_to_ast_helper("", "<emptyclause>");
}
#[test]
fn test_parse_query_to_ast_hyphen() {
test_parse_query_to_ast_helper("\"www-form-encoded\"", "\"www-form-encoded\"");
test_parse_query_to_ast_helper("www-form-encoded", "\"www-form-encoded\"");
test_parse_query_to_ast_helper("www-form-encoded", "\"www-form-encoded\"");
}
#[test]
fn test_parse_query_to_ast_not_op() {
assert_eq!(
format!("{:?}", parse_to_ast().parse("NOT")),
"Err(UnexpectedParse)"
);
test_parse_query_to_ast_helper("NOTa", "\"NOTa\"");
test_parse_query_to_ast_helper("NOT a", "(-\"a\")");
}
#[test]
fn test_boosting() {
assert!(parse_to_ast().parse("a^2^3").is_err());
assert!(parse_to_ast().parse("a^2^").is_err());
test_parse_query_to_ast_helper("a^3", "(\"a\")^3");
test_parse_query_to_ast_helper("a^3 b^2", "(*(\"a\")^3 *(\"b\")^2)");
test_parse_query_to_ast_helper("a^1", "\"a\"");
}
#[test]
fn test_parse_query_to_ast_binary_op() {
test_parse_query_to_ast_helper("a AND b", "(+\"a\" +\"b\")");
test_parse_query_to_ast_helper("a OR b", "(?\"a\" ?\"b\")");
test_parse_query_to_ast_helper("a OR b AND c", "(?\"a\" ?(+\"b\" +\"c\"))");
test_parse_query_to_ast_helper("a AND b AND c", "(+\"a\" +\"b\" +\"c\")");
assert_eq!(
format!("{:?}", parse_to_ast().parse("a OR b aaa")),
"Err(UnexpectedParse)"
);
assert_eq!(
format!("{:?}", parse_to_ast().parse("a AND b aaa")),
"Err(UnexpectedParse)"
);
assert_eq!(
format!("{:?}", parse_to_ast().parse("aaa a OR b ")),
"Err(UnexpectedParse)"
);
assert_eq!(
format!("{:?}", parse_to_ast().parse("aaa ccc a OR b ")),
"Err(UnexpectedParse)"
);
}
#[test]
fn test_parse_elastic_query_ranges() {
test_parse_query_to_ast_helper("title: >a", "\"title\":{\"a\" TO \"*\"}");
test_parse_query_to_ast_helper("title:>=a", "\"title\":[\"a\" TO \"*\"}");
test_parse_query_to_ast_helper("title: <a", "\"title\":{\"*\" TO \"a\"}");
test_parse_query_to_ast_helper("title:<=a", "\"title\":{\"*\" TO \"a\"]");
test_parse_query_to_ast_helper("title:<=bsd", "\"title\":{\"*\" TO \"bsd\"]");
test_parse_query_to_ast_helper("weight: >70", "\"weight\":{\"70\" TO \"*\"}");
test_parse_query_to_ast_helper("weight:>=70", "\"weight\":[\"70\" TO \"*\"}");
test_parse_query_to_ast_helper("weight: <70", "\"weight\":{\"*\" TO \"70\"}");
test_parse_query_to_ast_helper("weight:<=70", "\"weight\":{\"*\" TO \"70\"]");
test_parse_query_to_ast_helper("weight: >60.7", "\"weight\":{\"60.7\" TO \"*\"}");
test_parse_query_to_ast_helper("weight: <= 70", "\"weight\":{\"*\" TO \"70\"]");
test_parse_query_to_ast_helper("weight: <= 70.5", "\"weight\":{\"*\" TO \"70.5\"]");
}
#[test]
fn test_occur_leaf() {
let ((occur, ast), _) = super::occur_leaf().parse("+abc").unwrap();
assert_eq!(occur, Some(Occur::Must));
assert_eq!(format!("{:?}", ast), "\"abc\"");
}
#[test]
fn test_field_name() {
assert_eq!(
super::field_name().parse(".my.field.name:a"),
Ok((".my.field.name".to_string(), "a"))
);
assert_eq!(
super::field_name().parse(r#"にんじん:a"#),
Ok(("にんじん".to_string(), "a"))
);
assert_eq!(
super::field_name().parse(r#"my\field:a"#),
Ok((r#"my\field"#.to_string(), "a"))
);
assert!(super::field_name().parse("my field:a").is_err());
assert_eq!(
super::field_name().parse("\\(1\\+1\\):2"),
Ok(("(1+1)".to_string(), "2"))
);
assert_eq!(
super::field_name().parse("my_field_name:a"),
Ok(("my_field_name".to_string(), "a"))
);
assert_eq!(
super::field_name().parse("myfield.b:hello").unwrap(),
("myfield.b".to_string(), "hello")
);
assert_eq!(
super::field_name().parse(r#"myfield\.b:hello"#).unwrap(),
(r#"myfield\.b"#.to_string(), "hello")
);
assert!(super::field_name().parse("my_field_name").is_err());
assert!(super::field_name().parse(":a").is_err());
assert!(super::field_name().parse("-my_field:a").is_err());
assert_eq!(
super::field_name().parse("_my_field:a"),
Ok(("_my_field".to_string(), "a"))
);
assert_eq!(
super::field_name().parse("~my~field:a"),
Ok(("~my~field".to_string(), "a"))
);
for special_char in SPECIAL_CHARS.iter() {
let query = &format!("\\{special_char}my\\{special_char}field:a");
assert_eq!(
super::field_name().parse(query),
Ok((format!("{special_char}my{special_char}field"), "a"))
);
}
}
#[test]
fn test_field_name_re() {
let escaped_special_chars_re = Regex::new(ESCAPED_SPECIAL_CHARS_PATTERN).unwrap();
for special_char in SPECIAL_CHARS.iter() {
assert_eq!(
escaped_special_chars_re.replace_all(&format!("\\{}", special_char), "$1"),
special_char.to_string()
);
}
}
#[test]
fn test_range_parser() {
// testing the range() parser separately
let res = range()
.parse("title: <hello")
.expect("Cannot parse flexible bound word")
.0;
let expected = UserInputLeaf::Range {
field: Some("title".to_string()),
lower: UserInputBound::Unbounded,
upper: UserInputBound::Exclusive("hello".to_string()),
};
let res2 = range()
.parse("title:{* TO hello}")
.expect("Cannot parse unbounded to word")
.0;
assert_eq!(res, expected);
assert_eq!(res2, expected);
let expected_weight = UserInputLeaf::Range {
field: Some("weight".to_string()),
lower: UserInputBound::Inclusive("71.2".to_string()),
upper: UserInputBound::Unbounded,
};
let res3 = range()
.parse("weight: >=71.2")
.expect("Cannot parse flexible bound float")
.0;
let res4 = range()
.parse("weight:[71.2 TO *}")
.expect("Cannot parse float to unbounded")
.0;
assert_eq!(res3, expected_weight);
assert_eq!(res4, expected_weight);
let expected_dates = UserInputLeaf::Range {
field: Some("date_field".to_string()),
lower: UserInputBound::Exclusive("2015-08-02T18:54:42Z".to_string()),
upper: UserInputBound::Inclusive("2021-08-02T18:54:42+02:30".to_string()),
};
let res5 = range()
.parse("date_field:{2015-08-02T18:54:42Z TO 2021-08-02T18:54:42+02:30]")
.expect("Cannot parse date range")
.0;
assert_eq!(res5, expected_dates);
let expected_flexible_dates = UserInputLeaf::Range {
field: Some("date_field".to_string()),
lower: UserInputBound::Unbounded,
upper: UserInputBound::Inclusive("2021-08-02T18:54:42.12345+02:30".to_string()),
};
let res6 = range()
.parse("date_field: <=2021-08-02T18:54:42.12345+02:30")
.expect("Cannot parse date range")
.0;
assert_eq!(res6, expected_flexible_dates);
}
#[test]
fn test_parse_query_to_trimming_spaces() {
test_parse_query_to_ast_helper(" abc", "\"abc\"");
test_parse_query_to_ast_helper("abc ", "\"abc\"");
test_parse_query_to_ast_helper("( a OR abc)", "(?\"a\" ?\"abc\")");
test_parse_query_to_ast_helper("(a OR abc)", "(?\"a\" ?\"abc\")");
test_parse_query_to_ast_helper("(a OR abc)", "(?\"a\" ?\"abc\")");
test_parse_query_to_ast_helper("a OR abc ", "(?\"a\" ?\"abc\")");
test_parse_query_to_ast_helper("(a OR abc )", "(?\"a\" ?\"abc\")");
test_parse_query_to_ast_helper("(a OR abc) ", "(?\"a\" ?\"abc\")");
}
#[test]
fn test_parse_query_single_term() {
test_parse_query_to_ast_helper("abc", "\"abc\"");
}
#[test]
fn test_parse_query_default_clause() {
test_parse_query_to_ast_helper("a b", "(*\"a\" *\"b\")");
}
#[test]
fn test_parse_query_must_default_clause() {
test_parse_query_to_ast_helper("+(a b)", "(*\"a\" *\"b\")");
}
#[test]
fn test_parse_query_must_single_term() {
test_parse_query_to_ast_helper("+d", "\"d\"");
}
#[test]
fn test_single_term_with_field() {
test_parse_query_to_ast_helper("abc:toto", "\"abc\":\"toto\"");
}
#[test]
fn test_single_term_with_float() {
test_parse_query_to_ast_helper("abc:1.1", "\"abc\":\"1.1\"");
test_parse_query_to_ast_helper("a.b.c:1.1", "\"a.b.c\":\"1.1\"");
test_parse_query_to_ast_helper("a\\ b\\ c:1.1", "\"a b c\":\"1.1\"");
}
#[test]
fn test_must_clause() {
test_parse_query_to_ast_helper("(+a +b)", "(+\"a\" +\"b\")");
}
#[test]
fn test_parse_test_query_plus_a_b_plus_d() {
test_parse_query_to_ast_helper("+(a b) +d", "(+(*\"a\" *\"b\") +\"d\")");
}
#[test]
fn test_parse_test_query_other() {
test_parse_query_to_ast_helper("(+a +b) d", "(*(+\"a\" +\"b\") *\"d\")");
test_parse_query_to_ast_helper("+abc:toto", "\"abc\":\"toto\"");
test_parse_query_to_ast_helper("+a\\+b\\+c:toto", "\"a+b+c\":\"toto\"");
test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+\"abc\":\"toto\" -\"titi\")");
test_parse_query_to_ast_helper("-abc:toto", "(-\"abc\":\"toto\")");
test_is_parse_err("--abc:toto");
test_parse_query_to_ast_helper("abc:a b", "(*\"abc\":\"a\" *\"b\")");
test_parse_query_to_ast_helper("abc:\"a b\"", "\"abc\":\"a b\"");
test_parse_query_to_ast_helper("foo:[1 TO 5]", "\"foo\":[\"1\" TO \"5\"]");
}
#[test]
fn test_parse_query_with_range() {
test_parse_query_to_ast_helper("[1 TO 5]", "[\"1\" TO \"5\"]");
test_parse_query_to_ast_helper("foo:{a TO z}", "\"foo\":{\"a\" TO \"z\"}");
test_parse_query_to_ast_helper("foo:[1 TO toto}", "\"foo\":[\"1\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[* TO toto}", "\"foo\":{\"*\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[1 TO *}", "\"foo\":[\"1\" TO \"*\"}");
test_parse_query_to_ast_helper(
"1.2.foo.bar:[1.1 TO *}",
"\"1.2.foo.bar\":[\"1.1\" TO \"*\"}",
);
test_is_parse_err("abc + ");
}
#[test]
fn test_slop() {
assert!(parse_to_ast().parse("\"a b\"~").is_err());
assert!(parse_to_ast().parse("foo:\"a b\"~").is_err());
assert!(parse_to_ast().parse("\"a b\"~a").is_err());
assert!(parse_to_ast().parse("\"a b\"~100000000000000000").is_err());
test_parse_query_to_ast_helper("\"a b\"^2~4", "(*(\"a b\")^2 *\"~4\")");
test_parse_query_to_ast_helper("\"~Document\"", "\"~Document\"");
test_parse_query_to_ast_helper("~Document", "\"~Document\"");
test_parse_query_to_ast_helper("a~2", "\"a~2\"");
test_parse_query_to_ast_helper("\"a b\"~0", "\"a b\"");
test_parse_query_to_ast_helper("\"a b\"~1", "\"a b\"~1");
test_parse_query_to_ast_helper("\"a b\"~3", "\"a b\"~3");
test_parse_query_to_ast_helper("foo:\"a b\"~300", "\"foo\":\"a b\"~300");
test_parse_query_to_ast_helper("\"a b\"~300^2", "(\"a b\"~300)^2");
}
}
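For orientation, a minimal usage sketch of the grammar entry point above (mirroring the tests; `combine`'s `Parser::parse` returns the parsed `UserInputAst` together with the unconsumed input):

```rust
use combine::parser::Parser;

#[test]
fn usage_sketch() {
    // Parse a query string into a UserInputAst; its Debug rendering is the same
    // representation asserted on in the tests above.
    let (ast, remaining) = parse_to_ast().parse("title:hello AND body:world").unwrap();
    assert_eq!(remaining, "");
    assert_eq!(format!("{:?}", ast), r#"(+"title":"hello" +"body":"world")"#);
}
```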


@@ -1,8 +1,9 @@
use std::fmt;
use std::fmt::{Debug, Formatter};
use crate::query::Occur;
use crate::Occur;
#[derive(PartialEq)]
pub enum UserInputLeaf {
Literal(UserInputLiteral),
All,
@@ -23,7 +24,7 @@ impl Debug for UserInputLeaf {
ref upper,
} => {
if let Some(ref field) = field {
write!(formatter, "{}:", field)?;
write!(formatter, "\"{}\":", field)?;
}
lower.display_lower(formatter)?;
write!(formatter, " TO ")?;
@@ -35,37 +36,47 @@ impl Debug for UserInputLeaf {
}
}
#[derive(PartialEq)]
pub struct UserInputLiteral {
pub field_name: Option<String>,
pub phrase: String,
pub slop: u32,
}
impl fmt::Debug for UserInputLiteral {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self.field_name {
Some(ref field_name) => write!(formatter, "{}:\"{}\"", field_name, self.phrase),
None => write!(formatter, "\"{}\"", self.phrase),
if let Some(ref field) = self.field_name {
write!(formatter, "\"{}\":", field)?;
}
write!(formatter, "\"{}\"", self.phrase)?;
if self.slop > 0 {
write!(formatter, "~{}", self.slop)?;
}
Ok(())
}
}
#[derive(PartialEq)]
pub enum UserInputBound {
Inclusive(String),
Exclusive(String),
Unbounded,
}
impl UserInputBound {
fn display_lower(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
fn display_lower(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
UserInputBound::Inclusive(ref word) => write!(formatter, "[\"{}\"", word),
UserInputBound::Exclusive(ref word) => write!(formatter, "{{\"{}\"", word),
UserInputBound::Unbounded => write!(formatter, "{{\"*\""),
}
}
fn display_upper(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
fn display_upper(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
UserInputBound::Inclusive(ref word) => write!(formatter, "\"{}\"]", word),
UserInputBound::Exclusive(ref word) => write!(formatter, "\"{}\"}}", word),
UserInputBound::Unbounded => write!(formatter, "\"*\"}}"),
}
}
@@ -73,115 +84,94 @@ impl UserInputBound {
match *self {
UserInputBound::Inclusive(ref contents) => contents,
UserInputBound::Exclusive(ref contents) => contents,
UserInputBound::Unbounded => "*",
}
}
}
pub enum UserInputAST {
Clause(Vec<UserInputAST>),
Unary(Occur, Box<UserInputAST>),
// Not(Box<UserInputAST>),
// Should(Box<UserInputAST>),
// Must(Box<UserInputAST>),
pub enum UserInputAst {
Clause(Vec<(Option<Occur>, UserInputAst)>),
Leaf(Box<UserInputLeaf>),
Boost(Box<UserInputAst>, f64),
}
impl UserInputAST {
pub fn unary(self, occur: Occur) -> UserInputAST {
UserInputAST::Unary(occur, Box::new(self))
impl UserInputAst {
#[must_use]
pub fn unary(self, occur: Occur) -> UserInputAst {
UserInputAst::Clause(vec![(Some(occur), self)])
}
fn compose(occur: Occur, asts: Vec<UserInputAST>) -> UserInputAST {
assert!(occur != Occur::MustNot);
fn compose(occur: Occur, asts: Vec<UserInputAst>) -> UserInputAst {
assert_ne!(occur, Occur::MustNot);
assert!(!asts.is_empty());
if asts.len() == 1 {
asts.into_iter().next().unwrap() //< safe
} else {
UserInputAST::Clause(
UserInputAst::Clause(
asts.into_iter()
.map(|ast: UserInputAST| ast.unary(occur))
.map(|ast: UserInputAst| (Some(occur), ast))
.collect::<Vec<_>>(),
)
}
}
pub fn and(asts: Vec<UserInputAST>) -> UserInputAST {
UserInputAST::compose(Occur::Must, asts)
pub fn empty_query() -> UserInputAst {
UserInputAst::Clause(Vec::default())
}
pub fn or(asts: Vec<UserInputAST>) -> UserInputAST {
UserInputAST::compose(Occur::Should, asts)
pub fn and(asts: Vec<UserInputAst>) -> UserInputAst {
UserInputAst::compose(Occur::Must, asts)
}
pub fn or(asts: Vec<UserInputAst>) -> UserInputAst {
UserInputAst::compose(Occur::Should, asts)
}
}
/*
impl UserInputAST {
fn compose_occur(self, occur: Occur) -> UserInputAST {
match self {
UserInputAST::Not(other) => {
let new_occur = compose_occur(Occur::MustNot, occur);
other.simplify()
}
_ => {
self
}
}
}
pub fn simplify(self) -> UserInputAST {
match self {
UserInputAST::Clause(els) => {
if els.len() == 1 {
return els.into_iter().next().unwrap();
} else {
return self;
}
}
UserInputAST::Not(els) => {
if els.len() == 1 {
return els.into_iter().next().unwrap();
} else {
return self;
}
}
}
}
}
*/
impl From<UserInputLiteral> for UserInputLeaf {
fn from(literal: UserInputLiteral) -> UserInputLeaf {
UserInputLeaf::Literal(literal)
}
}
impl From<UserInputLeaf> for UserInputAST {
fn from(leaf: UserInputLeaf) -> UserInputAST {
UserInputAST::Leaf(Box::new(leaf))
impl From<UserInputLeaf> for UserInputAst {
fn from(leaf: UserInputLeaf) -> UserInputAst {
UserInputAst::Leaf(Box::new(leaf))
}
}
impl fmt::Debug for UserInputAST {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
fn print_occur_ast(
occur_opt: Option<Occur>,
ast: &UserInputAst,
formatter: &mut fmt::Formatter,
) -> fmt::Result {
if let Some(occur) = occur_opt {
write!(formatter, "{}{:?}", occur, ast)?;
} else {
write!(formatter, "*{:?}", ast)?;
}
Ok(())
}
impl fmt::Debug for UserInputAst {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match *self {
UserInputAST::Clause(ref subqueries) => {
UserInputAst::Clause(ref subqueries) => {
if subqueries.is_empty() {
write!(formatter, "<emptyclause>")?;
} else {
write!(formatter, "(")?;
write!(formatter, "{:?}", &subqueries[0])?;
print_occur_ast(subqueries[0].0, &subqueries[0].1, formatter)?;
for subquery in &subqueries[1..] {
write!(formatter, " {:?}", subquery)?;
write!(formatter, " ")?;
print_occur_ast(subquery.0, &subquery.1, formatter)?;
}
write!(formatter, ")")?;
}
Ok(())
}
UserInputAST::Unary(ref occur, ref subquery) => {
write!(formatter, "{}({:?})", occur.to_char(), subquery)
}
UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
UserInputAst::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
UserInputAst::Boost(ref leaf, boost) => write!(formatter, "({:?})^{}", leaf, boost),
}
}
}


@@ -1 +1,7 @@
use_try_shorthand = true
comment_width = 120
format_strings = true
group_imports = "StdExternalCrate"
imports_granularity = "Module"
normalize_comments = true
where_single_line = true
wrap_comments = true

src/aggregation/README.md Normal file

@@ -0,0 +1,36 @@
# Contributing
When adding a new bucket aggregation, make sure to extend the "test_aggregation_flushing" test for at least 2 levels.
# Code Organization
Tantivy's aggregations have been designed to mimic the
[aggregations of elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations.html).
The code is organized in submodules:
## bucket
Contains all bucket aggregations, like range aggregation. These bucket aggregations group documents into buckets and can contain sub-aggregations.
## metric
Contains all metric aggregations, like average aggregation. Metric aggregations do not have sub aggregations.
#### agg_req
agg_req contains the user's aggregation request. Deserialization from json is compatible with elasticsearch aggregation requests.
#### agg_req_with_accessor
agg_req_with_accessor contains the user's aggregation request enriched with fast field accessors etc., which are
used during collection.
#### segment_agg_result
segment_agg_result contains the aggregation result tree, which is used for collection of a segment.
The tree from agg_req_with_accessor is passed during collection.
#### intermediate_agg_result
intermediate_agg_result contains the aggregation tree for merging with other trees.
#### agg_result
agg_result contains the final aggregation tree.
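For orientation, a rough sketch of how these modules fit together at query time (the `json_request` variable, the collector constructor, and the search call are placeholders, not taken from this change):

```rust
// Build the request tree (agg_req); the JSON format is elasticsearch-compatible.
let agg_req: Aggregations = serde_json::from_str(json_request)?;

// Hand the request to the aggregation collector and run the search; the final
// tree (agg_result) comes back as the search result.
// let collector = AggregationCollector::from_aggs(agg_req);   // constructor assumed
// let agg_res: AggregationResults = searcher.search(&query, &collector)?;
```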

src/aggregation/agg_req.rs Normal file

@@ -0,0 +1,369 @@
//! Contains the aggregation request tree. Used to build an
//! [AggregationCollector](super::AggregationCollector).
//!
//! [Aggregations] is the top level entry point to create a request, which is a `HashMap<String,
//! Aggregation>`.
//!
//! Requests are compatible with the json format of elasticsearch.
//!
//! # Example
//!
//! ```
//! use tantivy::aggregation::bucket::RangeAggregation;
//! use tantivy::aggregation::agg_req::BucketAggregationType;
//! use tantivy::aggregation::agg_req::{Aggregation, Aggregations};
//! use tantivy::aggregation::agg_req::BucketAggregation;
//! let agg_req1: Aggregations = vec![
//! (
//! "range".to_string(),
//! Aggregation::Bucket(BucketAggregation {
//! bucket_agg: BucketAggregationType::Range(RangeAggregation{
//! field: "score".to_string(),
//! ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
//! keyed: false,
//! }),
//! sub_aggregation: Default::default(),
//! }),
//! ),
//! ]
//! .into_iter()
//! .collect();
//!
//! let elasticsearch_compatible_json_req = r#"
//! {
//! "range": {
//! "range": {
//! "field": "score",
//! "ranges": [
//! { "from": 3.0, "to": 7.0 },
//! { "from": 7.0, "to": 20.0 }
//! ]
//! }
//! }
//! }"#;
//! let agg_req2: Aggregations = serde_json::from_str(elasticsearch_compatible_json_req).unwrap();
//! assert_eq!(agg_req1, agg_req2);
//! ```
use std::collections::{HashMap, HashSet};
use serde::{Deserialize, Serialize};
pub use super::bucket::RangeAggregation;
use super::bucket::{HistogramAggregation, TermsAggregation};
use super::metric::{AverageAggregation, StatsAggregation};
use super::VecWithNames;
/// The top-level aggregation request structure, which contains [Aggregation]s and their user-defined
/// names. It is also used in [buckets](BucketAggregation) to define sub-aggregations.
///
/// The key is the user defined name of the aggregation.
pub type Aggregations = HashMap<String, Aggregation>;
/// Like Aggregations, but optimized to work with the aggregation result
#[derive(Clone, Debug)]
pub(crate) struct AggregationsInternal {
pub(crate) metrics: VecWithNames<MetricAggregation>,
pub(crate) buckets: VecWithNames<BucketAggregationInternal>,
}
impl From<Aggregations> for AggregationsInternal {
fn from(aggs: Aggregations) -> Self {
let mut metrics = vec![];
let mut buckets = vec![];
for (key, agg) in aggs {
match agg {
Aggregation::Bucket(bucket) => buckets.push((
key,
BucketAggregationInternal {
bucket_agg: bucket.bucket_agg,
sub_aggregation: bucket.sub_aggregation.into(),
},
)),
Aggregation::Metric(metric) => metrics.push((key, metric)),
}
}
Self {
metrics: VecWithNames::from_entries(metrics),
buckets: VecWithNames::from_entries(buckets),
}
}
}
#[derive(Clone, Debug)]
// Like BucketAggregation, but optimized to work with the result
pub(crate) struct BucketAggregationInternal {
/// Bucket aggregation strategy to group documents.
pub bucket_agg: BucketAggregationType,
/// The sub_aggregations in the buckets. Each bucket will aggregate on the document set in the
/// bucket.
pub sub_aggregation: AggregationsInternal,
}
impl BucketAggregationInternal {
pub(crate) fn as_range(&self) -> Option<&RangeAggregation> {
match &self.bucket_agg {
BucketAggregationType::Range(range) => Some(range),
_ => None,
}
}
pub(crate) fn as_histogram(&self) -> Option<&HistogramAggregation> {
match &self.bucket_agg {
BucketAggregationType::Histogram(histogram) => Some(histogram),
_ => None,
}
}
pub(crate) fn as_term(&self) -> Option<&TermsAggregation> {
match &self.bucket_agg {
BucketAggregationType::Terms(terms) => Some(terms),
_ => None,
}
}
}
/// Extract all fields where the term dictionary is used in the tree.
pub fn get_term_dict_field_names(aggs: &Aggregations) -> HashSet<String> {
let mut term_dict_field_names = Default::default();
for el in aggs.values() {
el.get_term_dict_field_names(&mut term_dict_field_names)
}
term_dict_field_names
}
/// Extract all fast field names used in the tree.
pub fn get_fast_field_names(aggs: &Aggregations) -> HashSet<String> {
let mut fast_field_names = Default::default();
for el in aggs.values() {
el.get_fast_field_names(&mut fast_field_names)
}
fast_field_names
}
/// Aggregation request of [BucketAggregation] or [MetricAggregation].
///
/// An aggregation is either a bucket or a metric.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Aggregation {
/// Bucket aggregation, see [BucketAggregation] for details.
Bucket(BucketAggregation),
/// Metric aggregation, see [MetricAggregation] for details.
Metric(MetricAggregation),
}
impl Aggregation {
fn get_term_dict_field_names(&self, term_field_names: &mut HashSet<String>) {
if let Aggregation::Bucket(bucket) = self {
bucket.get_term_dict_field_names(term_field_names)
}
}
fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
match self {
Aggregation::Bucket(bucket) => bucket.get_fast_field_names(fast_field_names),
Aggregation::Metric(metric) => metric.get_fast_field_names(fast_field_names),
}
}
}
/// BucketAggregations create buckets of documents. Each bucket is associated with a rule which
determines whether or not a document falls into it. In other words, the buckets
effectively define document sets. Buckets are not necessarily disjoint, therefore a document can
/// fall into multiple buckets. In addition to the buckets themselves, the bucket aggregations also
/// compute and return the number of documents for each bucket. Bucket aggregations, as opposed to
/// metric aggregations, can hold sub-aggregations. These sub-aggregations will be aggregated for
/// the buckets created by their "parent" bucket aggregation. There are different bucket
/// aggregators, each with a different "bucketing" strategy. Some define a single bucket, some
define a fixed number of multiple buckets, and others dynamically create the buckets during the
/// aggregation process.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BucketAggregation {
/// Bucket aggregation strategy to group documents.
#[serde(flatten)]
pub bucket_agg: BucketAggregationType,
/// The sub_aggregations in the buckets. Each bucket will aggregate on the document set in the
/// bucket.
#[serde(rename = "aggs")]
#[serde(default)]
#[serde(skip_serializing_if = "Aggregations::is_empty")]
pub sub_aggregation: Aggregations,
}
impl BucketAggregation {
fn get_term_dict_field_names(&self, term_dict_field_names: &mut HashSet<String>) {
if let BucketAggregationType::Terms(terms) = &self.bucket_agg {
term_dict_field_names.insert(terms.field.to_string());
}
term_dict_field_names.extend(get_term_dict_field_names(&self.sub_aggregation));
}
fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
self.bucket_agg.get_fast_field_names(fast_field_names);
fast_field_names.extend(get_fast_field_names(&self.sub_aggregation));
}
}
/// The bucket aggregation types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum BucketAggregationType {
/// Put data into buckets of user-defined ranges.
#[serde(rename = "range")]
Range(RangeAggregation),
/// Put data into buckets with a fixed interval (histogram).
#[serde(rename = "histogram")]
Histogram(HistogramAggregation),
/// Put data into buckets of terms.
#[serde(rename = "terms")]
Terms(TermsAggregation),
}
impl BucketAggregationType {
fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
match self {
BucketAggregationType::Terms(terms) => fast_field_names.insert(terms.field.to_string()),
BucketAggregationType::Range(range) => fast_field_names.insert(range.field.to_string()),
BucketAggregationType::Histogram(histogram) => {
fast_field_names.insert(histogram.field.to_string())
}
};
}
}
/// The aggregations in this family compute metrics based on values extracted
/// from the documents that are being aggregated. Values are extracted from the fast field of
/// the document.
/// Some aggregations output a single numeric metric (e.g. Average) and are called
/// single-value numeric metrics aggregation, others generate multiple metrics (e.g. Stats) and are
/// called multi-value numeric metrics aggregation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum MetricAggregation {
/// Calculates the average.
#[serde(rename = "avg")]
Average(AverageAggregation),
/// Calculates stats sum, average, min, max, standard_deviation on a field.
#[serde(rename = "stats")]
Stats(StatsAggregation),
}
impl MetricAggregation {
fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
match self {
MetricAggregation::Average(avg) => fast_field_names.insert(avg.field.to_string()),
MetricAggregation::Stats(stats) => fast_field_names.insert(stats.field.to_string()),
};
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn serialize_to_json_test() {
let agg_req1: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score".to_string(),
ranges: vec![
(f64::MIN..3f64).into(),
(3f64..7f64).into(),
(7f64..20f64).into(),
(20f64..f64::MAX).into(),
],
keyed: true,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let elasticsearch_compatible_json_req = r#"{
"range": {
"range": {
"field": "score",
"ranges": [
{
"to": 3.0
},
{
"from": 3.0,
"to": 7.0
},
{
"from": 7.0,
"to": 20.0
},
{
"from": 20.0
}
],
"keyed": true
}
}
}"#;
let agg_req2: String = serde_json::to_string_pretty(&agg_req1).unwrap();
assert_eq!(agg_req2, elasticsearch_compatible_json_req);
}
#[test]
fn test_get_fast_field_names() {
let agg_req2: Aggregations = vec![
(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score2".to_string(),
ranges: vec![
(f64::MIN..3f64).into(),
(3f64..7f64).into(),
(7f64..20f64).into(),
(20f64..f64::MAX).into(),
],
..Default::default()
}),
sub_aggregation: Default::default(),
}),
),
(
"metric".to_string(),
Aggregation::Metric(MetricAggregation::Average(
AverageAggregation::from_field_name("field123".to_string()),
)),
),
]
.into_iter()
.collect();
let agg_req1: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score".to_string(),
ranges: vec![
(f64::MIN..3f64).into(),
(3f64..7f64).into(),
(7f64..20f64).into(),
(20f64..f64::MAX).into(),
],
..Default::default()
}),
sub_aggregation: agg_req2,
}),
)]
.into_iter()
.collect();
assert_eq!(
get_fast_field_names(&agg_req1),
vec![
"score".to_string(),
"score2".to_string(),
"field123".to_string()
]
.into_iter()
.collect()
)
}
}


@@ -0,0 +1,221 @@
//! Enhances the request tree with access to the fast fields and metadata.
use std::rc::Rc;
use std::sync::atomic::AtomicU32;
use std::sync::Arc;
use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation};
use super::metric::{AverageAggregation, StatsAggregation};
use super::segment_agg_result::BucketCount;
use super::VecWithNames;
use crate::fastfield::{
type_and_cardinality, DynamicFastFieldReader, FastType, MultiValuedFastFieldReader,
};
use crate::schema::{Cardinality, Type};
use crate::{InvertedIndexReader, SegmentReader, TantivyError};
#[derive(Clone, Default)]
pub(crate) struct AggregationsWithAccessor {
pub metrics: VecWithNames<MetricAggregationWithAccessor>,
pub buckets: VecWithNames<BucketAggregationWithAccessor>,
}
impl AggregationsWithAccessor {
fn from_data(
metrics: VecWithNames<MetricAggregationWithAccessor>,
buckets: VecWithNames<BucketAggregationWithAccessor>,
) -> Self {
Self { metrics, buckets }
}
pub fn is_empty(&self) -> bool {
self.metrics.is_empty() && self.buckets.is_empty()
}
}
#[derive(Clone)]
pub(crate) enum FastFieldAccessor {
Multi(MultiValuedFastFieldReader<u64>),
Single(DynamicFastFieldReader<u64>),
}
impl FastFieldAccessor {
pub fn as_single(&self) -> Option<&DynamicFastFieldReader<u64>> {
match self {
FastFieldAccessor::Multi(_) => None,
FastFieldAccessor::Single(reader) => Some(reader),
}
}
pub fn as_multi(&self) -> Option<&MultiValuedFastFieldReader<u64>> {
match self {
FastFieldAccessor::Multi(reader) => Some(reader),
FastFieldAccessor::Single(_) => None,
}
}
}
#[derive(Clone)]
pub struct BucketAggregationWithAccessor {
/// In general there can be buckets without fast field access, e.g. buckets that are created
/// based on search terms. So eventually this needs to be Option or moved.
pub(crate) accessor: FastFieldAccessor,
pub(crate) inverted_index: Option<Arc<InvertedIndexReader>>,
pub(crate) field_type: Type,
pub(crate) bucket_agg: BucketAggregationType,
pub(crate) sub_aggregation: AggregationsWithAccessor,
pub(crate) bucket_count: BucketCount,
}
impl BucketAggregationWithAccessor {
fn try_from_bucket(
bucket: &BucketAggregationType,
sub_aggregation: &Aggregations,
reader: &SegmentReader,
bucket_count: Rc<AtomicU32>,
max_bucket_count: u32,
) -> crate::Result<BucketAggregationWithAccessor> {
let mut inverted_index = None;
let (accessor, field_type) = match &bucket {
BucketAggregationType::Range(RangeAggregation {
field: field_name, ..
}) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
BucketAggregationType::Histogram(HistogramAggregation {
field: field_name, ..
}) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
BucketAggregationType::Terms(TermsAggregation {
field: field_name, ..
}) => {
let field = reader
.schema()
.get_field(field_name)
.ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
inverted_index = Some(reader.inverted_index(field)?);
get_ff_reader_and_validate(reader, field_name, Cardinality::MultiValues)?
}
};
let sub_aggregation = sub_aggregation.clone();
Ok(BucketAggregationWithAccessor {
accessor,
field_type,
sub_aggregation: get_aggs_with_accessor_and_validate(
&sub_aggregation,
reader,
bucket_count.clone(),
max_bucket_count,
)?,
bucket_agg: bucket.clone(),
inverted_index,
bucket_count: BucketCount {
bucket_count,
max_bucket_count,
},
})
}
}
/// Contains the metric request and the fast field accessor.
#[derive(Clone)]
pub struct MetricAggregationWithAccessor {
pub metric: MetricAggregation,
pub field_type: Type,
pub accessor: DynamicFastFieldReader<u64>,
}
impl MetricAggregationWithAccessor {
fn try_from_metric(
metric: &MetricAggregation,
reader: &SegmentReader,
) -> crate::Result<MetricAggregationWithAccessor> {
match &metric {
MetricAggregation::Average(AverageAggregation { field: field_name })
| MetricAggregation::Stats(StatsAggregation { field: field_name }) => {
let (accessor, field_type) =
get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?;
Ok(MetricAggregationWithAccessor {
accessor: accessor
.as_single()
.expect("unexpected fast field cardinality")
.clone(),
field_type,
metric: metric.clone(),
})
}
}
}
}
pub(crate) fn get_aggs_with_accessor_and_validate(
aggs: &Aggregations,
reader: &SegmentReader,
bucket_count: Rc<AtomicU32>,
max_bucket_count: u32,
) -> crate::Result<AggregationsWithAccessor> {
let mut metrics = vec![];
let mut buckets = vec![];
for (key, agg) in aggs.iter() {
match agg {
Aggregation::Bucket(bucket) => buckets.push((
key.to_string(),
BucketAggregationWithAccessor::try_from_bucket(
&bucket.bucket_agg,
&bucket.sub_aggregation,
reader,
Rc::clone(&bucket_count),
max_bucket_count,
)?,
)),
Aggregation::Metric(metric) => metrics.push((
key.to_string(),
MetricAggregationWithAccessor::try_from_metric(metric, reader)?,
)),
}
}
Ok(AggregationsWithAccessor::from_data(
VecWithNames::from_entries(metrics),
VecWithNames::from_entries(buckets),
))
}
/// Get the fast field reader with the given cardinality.
fn get_ff_reader_and_validate(
reader: &SegmentReader,
field_name: &str,
cardinality: Cardinality,
) -> crate::Result<(FastFieldAccessor, Type)> {
let field = reader
.schema()
.get_field(field_name)
.ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
let field_type = reader.schema().get_field_entry(field).field_type();
if let Some((ff_type, field_cardinality)) = type_and_cardinality(field_type) {
if ff_type == FastType::Date {
return Err(TantivyError::InvalidArgument(
"Unsupported field type date in aggregation".to_string(),
));
}
if cardinality != field_cardinality {
return Err(TantivyError::InvalidArgument(format!(
"Invalid field cardinality on field {} expected {:?}, but got {:?}",
field_name, cardinality, field_cardinality
)));
}
} else {
return Err(TantivyError::InvalidArgument(format!(
"Only fast fields of type f64, u64, i64 are supported, but got {:?} ",
field_type.value_type()
)));
};
let ff_fields = reader.fast_fields();
match cardinality {
Cardinality::SingleValue => ff_fields
.u64_lenient(field)
.map(|field| (FastFieldAccessor::Single(field), field_type.value_type())),
Cardinality::MultiValues => ff_fields
.u64s_lenient(field)
.map(|field| (FastFieldAccessor::Multi(field), field_type.value_type())),
}
}


@@ -0,0 +1,243 @@
//! Contains the final aggregation tree.
//! This tree can be converted via the `into()` method from `IntermediateAggregationResults`.
//! This conversion computes the final result. For example: The intermediate result contains
//! intermediate average results, which is the sum and the number of values. The actual average is
//! calculated on the step from intermediate to final aggregation result tree.
use std::collections::HashMap;
use fnv::FnvHashMap;
use serde::{Deserialize, Serialize};
use super::agg_req::BucketAggregationInternal;
use super::bucket::GetDocCount;
use super::intermediate_agg_result::{IntermediateBucketResult, IntermediateMetricResult};
use super::metric::{SingleMetricResult, Stats};
use super::Key;
use crate::TantivyError;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// The final aggregation result.
pub struct AggregationResults(pub HashMap<String, AggregationResult>);
impl AggregationResults {
pub(crate) fn get_value_from_aggregation(
&self,
name: &str,
agg_property: &str,
) -> crate::Result<Option<f64>> {
if let Some(agg) = self.0.get(name) {
agg.get_value_from_aggregation(name, agg_property)
} else {
// Validation is done during request parsing, so we can't reach this state.
Err(TantivyError::InternalError(format!(
"Can't find aggregation {:?} in sub_aggregations",
name
)))
}
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
/// An aggregation is either a bucket or a metric.
pub enum AggregationResult {
/// Bucket result variant.
BucketResult(BucketResult),
/// Metric result variant.
MetricResult(MetricResult),
}
impl AggregationResult {
pub(crate) fn get_value_from_aggregation(
&self,
_name: &str,
agg_property: &str,
) -> crate::Result<Option<f64>> {
match self {
AggregationResult::BucketResult(_bucket) => Err(TantivyError::InternalError(
"Tried to retrieve value from bucket aggregation. This is not supported and \
should not happen during collection phase, but should be caught during validation"
.to_string(),
)),
AggregationResult::MetricResult(metric) => metric.get_value(agg_property),
}
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
/// MetricResult
pub enum MetricResult {
/// Average metric result.
Average(SingleMetricResult),
/// Stats metric result.
Stats(Stats),
}
impl MetricResult {
fn get_value(&self, agg_property: &str) -> crate::Result<Option<f64>> {
match self {
MetricResult::Average(avg) => Ok(avg.value),
MetricResult::Stats(stats) => stats.get_value(agg_property),
}
}
}
impl From<IntermediateMetricResult> for MetricResult {
fn from(metric: IntermediateMetricResult) -> Self {
match metric {
IntermediateMetricResult::Average(avg_data) => {
MetricResult::Average(avg_data.finalize().into())
}
IntermediateMetricResult::Stats(intermediate_stats) => {
MetricResult::Stats(intermediate_stats.finalize())
}
}
}
}
/// BucketResult holds the bucket aggregation result types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum BucketResult {
/// This is the range entry for a bucket, which contains a key, count, from, to, and optionally
/// sub_aggregations.
Range {
/// The range buckets sorted by range.
buckets: BucketEntries<RangeBucketEntry>,
},
/// This is the histogram entry for a bucket, which contains a key, count, and optionally
/// sub_aggregations.
Histogram {
/// The buckets.
///
/// Whether there are holes depends on the request; if min_doc_count is 0, then there are no
/// holes between the first and last bucket.
/// See [HistogramAggregation](super::bucket::HistogramAggregation)
buckets: BucketEntries<BucketEntry>,
},
/// This is the term result
Terms {
/// The buckets.
///
/// See [TermsAggregation](super::bucket::TermsAggregation)
buckets: Vec<BucketEntry>,
/// The number of documents that didn't make it into the top N due to shard_size or size
sum_other_doc_count: u64,
#[serde(skip_serializing_if = "Option::is_none")]
/// The upper bound error for the doc count of each term.
doc_count_error_upper_bound: Option<u64>,
},
}
impl BucketResult {
pub(crate) fn empty_from_req(req: &BucketAggregationInternal) -> crate::Result<Self> {
let empty_bucket = IntermediateBucketResult::empty_from_req(&req.bucket_agg);
empty_bucket.into_final_bucket_result(req)
}
}
/// This is the wrapper for bucket entries, which can be a vector or a hash map
/// depending on whether the request is keyed or not.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum BucketEntries<T> {
/// Vector format bucket entries
Vec(Vec<T>),
/// HashMap format bucket entries
HashMap(FnvHashMap<String, T>),
}
/// This is the default entry for a bucket, which contains a key, count, and optionally
/// sub_aggregations.
///
/// # JSON Format
/// ```json
/// {
/// ...
/// "my_histogram": {
/// "buckets": [
/// {
/// "key": "2.0",
/// "doc_count": 5
/// },
/// {
/// "key": "4.0",
/// "doc_count": 2
/// },
/// {
/// "key": "6.0",
/// "doc_count": 3
/// }
/// ]
/// }
/// ...
/// }
/// ```
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BucketEntry {
/// The identifier of the bucket.
pub key: Key,
/// Number of documents in the bucket.
pub doc_count: u64,
#[serde(flatten)]
/// Sub-aggregations in this bucket.
pub sub_aggregation: AggregationResults,
}
impl GetDocCount for &BucketEntry {
fn doc_count(&self) -> u64 {
self.doc_count
}
}
impl GetDocCount for BucketEntry {
fn doc_count(&self) -> u64 {
self.doc_count
}
}
/// This is the range entry for a bucket, which contains a key, count, and optionally
/// sub_aggregations.
///
/// # JSON Format
/// ```json
/// {
/// ...
/// "my_ranges": {
/// "buckets": [
/// {
/// "key": "*-10",
/// "to": 10,
/// "doc_count": 5
/// },
/// {
/// "key": "10-20",
/// "from": 10,
/// "to": 20,
/// "doc_count": 2
/// },
/// {
/// "key": "20-*",
/// "from": 20,
/// "doc_count": 3
/// }
/// ]
/// }
/// ...
/// }
/// ```
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RangeBucketEntry {
/// The identifier of the bucket.
pub key: Key,
/// Number of documents in the bucket.
pub doc_count: u64,
#[serde(flatten)]
/// sub-aggregations in this bucket.
pub sub_aggregation: AggregationResults,
/// The from range of the bucket. Equals f64::MIN when None.
#[serde(skip_serializing_if = "Option::is_none")]
pub from: Option<f64>,
/// The to range of the bucket. Equals f64::MAX when None.
#[serde(skip_serializing_if = "Option::is_none")]
pub to: Option<f64>,
}

File diff suppressed because it is too large


@@ -0,0 +1,2 @@
mod histogram;
pub use histogram::*;


@@ -0,0 +1,140 @@
//! Module for all bucket aggregations.
//!
//! BucketAggregations create buckets of documents
//! [BucketAggregation](super::agg_req::BucketAggregation).
//!
//! Results of final buckets are [BucketResult](super::agg_result::BucketResult).
//! Results of intermediate buckets are
//! [IntermediateBucketResult](super::intermediate_agg_result::IntermediateBucketResult)
mod histogram;
mod range;
mod term_agg;
use std::collections::HashMap;
pub(crate) use histogram::SegmentHistogramCollector;
pub use histogram::*;
pub(crate) use range::SegmentRangeCollector;
pub use range::*;
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
pub use term_agg::*;
/// Order for buckets in a bucket aggregation.
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum Order {
/// Asc order
#[serde(rename = "asc")]
Asc,
/// Desc order
#[serde(rename = "desc")]
Desc,
}
impl Default for Order {
fn default() -> Self {
Order::Desc
}
}
#[derive(Clone, Debug, PartialEq)]
/// The property by which to apply the order
pub enum OrderTarget {
/// The key of the bucket
Key,
/// The doc count of the bucket
Count,
/// Order by the value of the sub-aggregation metric identified by the given `String`.
///
/// Only single-value metrics are currently supported.
SubAggregation(String),
}
impl Default for OrderTarget {
fn default() -> Self {
OrderTarget::Count
}
}
impl From<&str> for OrderTarget {
fn from(val: &str) -> Self {
match val {
"_key" => OrderTarget::Key,
"_count" => OrderTarget::Count,
_ => OrderTarget::SubAggregation(val.to_string()),
}
}
}
impl ToString for OrderTarget {
fn to_string(&self) -> String {
match self {
OrderTarget::Key => "_key".to_string(),
OrderTarget::Count => "_count".to_string(),
OrderTarget::SubAggregation(agg) => agg.to_string(),
}
}
}
/// Set the order. target is either "_count", "_key", or the name of
/// a metric sub_aggregation.
///
/// De/Serializes to elasticsearch compatible JSON.
///
/// Examples in JSON format:
/// { "_count": "asc" }
/// { "_key": "asc" }
/// { "average_price": "asc" }
#[derive(Clone, Default, Debug, PartialEq)]
pub struct CustomOrder {
/// The target property to sort by
pub target: OrderTarget,
/// The order, asc or desc
pub order: Order,
}
impl Serialize for CustomOrder {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
let map: HashMap<String, Order> =
std::iter::once((self.target.to_string(), self.order)).collect();
map.serialize(serializer)
}
}
impl<'de> Deserialize<'de> for CustomOrder {
fn deserialize<D>(deserializer: D) -> Result<CustomOrder, D::Error>
where D: Deserializer<'de> {
HashMap::<String, Order>::deserialize(deserializer).and_then(|map| {
if let Some((key, value)) = map.into_iter().next() {
Ok(CustomOrder {
target: key.as_str().into(),
order: value,
})
} else {
Err(de::Error::custom(
"unexpected empty map in order".to_string(),
))
}
})
}
}
#[test]
fn custom_order_serde_test() {
let order = CustomOrder {
target: OrderTarget::Key,
order: Order::Desc,
};
let order_str = serde_json::to_string(&order).unwrap();
assert_eq!(order_str, "{\"_key\":\"desc\"}");
let order_deser = serde_json::from_str(&order_str).unwrap();
assert_eq!(order, order_deser);
let order_deser: serde_json::Result<CustomOrder> = serde_json::from_str("{}");
assert!(order_deser.is_err());
let order_deser: serde_json::Result<CustomOrder> = serde_json::from_str("[]");
assert!(order_deser.is_err());
}


@@ -0,0 +1,788 @@
use std::fmt::Debug;
use std::ops::Range;
use fnv::FnvHashMap;
use serde::{Deserialize, Serialize};
use crate::aggregation::agg_req_with_accessor::{
AggregationsWithAccessor, BucketAggregationWithAccessor,
};
use crate::aggregation::intermediate_agg_result::{
IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
};
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key, SerializedKey};
use crate::fastfield::FastFieldReader;
use crate::schema::Type;
use crate::{DocId, TantivyError};
/// Provide user-defined buckets to aggregate on.
/// Two special buckets will automatically be created to cover the whole range of values.
/// The provided buckets have to be continuous.
/// During the aggregation, the values extracted from the fast_field `field` will be checked
/// against each bucket range. Note that this aggregation includes the from value and excludes the
/// to value for each range.
///
/// Result type is [BucketResult](crate::aggregation::agg_result::BucketResult) with
/// [RangeBucketEntry](crate::aggregation::agg_result::RangeBucketEntry) on the
/// AggregationCollector.
///
/// Result type is
/// [crate::aggregation::intermediate_agg_result::IntermediateBucketResult] with
/// [crate::aggregation::intermediate_agg_result::IntermediateRangeBucketEntry] on the
/// DistributedAggregationCollector.
///
/// # Limitations/Compatibility
/// Overlapping ranges are not yet supported.
///
/// # Request JSON Format
/// ```json
/// {
/// "my_ranges": {
/// "field": "score",
/// "ranges": [
/// { "to": 3.0 },
/// { "from": 3.0, "to": 7.0 },
/// { "from": 7.0, "to": 20.0 },
/// { "from": 20.0 }
/// ]
/// }
/// }
/// ```
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct RangeAggregation {
/// The field to aggregate on.
pub field: String,
/// Note that this aggregation includes the from value and excludes the to value for each
/// range. Extra buckets will be created until the first to, and last from, if necessary.
pub ranges: Vec<RangeAggregationRange>,
/// Whether to return the buckets as a hash map
#[serde(default)]
pub keyed: bool,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
/// The range for one range bucket.
pub struct RangeAggregationRange {
/// Custom key for the range bucket
#[serde(skip_serializing_if = "Option::is_none", default)]
pub key: Option<String>,
/// The from range value, which is inclusive in the range.
/// None equals an open-ended interval.
#[serde(skip_serializing_if = "Option::is_none", default)]
pub from: Option<f64>,
/// The to range value, which is not inclusive in the range.
/// None equals an open-ended interval.
#[serde(skip_serializing_if = "Option::is_none", default)]
pub to: Option<f64>,
}
impl From<Range<f64>> for RangeAggregationRange {
fn from(range: Range<f64>) -> Self {
let from = if range.start == f64::MIN {
None
} else {
Some(range.start)
};
let to = if range.end == f64::MAX {
None
} else {
Some(range.end)
};
RangeAggregationRange {
key: None,
from,
to,
}
}
}
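// Illustration of the conversion above: f64::MIN / f64::MAX endpoints become None, marking
// the bucket as open-ended on that side, e.g.
//   RangeAggregationRange::from(3f64..7f64)     => { key: None, from: Some(3.0), to: Some(7.0) }
//   RangeAggregationRange::from(f64::MIN..3f64) => { key: None, from: None, to: Some(3.0) }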
#[derive(Clone, Debug, PartialEq)]
/// Internally used u64 range for one range bucket.
pub(crate) struct InternalRangeAggregationRange {
/// Custom key for the range bucket
key: Option<String>,
/// u64 range value
range: Range<u64>,
}
impl From<Range<u64>> for InternalRangeAggregationRange {
fn from(range: Range<u64>) -> Self {
InternalRangeAggregationRange { key: None, range }
}
}
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct SegmentRangeAndBucketEntry {
range: Range<u64>,
bucket: SegmentRangeBucketEntry,
}
/// The collector puts values from the fast field into the correct buckets and does a conversion to
/// the correct datatype.
#[derive(Clone, Debug, PartialEq)]
pub struct SegmentRangeCollector {
/// The buckets containing the aggregation data.
buckets: Vec<SegmentRangeAndBucketEntry>,
field_type: Type,
}
#[derive(Clone, PartialEq)]
pub(crate) struct SegmentRangeBucketEntry {
pub key: Key,
pub doc_count: u64,
pub sub_aggregation: Option<SegmentAggregationResultsCollector>,
/// The from range of the bucket. Equals f64::MIN when None.
pub from: Option<f64>,
/// The to range of the bucket. Equals f64::MAX when None. Open interval, `to` is not
/// inclusive.
pub to: Option<f64>,
}
impl Debug for SegmentRangeBucketEntry {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SegmentRangeBucketEntry")
.field("key", &self.key)
.field("doc_count", &self.doc_count)
.field("from", &self.from)
.field("to", &self.to)
.finish()
}
}
impl SegmentRangeBucketEntry {
pub(crate) fn into_intermediate_bucket_entry(
self,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<IntermediateRangeBucketEntry> {
let sub_aggregation = if let Some(sub_aggregation) = self.sub_aggregation {
sub_aggregation.into_intermediate_aggregations_result(agg_with_accessor)?
} else {
Default::default()
};
Ok(IntermediateRangeBucketEntry {
key: self.key,
doc_count: self.doc_count,
sub_aggregation,
from: self.from,
to: self.to,
})
}
}
impl SegmentRangeCollector {
pub fn into_intermediate_bucket_result(
self,
agg_with_accessor: &BucketAggregationWithAccessor,
) -> crate::Result<IntermediateBucketResult> {
let field_type = self.field_type;
let buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
.buckets
.into_iter()
.map(move |range_bucket| {
Ok((
range_to_string(&range_bucket.range, &field_type),
range_bucket
.bucket
.into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
))
})
.collect::<crate::Result<_>>()?;
Ok(IntermediateBucketResult::Range(
IntermediateRangeBucketResult { buckets },
))
}
pub(crate) fn from_req_and_validate(
req: &RangeAggregation,
sub_aggregation: &AggregationsWithAccessor,
bucket_count: &BucketCount,
field_type: Type,
) -> crate::Result<Self> {
// The range input on the request is f64.
// We need to convert to u64 ranges, because we read the values as u64.
// The mapping from the conversion is monotonic so ordering is preserved.
let buckets: Vec<_> = extend_validate_ranges(&req.ranges, &field_type)?
.iter()
.map(|range| {
let key = range
.key
.clone()
.map(Key::Str)
.unwrap_or_else(|| range_to_key(&range.range, &field_type));
let to = if range.range.end == u64::MAX {
None
} else {
Some(f64_from_fastfield_u64(range.range.end, &field_type))
};
let from = if range.range.start == u64::MIN {
None
} else {
Some(f64_from_fastfield_u64(range.range.start, &field_type))
};
let sub_aggregation = if sub_aggregation.is_empty() {
None
} else {
Some(SegmentAggregationResultsCollector::from_req_and_validate(
sub_aggregation,
)?)
};
Ok(SegmentRangeAndBucketEntry {
range: range.range.clone(),
bucket: SegmentRangeBucketEntry {
doc_count: 0,
sub_aggregation,
key,
from,
to,
},
})
})
.collect::<crate::Result<_>>()?;
bucket_count.add_count(buckets.len() as u32);
bucket_count.validate_bucket_count()?;
Ok(SegmentRangeCollector {
buckets,
field_type,
})
}
#[inline]
pub(crate) fn collect_block(
&mut self,
doc: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool,
) -> crate::Result<()> {
let mut iter = doc.chunks_exact(4);
let accessor = bucket_with_accessor
.accessor
.as_single()
.expect("unexpected fast field cardinatility");
for docs in iter.by_ref() {
let val1 = accessor.get(docs[0]);
let val2 = accessor.get(docs[1]);
let val3 = accessor.get(docs[2]);
let val4 = accessor.get(docs[3]);
let bucket_pos1 = self.get_bucket_pos(val1);
let bucket_pos2 = self.get_bucket_pos(val2);
let bucket_pos3 = self.get_bucket_pos(val3);
let bucket_pos4 = self.get_bucket_pos(val4);
self.increment_bucket(bucket_pos1, docs[0], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos2, docs[1], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?;
}
for doc in iter.remainder() {
let val = accessor.get(*doc);
let bucket_pos = self.get_bucket_pos(val);
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation)?;
}
if force_flush {
for bucket in &mut self.buckets {
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
sub_aggregation
.flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush)?;
}
}
}
Ok(())
}
#[inline]
fn increment_bucket(
&mut self,
bucket_pos: usize,
doc: DocId,
bucket_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> {
let bucket = &mut self.buckets[bucket_pos];
bucket.bucket.doc_count += 1;
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
sub_aggregation.collect(doc, bucket_with_accessor)?;
}
Ok(())
}
#[inline]
fn get_bucket_pos(&self, val: u64) -> usize {
let pos = self
.buckets
.binary_search_by_key(&val, |probe| probe.range.start)
.unwrap_or_else(|pos| pos - 1);
debug_assert!(self.buckets[pos].range.contains(&val));
pos
}
}
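// A minimal illustrative sketch of the lookup above: `binary_search_by_key` returns
// `Err(insertion_point)` for values that are not an exact bucket start, and
// `insertion_point - 1` is then the containing bucket. The same idea on a plain sorted
// list of bucket starts, assumed to begin at `u64::MIN` so the subtraction cannot underflow:
#[allow(dead_code)]
fn containing_bucket(bucket_starts: &[u64], val: u64) -> usize {
    bucket_starts
        .binary_search(&val)
        .unwrap_or_else(|insertion_point| insertion_point - 1)
}
// With bucket_starts = [u64::MIN, 10, 100]: containing_bucket(.., 9) == 0,
// containing_bucket(.., 10) == 1 and containing_bucket(.., 101) == 2.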
/// Converts the user provided f64 range value to fast field value space.
///
/// Internally fast field values are always stored as u64.
/// If the fast field has u64 [1,2,5], these values are stored as is in the fast field.
/// A fast field with f64 [1.0, 2.0, 5.0] is converted to u64 space, using a
/// monotonic mapping function, so the order is preserved.
///
/// Consequently, an f64 user range 1.0..3.0 needs to be converted to fast field value space using
/// the same monotonic mapping function, so that the provided ranges contain the u64 values stored
/// in the fast field.
/// The alternative would be to convert every value read back into f64 space, but that is more
/// computationally expensive when many documents are hit.
fn to_u64_range(
range: &RangeAggregationRange,
field_type: &Type,
) -> crate::Result<InternalRangeAggregationRange> {
let start = if let Some(from) = range.from {
f64_to_fastfield_u64(from, field_type)
.ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))?
} else {
u64::MIN
};
let end = if let Some(to) = range.to {
f64_to_fastfield_u64(to, field_type)
.ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))?
} else {
u64::MAX
};
Ok(InternalRangeAggregationRange {
key: range.key.clone(),
range: start..end,
})
}
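// A minimal illustrative sketch: because the f64 -> u64 mapping is monotonic, comparisons
// made in u64 space agree with comparisons on the original floats. `FastValue::to_u64` is the
// same helper the tests below use.
#[allow(dead_code)]
fn monotonic_mapping_sketch() {
    use crate::fastfield::FastValue;
    debug_assert!((-10.0f64).to_u64() < (-1.0f64).to_u64());
    debug_assert!((-1.0f64).to_u64() < 0.0f64.to_u64());
    debug_assert!(0.0f64.to_u64() < 10.5f64.to_u64());
}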
/// Extends the provided buckets to contain the whole value range, by inserting buckets at the
/// beginning and end and filling gaps.
fn extend_validate_ranges(
buckets: &[RangeAggregationRange],
field_type: &Type,
) -> crate::Result<Vec<InternalRangeAggregationRange>> {
let mut converted_buckets = buckets
.iter()
.map(|range| to_u64_range(range, field_type))
.collect::<crate::Result<Vec<_>>>()?;
converted_buckets.sort_by_key(|bucket| bucket.range.start);
if converted_buckets[0].range.start != u64::MIN {
converted_buckets.insert(0, (u64::MIN..converted_buckets[0].range.start).into());
}
if converted_buckets[converted_buckets.len() - 1].range.end != u64::MAX {
converted_buckets
.push((converted_buckets[converted_buckets.len() - 1].range.end..u64::MAX).into());
}
// fill up holes in the ranges
let find_hole = |converted_buckets: &[InternalRangeAggregationRange]| {
for (pos, ranges) in converted_buckets.windows(2).enumerate() {
if ranges[0].range.end > ranges[1].range.start {
return Err(TantivyError::InvalidArgument(format!(
"Overlapping ranges not supported range {:?}, range+1 {:?}",
ranges[0], ranges[1]
)));
}
if ranges[0].range.end != ranges[1].range.start {
return Ok(Some(pos));
}
}
Ok(None)
};
while let Some(hole_pos) = find_hole(&converted_buckets)? {
let new_range =
converted_buckets[hole_pos].range.end..converted_buckets[hole_pos + 1].range.start;
converted_buckets.insert(hole_pos + 1, new_range.into());
}
Ok(converted_buckets)
}
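// A minimal illustrative sketch of the same extend-and-fill idea on plain `Range<u64>` values.
// Given at least one non-overlapping input range (as the request validation above assumes), it
// prepends a range from `u64::MIN`, appends a range up to `u64::MAX`, and inserts one range per
// gap, so that every value falls into exactly one range.
#[allow(dead_code)]
fn extend_and_fill_sketch(mut ranges: Vec<Range<u64>>) -> Vec<Range<u64>> {
    ranges.sort_by_key(|range| range.start);
    if ranges[0].start != u64::MIN {
        ranges.insert(0, u64::MIN..ranges[0].start);
    }
    if ranges[ranges.len() - 1].end != u64::MAX {
        ranges.push(ranges[ranges.len() - 1].end..u64::MAX);
    }
    let mut filled: Vec<Range<u64>> = Vec::with_capacity(ranges.len());
    for range in ranges {
        if let Some(prev_end) = filled.last().map(|prev| prev.end) {
            if prev_end < range.start {
                // Insert a synthetic range covering the gap, as extend_validate_ranges does.
                filled.push(prev_end..range.start);
            }
        }
        filled.push(range);
    }
    filled
}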
pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
// is_start is there for malformed requests, e.g. if the user passes the range u64::MIN..0.0,
// it should be rendered as "*-0" and not "*-*"
let to_str = |val: u64, is_start: bool| {
if (is_start && val == u64::MIN) || (!is_start && val == u64::MAX) {
"*".to_string()
} else {
f64_from_fastfield_u64(val, field_type).to_string()
}
};
format!("{}-{}", to_str(range.start, true), to_str(range.end, false))
}
pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> Key {
Key::Str(range_to_string(range, field_type))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::aggregation::agg_req::{
Aggregation, Aggregations, BucketAggregation, BucketAggregationType,
};
use crate::aggregation::tests::{exec_request_with_query, get_test_index_with_num_docs};
use crate::fastfield::FastValue;
pub fn get_collector_from_ranges(
ranges: Vec<RangeAggregationRange>,
field_type: Type,
) -> SegmentRangeCollector {
let req = RangeAggregation {
field: "dummy".to_string(),
ranges,
..Default::default()
};
SegmentRangeCollector::from_req_and_validate(
&req,
&Default::default(),
&Default::default(),
field_type,
)
.expect("unexpected error")
}
#[test]
fn range_fraction_test() -> crate::Result<()> {
let index = get_test_index_with_num_docs(false, 100)?;
let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let res = exec_request_with_query(agg_req, &index, None)?;
assert_eq!(res["range"]["buckets"][0]["key"], "*-0");
assert_eq!(res["range"]["buckets"][0]["doc_count"], 0);
assert_eq!(res["range"]["buckets"][1]["key"], "0-0.1");
assert_eq!(res["range"]["buckets"][1]["doc_count"], 10);
assert_eq!(res["range"]["buckets"][2]["key"], "0.1-0.2");
assert_eq!(res["range"]["buckets"][2]["doc_count"], 10);
assert_eq!(res["range"]["buckets"][3]["key"], "0.2-*");
assert_eq!(res["range"]["buckets"][3]["doc_count"], 80);
Ok(())
}
#[test]
fn range_keyed_buckets_test() -> crate::Result<()> {
let index = get_test_index_with_num_docs(false, 100)?;
let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
keyed: true,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let res = exec_request_with_query(agg_req, &index, None)?;
assert_eq!(
res,
json!({
"range": {
"buckets": {
"*-0": { "key": "*-0", "doc_count": 0, "to": 0.0},
"0-0.1": {"key": "0-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
"0.1-0.2": {"key": "0.1-0.2", "doc_count": 10, "from": 0.1, "to": 0.2},
"0.2-*": {"key": "0.2-*", "doc_count": 80, "from": 0.2},
}
}
})
);
Ok(())
}
#[test]
fn range_custom_key_test() -> crate::Result<()> {
let index = get_test_index_with_num_docs(false, 100)?;
let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![
RangeAggregationRange {
key: Some("custom-key-0-to-0.1".to_string()),
from: Some(0f64),
to: Some(0.1f64),
},
RangeAggregationRange {
key: None,
from: Some(0.1f64),
to: Some(0.2f64),
},
],
keyed: false,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let res = exec_request_with_query(agg_req, &index, None)?;
assert_eq!(
res,
json!({
"range": {
"buckets": [
{"key": "*-0", "doc_count": 0, "to": 0.0},
{"key": "custom-key-0-to-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
{"key": "0.1-0.2", "doc_count": 10, "from": 0.1, "to": 0.2},
{"key": "0.2-*", "doc_count": 80, "from": 0.2}
]
}
})
);
Ok(())
}
#[test]
fn range_custom_key_keyed_buckets_test() -> crate::Result<()> {
let index = get_test_index_with_num_docs(false, 100)?;
let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![RangeAggregationRange {
key: Some("custom-key-0-to-0.1".to_string()),
from: Some(0f64),
to: Some(0.1f64),
}],
keyed: true,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let res = exec_request_with_query(agg_req, &index, None)?;
assert_eq!(
res,
json!({
"range": {
"buckets": {
"*-0": { "key": "*-0", "doc_count": 0, "to": 0.0},
"custom-key-0-to-0.1": {"key": "custom-key-0-to-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
"0.1-*": {"key": "0.1-*", "doc_count": 90, "from": 0.1},
}
}
})
);
Ok(())
}
#[test]
fn bucket_test_extend_range_hole() {
let buckets = vec![(10f64..20f64).into(), (30f64..40f64).into()];
let collector = get_collector_from_ranges(buckets, Type::F64);
let buckets = collector.buckets;
assert_eq!(buckets[0].range.start, u64::MIN);
assert_eq!(buckets[0].range.end, 10f64.to_u64());
assert_eq!(buckets[1].range.start, 10f64.to_u64());
assert_eq!(buckets[1].range.end, 20f64.to_u64());
// Added bucket to fill hole
assert_eq!(buckets[2].range.start, 20f64.to_u64());
assert_eq!(buckets[2].range.end, 30f64.to_u64());
assert_eq!(buckets[3].range.start, 30f64.to_u64());
assert_eq!(buckets[3].range.end, 40f64.to_u64());
}
#[test]
fn bucket_test_range_conversion_special_case() {
// The monotonic conversion between f64 and u64 does not map f64::MIN to u64::MIN, but the
// From<Range<f64>> impl converts f64::MIN/f64::MAX bounds to None, so the full range is covered.
let buckets = vec![
(f64::MIN..10f64).into(),
(10f64..20f64).into(),
(20f64..f64::MAX).into(),
];
let collector = get_collector_from_ranges(buckets, Type::F64);
let buckets = collector.buckets;
assert_eq!(buckets[0].range.start, u64::MIN);
assert_eq!(buckets[0].range.end, 10f64.to_u64());
assert_eq!(buckets[1].range.start, 10f64.to_u64());
assert_eq!(buckets[1].range.end, 20f64.to_u64());
assert_eq!(buckets[2].range.start, 20f64.to_u64());
assert_eq!(buckets[2].range.end, u64::MAX);
assert_eq!(buckets.len(), 3);
}
#[test]
fn bucket_range_test_negative_vals() {
let buckets = vec![(-10f64..-1f64).into()];
let collector = get_collector_from_ranges(buckets, Type::F64);
let buckets = collector.buckets;
assert_eq!(&buckets[0].bucket.key.to_string(), "*--10");
assert_eq!(&buckets[buckets.len() - 1].bucket.key.to_string(), "-1-*");
}
#[test]
fn bucket_range_test_positive_vals() {
let buckets = vec![(0f64..10f64).into()];
let collector = get_collector_from_ranges(buckets, Type::F64);
let buckets = collector.buckets;
assert_eq!(&buckets[0].bucket.key.to_string(), "*-0");
assert_eq!(&buckets[buckets.len() - 1].bucket.key.to_string(), "10-*");
}
#[test]
fn range_binary_search_test_u64() {
let check_ranges = |ranges: Vec<RangeAggregationRange>| {
let collector = get_collector_from_ranges(ranges, Type::U64);
let search = |val: u64| collector.get_bucket_pos(val);
assert_eq!(search(u64::MIN), 0);
assert_eq!(search(9), 0);
assert_eq!(search(10), 1);
assert_eq!(search(11), 1);
assert_eq!(search(99), 1);
assert_eq!(search(100), 2);
assert_eq!(search(u64::MAX - 1), 2); // The range end is exclusive, so u64::MAX - 1 falls into the last bucket.
};
let ranges = vec![(10.0..100.0).into()];
check_ranges(ranges);
let ranges = vec![
RangeAggregationRange {
key: None,
to: Some(10.0),
from: None,
},
(10.0..100.0).into(),
];
check_ranges(ranges);
let ranges = vec![
RangeAggregationRange {
key: None,
to: Some(10.0),
from: None,
},
(10.0..100.0).into(),
RangeAggregationRange {
key: None,
to: None,
from: Some(100.0),
},
];
check_ranges(ranges);
}
#[test]
fn range_binary_search_test_f64() {
let ranges = vec![(10.0..100.0).into()];
let collector = get_collector_from_ranges(ranges, Type::F64);
let search = |val: u64| collector.get_bucket_pos(val);
assert_eq!(search(u64::MIN), 0);
assert_eq!(search(9f64.to_u64()), 0);
assert_eq!(search(10f64.to_u64()), 1);
assert_eq!(search(11f64.to_u64()), 1);
assert_eq!(search(99f64.to_u64()), 1);
assert_eq!(search(100f64.to_u64()), 2);
assert_eq!(search(u64::MAX - 1), 2); // The range end is exclusive, so u64::MAX - 1 falls into the last bucket.
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use itertools::Itertools;
use rand::seq::SliceRandom;
use rand::thread_rng;
use super::*;
use crate::aggregation::bucket::range::tests::get_collector_from_ranges;
const TOTAL_DOCS: u64 = 1_000_000u64;
const NUM_DOCS: u64 = 50_000u64;
fn get_collector_with_buckets(num_buckets: u64, num_docs: u64) -> SegmentRangeCollector {
let bucket_size = num_docs / num_buckets;
let mut buckets: Vec<RangeAggregationRange> = vec![];
for i in 0..num_buckets {
let bucket_start = (i * bucket_size) as f64;
buckets.push((bucket_start..bucket_start + bucket_size as f64).into())
}
get_collector_from_ranges(buckets, Type::U64)
}
fn get_rand_docs(total_docs: u64, num_docs_returned: u64) -> Vec<u64> {
let mut rng = thread_rng();
let all_docs = (0..total_docs - 1).collect_vec();
let mut vals = all_docs
.as_slice()
.choose_multiple(&mut rng, num_docs_returned as usize)
.cloned()
.collect_vec();
vals.sort();
vals
}
fn bench_range_binary_search(b: &mut test::Bencher, num_buckets: u64) {
let collector = get_collector_with_buckets(num_buckets, TOTAL_DOCS);
let vals = get_rand_docs(TOTAL_DOCS, NUM_DOCS);
b.iter(|| {
let mut bucket_pos = 0;
for val in &vals {
bucket_pos = collector.get_bucket_pos(*val);
}
bucket_pos
})
}
#[bench]
fn bench_range_100_buckets(b: &mut test::Bencher) {
bench_range_binary_search(b, 100)
}
#[bench]
fn bench_range_10_buckets(b: &mut test::Bencher) {
bench_range_binary_search(b, 10)
}
}

File diff suppressed because it is too large

@@ -0,0 +1,183 @@
use std::rc::Rc;
use super::agg_req::Aggregations;
use super::agg_req_with_accessor::AggregationsWithAccessor;
use super::agg_result::AggregationResults;
use super::intermediate_agg_result::IntermediateAggregationResults;
use super::segment_agg_result::SegmentAggregationResultsCollector;
use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
use crate::collector::{Collector, SegmentCollector};
use crate::{SegmentReader, TantivyError};
/// The default max bucket count, before the aggregation fails.
pub const MAX_BUCKET_COUNT: u32 = 65000;
/// Collector for aggregations.
///
/// The collector collects all aggregations specified in the aggregation request.
pub struct AggregationCollector {
agg: Aggregations,
max_bucket_count: u32,
}
impl AggregationCollector {
/// Create collector from aggregation request.
///
/// Aggregation fails when the total bucket count is higher than max_bucket_count.
/// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
Self {
agg,
max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
}
}
}
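// A minimal usage sketch, mirroring the tests in this crate; `searcher` and `agg_req` are
// assumed to be built elsewhere.
#[allow(dead_code)]
fn run_aggregation_sketch(
    searcher: &crate::Searcher,
    agg_req: Aggregations,
) -> crate::Result<AggregationResults> {
    // Passing `None` falls back to MAX_BUCKET_COUNT (65000).
    let collector = AggregationCollector::from_aggs(agg_req, None);
    // The fruit of AggregationCollector is already the final AggregationResults,
    // which can be serialized with serde_json.
    searcher.search(&crate::query::AllQuery, &collector)
}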
/// Collector for distributed aggregations.
///
/// The collector collects all aggregations specified in the aggregation request.
///
/// # Purpose
/// Unlike `AggregationCollector`, this collector returns `IntermediateAggregationResults` instead
/// of the final `AggregationResults`, so that results from different indices can be merged and
/// then converted into the final `AggregationResults` via the `into_final_bucket_result()` method.
pub struct DistributedAggregationCollector {
agg: Aggregations,
max_bucket_count: u32,
}
impl DistributedAggregationCollector {
/// Create collector from aggregation request.
///
/// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
Self {
agg,
max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
}
}
}
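// A minimal sketch of the merge flow described above; `searcher_a`, `searcher_b`, `query`
// and `agg_req` are assumed to exist elsewhere (e.g. one searcher per index).
#[allow(dead_code)]
fn merge_across_indices_sketch(
    searcher_a: &crate::Searcher,
    searcher_b: &crate::Searcher,
    query: &dyn crate::query::Query,
    agg_req: Aggregations,
) -> crate::Result<AggregationResults> {
    let collector = DistributedAggregationCollector::from_aggs(agg_req.clone(), None);
    // Each search returns an IntermediateAggregationResults for its index.
    let mut intermediate = searcher_a.search(query, &collector)?;
    intermediate.merge_fruits(searcher_b.search(query, &collector)?);
    // Convert the merged intermediate tree into the final result.
    intermediate.into_final_bucket_result(agg_req)
}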
impl Collector for DistributedAggregationCollector {
type Fruit = IntermediateAggregationResults;
type Child = AggregationSegmentCollector;
fn for_segment(
&self,
_segment_local_id: crate::SegmentOrdinal,
reader: &crate::SegmentReader,
) -> crate::Result<Self::Child> {
AggregationSegmentCollector::from_agg_req_and_reader(
&self.agg,
reader,
self.max_bucket_count,
)
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(
&self,
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
) -> crate::Result<Self::Fruit> {
merge_fruits(segment_fruits)
}
}
impl Collector for AggregationCollector {
type Fruit = AggregationResults;
type Child = AggregationSegmentCollector;
fn for_segment(
&self,
_segment_local_id: crate::SegmentOrdinal,
reader: &crate::SegmentReader,
) -> crate::Result<Self::Child> {
AggregationSegmentCollector::from_agg_req_and_reader(
&self.agg,
reader,
self.max_bucket_count,
)
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(
&self,
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
) -> crate::Result<Self::Fruit> {
let res = merge_fruits(segment_fruits)?;
res.into_final_bucket_result(self.agg.clone())
}
}
fn merge_fruits(
mut segment_fruits: Vec<crate::Result<IntermediateAggregationResults>>,
) -> crate::Result<IntermediateAggregationResults> {
if let Some(fruit) = segment_fruits.pop() {
let mut fruit = fruit?;
for next_fruit in segment_fruits {
fruit.merge_fruits(next_fruit?);
}
Ok(fruit)
} else {
Ok(IntermediateAggregationResults::default())
}
}
/// AggregationSegmentCollector does the aggregation collection on a segment.
pub struct AggregationSegmentCollector {
aggs_with_accessor: AggregationsWithAccessor,
result: SegmentAggregationResultsCollector,
error: Option<TantivyError>,
}
impl AggregationSegmentCollector {
/// Creates an AggregationSegmentCollector from an [Aggregations] request and a segment reader.
/// Also includes validation, e.g. checking field types and existence.
pub fn from_agg_req_and_reader(
agg: &Aggregations,
reader: &SegmentReader,
max_bucket_count: u32,
) -> crate::Result<Self> {
let aggs_with_accessor =
get_aggs_with_accessor_and_validate(agg, reader, Rc::default(), max_bucket_count)?;
let result =
SegmentAggregationResultsCollector::from_req_and_validate(&aggs_with_accessor)?;
Ok(AggregationSegmentCollector {
aggs_with_accessor,
result,
error: None,
})
}
}
impl SegmentCollector for AggregationSegmentCollector {
type Fruit = crate::Result<IntermediateAggregationResults>;
#[inline]
fn collect(&mut self, doc: crate::DocId, _score: crate::Score) {
if self.error.is_some() {
return;
}
if let Err(err) = self.result.collect(doc, &self.aggs_with_accessor) {
self.error = Some(err);
}
}
fn harvest(mut self) -> Self::Fruit {
if let Some(err) = self.error {
return Err(err);
}
self.result
.flush_staged_docs(&self.aggs_with_accessor, true)?;
self.result
.into_intermediate_aggregations_result(&self.aggs_with_accessor)
}
}


@@ -0,0 +1,738 @@
//! Contains the intermediate aggregation tree, that can be merged.
//! Intermediate aggregation results can be used to merge results between segments or between
//! indices.
use std::cmp::Ordering;
use std::collections::HashMap;
use fnv::FnvHashMap;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use super::agg_req::{
Aggregations, AggregationsInternal, BucketAggregationInternal, BucketAggregationType,
MetricAggregation,
};
use super::agg_result::{AggregationResult, BucketResult, RangeBucketEntry};
use super::bucket::{
cut_off_buckets, get_agg_name_and_property, intermediate_histogram_buckets_to_final_buckets,
GetDocCount, Order, OrderTarget, SegmentHistogramBucketEntry, TermsAggregation,
};
use super::metric::{IntermediateAverage, IntermediateStats};
use super::segment_agg_result::SegmentMetricResultCollector;
use super::{Key, SerializedKey, VecWithNames};
use crate::aggregation::agg_result::{AggregationResults, BucketEntries, BucketEntry};
use crate::aggregation::bucket::TermsAggregationInternal;
/// Contains the intermediate aggregation result, which is optimized to be merged with other
/// intermediate results.
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntermediateAggregationResults {
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) metrics: Option<VecWithNames<IntermediateMetricResult>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) buckets: Option<VecWithNames<IntermediateBucketResult>>,
}
impl IntermediateAggregationResults {
/// Convert intermediate result and its aggregation request to the final result.
pub fn into_final_bucket_result(self, req: Aggregations) -> crate::Result<AggregationResults> {
self.into_final_bucket_result_internal(&(req.into()))
}
/// Convert intermediate result and its aggregation request to the final result.
///
/// Internal function: `AggregationsInternal` is used instead of `Aggregations`; it is optimized
/// for internal processing by splitting metrics and buckets into separate groups.
pub(crate) fn into_final_bucket_result_internal(
self,
req: &AggregationsInternal,
) -> crate::Result<AggregationResults> {
// Important assumption:
// When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
// request
let mut results: HashMap<String, AggregationResult> = HashMap::new();
if let Some(buckets) = self.buckets {
convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets)?
} else {
// When there are no buckets, we create empty buckets, so that the serialized json
// format is constant
add_empty_final_buckets_to_result(&mut results, &req.buckets)?
};
if let Some(metrics) = self.metrics {
convert_and_add_final_metrics_to_result(&mut results, metrics);
} else {
// When there are no metrics, we create empty metric results, so that the serialized
// json format is constant
add_empty_final_metrics_to_result(&mut results, &req.metrics)?;
}
Ok(AggregationResults(results))
}
pub(crate) fn empty_from_req(req: &AggregationsInternal) -> Self {
let metrics = if req.metrics.is_empty() {
None
} else {
let metrics = req
.metrics
.iter()
.map(|(key, req)| {
(
key.to_string(),
IntermediateMetricResult::empty_from_req(req),
)
})
.collect();
Some(VecWithNames::from_entries(metrics))
};
let buckets = if req.buckets.is_empty() {
None
} else {
let buckets = req
.buckets
.iter()
.map(|(key, req)| {
(
key.to_string(),
IntermediateBucketResult::empty_from_req(&req.bucket_agg),
)
})
.collect();
Some(VecWithNames::from_entries(buckets))
};
Self { metrics, buckets }
}
/// Merge another intermediate aggregation result into this result.
///
/// The order of the values needs to be the same in both results. This is ensured when both
/// results contain the same keys in the underlying VecWithNames struct.
pub fn merge_fruits(&mut self, other: IntermediateAggregationResults) {
if let (Some(buckets_left), Some(buckets_right)) = (&mut self.buckets, other.buckets) {
for (bucket_left, bucket_right) in
buckets_left.values_mut().zip(buckets_right.into_values())
{
bucket_left.merge_fruits(bucket_right);
}
}
if let (Some(metrics_left), Some(metrics_right)) = (&mut self.metrics, other.metrics) {
for (metric_left, metric_right) in
metrics_left.values_mut().zip(metrics_right.into_values())
{
metric_left.merge_fruits(metric_right);
}
}
}
}
fn convert_and_add_final_metrics_to_result(
results: &mut HashMap<String, AggregationResult>,
metrics: VecWithNames<IntermediateMetricResult>,
) {
results.extend(
metrics
.into_iter()
.map(|(key, metric)| (key, AggregationResult::MetricResult(metric.into()))),
);
}
fn add_empty_final_metrics_to_result(
results: &mut HashMap<String, AggregationResult>,
req_metrics: &VecWithNames<MetricAggregation>,
) -> crate::Result<()> {
results.extend(req_metrics.iter().map(|(key, req)| {
let empty_bucket = IntermediateMetricResult::empty_from_req(req);
(
key.to_string(),
AggregationResult::MetricResult(empty_bucket.into()),
)
}));
Ok(())
}
fn add_empty_final_buckets_to_result(
results: &mut HashMap<String, AggregationResult>,
req_buckets: &VecWithNames<BucketAggregationInternal>,
) -> crate::Result<()> {
let requested_buckets = req_buckets.iter();
for (key, req) in requested_buckets {
let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
results.insert(key.to_string(), empty_bucket);
}
Ok(())
}
fn convert_and_add_final_buckets_to_result(
results: &mut HashMap<String, AggregationResult>,
buckets: VecWithNames<IntermediateBucketResult>,
req_buckets: &VecWithNames<BucketAggregationInternal>,
) -> crate::Result<()> {
assert_eq!(buckets.len(), req_buckets.len());
let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
for ((key, bucket), req) in buckets_with_request {
let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req)?);
results.insert(key, result);
}
Ok(())
}
/// An aggregation is either a bucket or a metric.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IntermediateAggregationResult {
/// Bucket variant
Bucket(IntermediateBucketResult),
/// Metric variant
Metric(IntermediateMetricResult),
}
/// Holds the intermediate data for metric results
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IntermediateMetricResult {
/// Average containing intermediate average data result
Average(IntermediateAverage),
/// Stats variant containing intermediate stats data
Stats(IntermediateStats),
}
impl From<SegmentMetricResultCollector> for IntermediateMetricResult {
fn from(tree: SegmentMetricResultCollector) -> Self {
match tree {
SegmentMetricResultCollector::Average(collector) => {
IntermediateMetricResult::Average(IntermediateAverage::from_collector(collector))
}
SegmentMetricResultCollector::Stats(collector) => {
IntermediateMetricResult::Stats(collector.stats)
}
}
}
}
impl IntermediateMetricResult {
pub(crate) fn empty_from_req(req: &MetricAggregation) -> Self {
match req {
MetricAggregation::Average(_) => {
IntermediateMetricResult::Average(IntermediateAverage::default())
}
MetricAggregation::Stats(_) => {
IntermediateMetricResult::Stats(IntermediateStats::default())
}
}
}
fn merge_fruits(&mut self, other: IntermediateMetricResult) {
match (self, other) {
(
IntermediateMetricResult::Average(avg_data_left),
IntermediateMetricResult::Average(avg_data_right),
) => {
avg_data_left.merge_fruits(avg_data_right);
}
(
IntermediateMetricResult::Stats(stats_left),
IntermediateMetricResult::Stats(stats_right),
) => {
stats_left.merge_fruits(stats_right);
}
_ => {
panic!("incompatible fruit types in tree");
}
}
}
}
/// The intermediate bucket results. Internally they can be easily merged via the keys of the
/// buckets.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IntermediateBucketResult {
/// Range result, containing the range buckets. Each bucket has a key, doc count, from, to, and
/// optionally sub_aggregations.
Range(IntermediateRangeBucketResult),
/// Histogram result, containing the histogram buckets. Each bucket has a key, doc count, and
/// optionally sub_aggregations.
Histogram {
/// The buckets
buckets: Vec<IntermediateHistogramBucketEntry>,
},
/// Term aggregation
Terms(IntermediateTermBucketResult),
}
impl IntermediateBucketResult {
pub(crate) fn into_final_bucket_result(
self,
req: &BucketAggregationInternal,
) -> crate::Result<BucketResult> {
match self {
IntermediateBucketResult::Range(range_res) => {
let mut buckets: Vec<RangeBucketEntry> = range_res
.buckets
.into_iter()
.map(|(_, bucket)| bucket.into_final_bucket_entry(&req.sub_aggregation))
.collect::<crate::Result<Vec<_>>>()?;
buckets.sort_by(|left, right| {
left.from
.unwrap_or(f64::MIN)
.total_cmp(&right.from.unwrap_or(f64::MIN))
});
let is_keyed = req
.as_range()
.expect("unexpected aggregation, expected range aggregation")
.keyed;
let buckets = if is_keyed {
let mut bucket_map =
FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
for bucket in buckets {
bucket_map.insert(bucket.key.to_string(), bucket);
}
BucketEntries::HashMap(bucket_map)
} else {
BucketEntries::Vec(buckets)
};
Ok(BucketResult::Range { buckets })
}
IntermediateBucketResult::Histogram { buckets } => {
let buckets = intermediate_histogram_buckets_to_final_buckets(
buckets,
req.as_histogram()
.expect("unexpected aggregation, expected histogram aggregation"),
&req.sub_aggregation,
)?;
let buckets = if req.as_histogram().unwrap().keyed {
let mut bucket_map =
FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
for bucket in buckets {
bucket_map.insert(bucket.key.to_string(), bucket);
}
BucketEntries::HashMap(bucket_map)
} else {
BucketEntries::Vec(buckets)
};
Ok(BucketResult::Histogram { buckets })
}
IntermediateBucketResult::Terms(terms) => terms.into_final_result(
req.as_term()
.expect("unexpected aggregation, expected term aggregation"),
&req.sub_aggregation,
),
}
}
pub(crate) fn empty_from_req(req: &BucketAggregationType) -> Self {
match req {
BucketAggregationType::Terms(_) => IntermediateBucketResult::Terms(Default::default()),
BucketAggregationType::Range(_) => IntermediateBucketResult::Range(Default::default()),
BucketAggregationType::Histogram(_) => {
IntermediateBucketResult::Histogram { buckets: vec![] }
}
}
}
fn merge_fruits(&mut self, other: IntermediateBucketResult) {
match (self, other) {
(
IntermediateBucketResult::Terms(term_res_left),
IntermediateBucketResult::Terms(term_res_right),
) => {
merge_maps(&mut term_res_left.entries, term_res_right.entries);
term_res_left.sum_other_doc_count += term_res_right.sum_other_doc_count;
term_res_left.doc_count_error_upper_bound +=
term_res_right.doc_count_error_upper_bound;
}
(
IntermediateBucketResult::Range(range_res_left),
IntermediateBucketResult::Range(range_res_right),
) => {
merge_maps(&mut range_res_left.buckets, range_res_right.buckets);
}
(
IntermediateBucketResult::Histogram {
buckets: buckets_left,
..
},
IntermediateBucketResult::Histogram {
buckets: buckets_right,
..
},
) => {
let buckets = buckets_left
.drain(..)
.merge_join_by(buckets_right.into_iter(), |left, right| {
left.key.partial_cmp(&right.key).unwrap_or(Ordering::Equal)
})
.map(|either| match either {
itertools::EitherOrBoth::Both(mut left, right) => {
left.merge_fruits(right);
left
}
itertools::EitherOrBoth::Left(left) => left,
itertools::EitherOrBoth::Right(right) => right,
})
.collect();
*buckets_left = buckets;
}
(IntermediateBucketResult::Range(_), _) => {
panic!("try merge on different types")
}
(IntermediateBucketResult::Histogram { .. }, _) => {
panic!("try merge on different types")
}
(IntermediateBucketResult::Terms { .. }, _) => {
panic!("try merge on different types")
}
}
}
}
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
/// Intermediate range aggregation result, keyed by the serialized range
pub struct IntermediateRangeBucketResult {
pub(crate) buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry>,
}
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
/// Term aggregation including error counts
pub struct IntermediateTermBucketResult {
pub(crate) entries: FnvHashMap<String, IntermediateTermBucketEntry>,
pub(crate) sum_other_doc_count: u64,
pub(crate) doc_count_error_upper_bound: u64,
}
impl IntermediateTermBucketResult {
pub(crate) fn into_final_result(
self,
req: &TermsAggregation,
sub_aggregation_req: &AggregationsInternal,
) -> crate::Result<BucketResult> {
let req = TermsAggregationInternal::from_req(req);
let mut buckets: Vec<BucketEntry> = self
.entries
.into_iter()
.filter(|bucket| bucket.1.doc_count >= req.min_doc_count)
.map(|(key, entry)| {
Ok(BucketEntry {
key: Key::Str(key),
doc_count: entry.doc_count,
sub_aggregation: entry
.sub_aggregation
.into_final_bucket_result_internal(sub_aggregation_req)?,
})
})
.collect::<crate::Result<_>>()?;
let order = req.order.order;
match req.order.target {
OrderTarget::Key => {
buckets.sort_by(|left, right| {
if req.order.order == Order::Desc {
left.key.partial_cmp(&right.key)
} else {
right.key.partial_cmp(&left.key)
}
.expect("expected type string, which is always sortable")
});
}
OrderTarget::Count => {
if req.order.order == Order::Desc {
buckets.sort_unstable_by_key(|bucket| std::cmp::Reverse(bucket.doc_count()));
} else {
buckets.sort_unstable_by_key(|bucket| bucket.doc_count());
}
}
OrderTarget::SubAggregation(name) => {
let (agg_name, agg_property) = get_agg_name_and_property(&name);
let mut buckets_with_val = buckets
.into_iter()
.map(|bucket| {
let val = bucket
.sub_aggregation
.get_value_from_aggregation(agg_name, agg_property)?
.unwrap_or(f64::NAN);
Ok((bucket, val))
})
.collect::<crate::Result<Vec<_>>>()?;
buckets_with_val.sort_by(|(_, val1), (_, val2)| match &order {
Order::Desc => val2.total_cmp(val1),
Order::Asc => val1.total_cmp(val2),
});
buckets = buckets_with_val
.into_iter()
.map(|(bucket, _val)| bucket)
.collect_vec();
}
}
// We ignore _term_doc_count_before_cutoff here, because it increases the upper-bound error
// only for terms that didn't make it into the top N.
//
// That value can be interesting as a measure of result quality, but it is not suitable for
// checking the actual error count of the returned terms.
let (_term_doc_count_before_cutoff, sum_other_doc_count) =
cut_off_buckets(&mut buckets, req.size as usize);
let doc_count_error_upper_bound = if req.show_term_doc_count_error {
Some(self.doc_count_error_upper_bound)
} else {
None
};
Ok(BucketResult::Terms {
buckets,
sum_other_doc_count: self.sum_other_doc_count + sum_other_doc_count,
doc_count_error_upper_bound,
})
}
}
trait MergeFruits {
fn merge_fruits(&mut self, other: Self);
}
fn merge_maps<V: MergeFruits + Clone>(
entries_left: &mut FnvHashMap<SerializedKey, V>,
mut entries_right: FnvHashMap<SerializedKey, V>,
) {
for (name, entry_left) in entries_left.iter_mut() {
if let Some(entry_right) = entries_right.remove(name) {
entry_left.merge_fruits(entry_right);
}
}
for (key, res) in entries_right.into_iter() {
entries_left.entry(key).or_insert(res);
}
}
/// This is the histogram entry for a bucket, which contains a key, count, and optionally
/// sub_aggregations.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntermediateHistogramBucketEntry {
/// The key by which the bucket is uniquely identified.
pub key: f64,
/// The number of documents in the bucket.
pub doc_count: u64,
/// The sub_aggregation in this bucket.
pub sub_aggregation: IntermediateAggregationResults,
}
impl IntermediateHistogramBucketEntry {
pub(crate) fn into_final_bucket_entry(
self,
req: &AggregationsInternal,
) -> crate::Result<BucketEntry> {
Ok(BucketEntry {
key: Key::F64(self.key),
doc_count: self.doc_count,
sub_aggregation: self
.sub_aggregation
.into_final_bucket_result_internal(req)?,
})
}
}
impl From<SegmentHistogramBucketEntry> for IntermediateHistogramBucketEntry {
fn from(entry: SegmentHistogramBucketEntry) -> Self {
IntermediateHistogramBucketEntry {
key: entry.key,
doc_count: entry.doc_count,
sub_aggregation: Default::default(),
}
}
}
/// This is the range entry for a bucket, which contains a key, count, and optionally
/// sub_aggregations.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntermediateRangeBucketEntry {
/// The key by which the bucket is uniquely identified.
pub key: Key,
/// The number of documents in the bucket.
pub doc_count: u64,
/// The sub_aggregation in this bucket.
pub sub_aggregation: IntermediateAggregationResults,
/// The from range of the bucket. Equals f64::MIN when None.
#[serde(skip_serializing_if = "Option::is_none")]
pub from: Option<f64>,
/// The to range of the bucket. Equals f64::MAX when None.
#[serde(skip_serializing_if = "Option::is_none")]
pub to: Option<f64>,
}
impl IntermediateRangeBucketEntry {
pub(crate) fn into_final_bucket_entry(
self,
req: &AggregationsInternal,
) -> crate::Result<RangeBucketEntry> {
Ok(RangeBucketEntry {
key: self.key,
doc_count: self.doc_count,
sub_aggregation: self
.sub_aggregation
.into_final_bucket_result_internal(req)?,
to: self.to,
from: self.from,
})
}
}
/// This is the term entry for a bucket, which contains a count, and optionally
/// sub_aggregations.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntermediateTermBucketEntry {
/// The number of documents in the bucket.
pub doc_count: u64,
/// The sub_aggregation in this bucket.
pub sub_aggregation: IntermediateAggregationResults,
}
impl MergeFruits for IntermediateTermBucketEntry {
fn merge_fruits(&mut self, other: IntermediateTermBucketEntry) {
self.doc_count += other.doc_count;
self.sub_aggregation.merge_fruits(other.sub_aggregation);
}
}
impl MergeFruits for IntermediateRangeBucketEntry {
fn merge_fruits(&mut self, other: IntermediateRangeBucketEntry) {
self.doc_count += other.doc_count;
self.sub_aggregation.merge_fruits(other.sub_aggregation);
}
}
impl MergeFruits for IntermediateHistogramBucketEntry {
fn merge_fruits(&mut self, other: IntermediateHistogramBucketEntry) {
self.doc_count += other.doc_count;
self.sub_aggregation.merge_fruits(other.sub_aggregation);
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use pretty_assertions::assert_eq;
use super::*;
fn get_sub_test_tree(data: &[(String, u64)]) -> IntermediateAggregationResults {
let mut map = HashMap::new();
let mut buckets = FnvHashMap::default();
for (key, doc_count) in data {
buckets.insert(
key.to_string(),
IntermediateRangeBucketEntry {
key: Key::Str(key.to_string()),
doc_count: *doc_count,
sub_aggregation: Default::default(),
from: None,
to: None,
},
);
}
map.insert(
"my_agg_level2".to_string(),
IntermediateBucketResult::Range(IntermediateRangeBucketResult { buckets }),
);
IntermediateAggregationResults {
buckets: Some(VecWithNames::from_entries(map.into_iter().collect())),
metrics: Default::default(),
}
}
fn get_intermediat_tree_with_ranges(
data: &[(String, u64, String, u64)],
) -> IntermediateAggregationResults {
let mut map = HashMap::new();
let mut buckets: FnvHashMap<_, _> = Default::default();
for (key, doc_count, sub_aggregation_key, sub_aggregation_count) in data {
buckets.insert(
key.to_string(),
IntermediateRangeBucketEntry {
key: Key::Str(key.to_string()),
doc_count: *doc_count,
from: None,
to: None,
sub_aggregation: get_sub_test_tree(&[(
sub_aggregation_key.to_string(),
*sub_aggregation_count,
)]),
},
);
}
map.insert(
"my_agg_level1".to_string(),
IntermediateBucketResult::Range(IntermediateRangeBucketResult { buckets }),
);
IntermediateAggregationResults {
buckets: Some(VecWithNames::from_entries(map.into_iter().collect())),
metrics: Default::default(),
}
}
#[test]
fn test_merge_fruits_tree_1() {
let mut tree_left = get_intermediat_tree_with_ranges(&[
("red".to_string(), 50, "1900".to_string(), 25),
("blue".to_string(), 30, "1900".to_string(), 30),
]);
let tree_right = get_intermediat_tree_with_ranges(&[
("red".to_string(), 60, "1900".to_string(), 30),
("blue".to_string(), 25, "1900".to_string(), 50),
]);
tree_left.merge_fruits(tree_right);
let tree_expected = get_intermediat_tree_with_ranges(&[
("red".to_string(), 110, "1900".to_string(), 55),
("blue".to_string(), 55, "1900".to_string(), 80),
]);
assert_eq!(tree_left, tree_expected);
}
#[test]
fn test_merge_fruits_tree_2() {
let mut tree_left = get_intermediat_tree_with_ranges(&[
("red".to_string(), 50, "1900".to_string(), 25),
("blue".to_string(), 30, "1900".to_string(), 30),
]);
let tree_right = get_intermediat_tree_with_ranges(&[
("red".to_string(), 60, "1900".to_string(), 30),
("green".to_string(), 25, "1900".to_string(), 50),
]);
tree_left.merge_fruits(tree_right);
let tree_expected = get_intermediat_tree_with_ranges(&[
("red".to_string(), 110, "1900".to_string(), 55),
("blue".to_string(), 30, "1900".to_string(), 30),
("green".to_string(), 25, "1900".to_string(), 50),
]);
assert_eq!(tree_left, tree_expected);
}
#[test]
fn test_merge_fruits_tree_empty() {
let mut tree_left = get_intermediat_tree_with_ranges(&[
("red".to_string(), 50, "1900".to_string(), 25),
("blue".to_string(), 30, "1900".to_string(), 30),
]);
let orig = tree_left.clone();
tree_left.merge_fruits(IntermediateAggregationResults::default());
assert_eq!(tree_left, orig);
}
}


@@ -0,0 +1,114 @@
use std::fmt::Debug;
use serde::{Deserialize, Serialize};
use crate::aggregation::f64_from_fastfield_u64;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::schema::Type;
use crate::DocId;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
/// A single-value metric aggregation that computes the average of numeric values that are
/// extracted from the aggregated documents.
/// Supported field types are u64, i64, and f64.
/// See [super::SingleMetricResult] for return value.
///
/// # JSON Format
/// ```json
/// {
/// "avg": {
/// "field": "score",
/// }
/// }
/// ```
pub struct AverageAggregation {
/// The field name to compute the average on.
pub field: String,
}
impl AverageAggregation {
/// Create new AverageAggregation from a field.
pub fn from_field_name(field_name: String) -> Self {
AverageAggregation { field: field_name }
}
/// Return the field name.
pub fn field_name(&self) -> &str {
&self.field
}
}
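// A minimal illustrative sketch: building the request in Rust. The outer `{"avg": {...}}`
// wrapper in the JSON example comes from the enclosing metric aggregation enum; this struct
// only carries the inner object.
#[allow(dead_code)]
fn average_request_sketch() -> AverageAggregation {
    let avg_req = AverageAggregation::from_field_name("score".to_string());
    debug_assert_eq!(avg_req.field_name(), "score");
    avg_req
}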
#[derive(Clone, PartialEq)]
pub(crate) struct SegmentAverageCollector {
pub data: IntermediateAverage,
field_type: Type,
}
impl Debug for SegmentAverageCollector {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("AverageCollector")
.field("data", &self.data)
.finish()
}
}
impl SegmentAverageCollector {
pub fn from_req(field_type: Type) -> Self {
Self {
field_type,
data: Default::default(),
}
}
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val1 = field.get(docs[0]);
let val2 = field.get(docs[1]);
let val3 = field.get(docs[2]);
let val4 = field.get(docs[3]);
let val1 = f64_from_fastfield_u64(val1, &self.field_type);
let val2 = f64_from_fastfield_u64(val2, &self.field_type);
let val3 = f64_from_fastfield_u64(val3, &self.field_type);
let val4 = f64_from_fastfield_u64(val4, &self.field_type);
self.data.collect(val1);
self.data.collect(val2);
self.data.collect(val3);
self.data.collect(val4);
}
for doc in iter.remainder() {
let val = field.get(*doc);
let val = f64_from_fastfield_u64(val, &self.field_type);
self.data.collect(val);
}
}
}
/// Contains mergeable version of average data.
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntermediateAverage {
pub(crate) sum: f64,
pub(crate) doc_count: u64,
}
impl IntermediateAverage {
pub(crate) fn from_collector(collector: SegmentAverageCollector) -> Self {
collector.data
}
/// Merge average data into this instance.
pub fn merge_fruits(&mut self, other: IntermediateAverage) {
self.sum += other.sum;
self.doc_count += other.doc_count;
}
/// Compute the final result.
pub fn finalize(&self) -> Option<f64> {
if self.doc_count == 0 {
None
} else {
Some(self.sum / self.doc_count as f64)
}
}
#[inline]
fn collect(&mut self, val: f64) {
self.doc_count += 1;
self.sum += val;
}
}


@@ -0,0 +1,30 @@
//! Module for all metric aggregations.
//!
//! The aggregations in this family compute metrics, see [super::agg_req::MetricAggregation] for
//! details.
mod average;
mod stats;
pub use average::*;
use serde::{Deserialize, Serialize};
pub use stats::*;
/// Single-metric aggregations use this common result structure.
///
/// The main reason to wrap the value is to match the Elasticsearch output structure.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SingleMetricResult {
/// The value of the single value metric.
pub value: Option<f64>,
}
impl From<f64> for SingleMetricResult {
fn from(value: f64) -> Self {
Self { value: Some(value) }
}
}
impl From<Option<f64>> for SingleMetricResult {
fn from(value: Option<f64>) -> Self {
Self { value }
}
}


@@ -0,0 +1,371 @@
use serde::{Deserialize, Serialize};
use crate::aggregation::f64_from_fastfield_u64;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::schema::Type;
use crate::{DocId, TantivyError};
/// A multi-value metric aggregation that computes stats of numeric values that are
/// extracted from the aggregated documents.
/// Supported field types are u64, i64, and f64.
/// See [Stats] for returned statistics.
///
/// # JSON Format
/// ```json
/// {
/// "stats": {
/// "field": "score",
/// }
/// }
/// ```
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StatsAggregation {
/// The field name to compute the stats on.
pub field: String,
}
impl StatsAggregation {
/// Create new StatsAggregation from a field.
pub fn from_field_name(field_name: String) -> Self {
StatsAggregation { field: field_name }
}
/// Return the field name.
pub fn field_name(&self) -> &str {
&self.field
}
}
/// Stats contains a collection of statistics.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Stats {
/// The number of documents.
pub count: usize,
/// The sum of the fast field values.
pub sum: f64,
/// The standard deviation of the fast field values. None for count == 0.
pub standard_deviation: Option<f64>,
/// The min value of the fast field values.
pub min: Option<f64>,
/// The max value of the fast field values.
pub max: Option<f64>,
/// The average of the values. None for count == 0.
pub avg: Option<f64>,
}
impl Stats {
pub(crate) fn get_value(&self, agg_property: &str) -> crate::Result<Option<f64>> {
match agg_property {
"count" => Ok(Some(self.count as f64)),
"sum" => Ok(Some(self.sum)),
"standard_deviation" => Ok(self.standard_deviation),
"min" => Ok(self.min),
"max" => Ok(self.max),
"avg" => Ok(self.avg),
_ => Err(TantivyError::InvalidArgument(format!(
"unknown property {} on stats metric aggregation",
agg_property
))),
}
}
}
/// IntermediateStats contains the mergeable version of the stats.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntermediateStats {
count: usize,
sum: f64,
squared_sum: f64,
min: f64,
max: f64,
}
impl Default for IntermediateStats {
fn default() -> Self {
Self {
count: 0,
sum: 0.0,
squared_sum: 0.0,
min: f64::MAX,
max: f64::MIN,
}
}
}
impl IntermediateStats {
pub(crate) fn avg(&self) -> Option<f64> {
if self.count == 0 {
None
} else {
Some(self.sum / (self.count as f64))
}
}
fn square_mean(&self) -> f64 {
self.squared_sum / (self.count as f64)
}
pub(crate) fn standard_deviation(&self) -> Option<f64> {
self.avg()
.map(|average| (self.square_mean() - average * average).sqrt())
}
/// Merge data from other stats into this instance.
pub fn merge_fruits(&mut self, other: IntermediateStats) {
self.count += other.count;
self.sum += other.sum;
self.squared_sum += other.squared_sum;
self.min = self.min.min(other.min);
self.max = self.max.max(other.max);
}
/// Compute the final result.
pub fn finalize(&self) -> Stats {
let min = if self.count == 0 {
None
} else {
Some(self.min)
};
let max = if self.count == 0 {
None
} else {
Some(self.max)
};
Stats {
count: self.count,
sum: self.sum,
standard_deviation: self.standard_deviation(),
min,
max,
avg: self.avg(),
}
}
#[inline]
fn collect(&mut self, value: f64) {
self.count += 1;
self.sum += value;
self.squared_sum += value * value;
self.min = self.min.min(value);
self.max = self.max.max(value);
}
}
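// A minimal illustrative sketch: `standard_deviation` above is the population form
// sqrt(E[x^2] - E[x]^2), computed from `squared_sum`, `sum` and `count`. For the values
// [1.0, 2.0, 3.0]: avg = 2.0, square_mean = 14/3, so the standard deviation is sqrt(2/3).
#[cfg(test)]
mod stats_formula_sketch {
    use super::IntermediateStats;

    #[test]
    fn standard_deviation_matches_population_formula() {
        let mut stats = IntermediateStats::default();
        for &value in &[1.0f64, 2.0, 3.0] {
            stats.collect(value);
        }
        let expected = (2.0f64 / 3.0).sqrt();
        let actual = stats.finalize().standard_deviation.unwrap();
        assert!((actual - expected).abs() < 1e-12);
    }
}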
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct SegmentStatsCollector {
pub(crate) stats: IntermediateStats,
field_type: Type,
}
impl SegmentStatsCollector {
pub fn from_req(field_type: Type) -> Self {
Self {
field_type,
stats: IntermediateStats::default(),
}
}
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val1 = field.get(docs[0]);
let val2 = field.get(docs[1]);
let val3 = field.get(docs[2]);
let val4 = field.get(docs[3]);
let val1 = f64_from_fastfield_u64(val1, &self.field_type);
let val2 = f64_from_fastfield_u64(val2, &self.field_type);
let val3 = f64_from_fastfield_u64(val3, &self.field_type);
let val4 = f64_from_fastfield_u64(val4, &self.field_type);
self.stats.collect(val1);
self.stats.collect(val2);
self.stats.collect(val3);
self.stats.collect(val4);
}
for doc in iter.remainder() {
let val = field.get(*doc);
let val = f64_from_fastfield_u64(val, &self.field_type);
self.stats.collect(val);
}
}
}
#[cfg(test)]
mod tests {
use std::iter;
use serde_json::Value;
use crate::aggregation::agg_req::{
Aggregation, Aggregations, BucketAggregation, BucketAggregationType, MetricAggregation,
RangeAggregation,
};
use crate::aggregation::agg_result::AggregationResults;
use crate::aggregation::metric::StatsAggregation;
use crate::aggregation::tests::{get_test_index_2_segments, get_test_index_from_values};
use crate::aggregation::AggregationCollector;
use crate::query::{AllQuery, TermQuery};
use crate::schema::IndexRecordOption;
use crate::Term;
#[test]
fn test_aggregation_stats_empty_index() -> crate::Result<()> {
// test index without segments
let values = vec![];
let index = get_test_index_from_values(false, &values)?;
let agg_req_1: Aggregations = vec![(
"stats".to_string(),
Aggregation::Metric(MetricAggregation::Stats(StatsAggregation::from_field_name(
"score".to_string(),
))),
)]
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let reader = index.reader()?;
let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
assert_eq!(
res["stats"],
json!({
"avg": Value::Null,
"count": 0,
"max": Value::Null,
"min": Value::Null,
"standard_deviation": Value::Null,
"sum": 0.0
})
);
Ok(())
}
#[test]
fn test_aggregation_stats() -> crate::Result<()> {
let index = get_test_index_2_segments(false)?;
let reader = index.reader()?;
let text_field = reader.searcher().schema().get_field("text").unwrap();
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = vec![
(
"stats_i64".to_string(),
Aggregation::Metric(MetricAggregation::Stats(StatsAggregation::from_field_name(
"score_i64".to_string(),
))),
),
(
"stats_f64".to_string(),
Aggregation::Metric(MetricAggregation::Stats(StatsAggregation::from_field_name(
"score_f64".to_string(),
))),
),
(
"stats".to_string(),
Aggregation::Metric(MetricAggregation::Stats(StatsAggregation::from_field_name(
"score".to_string(),
))),
),
(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score".to_string(),
ranges: vec![
(3f64..7f64).into(),
(7f64..19f64).into(),
(19f64..20f64).into(),
],
..Default::default()
}),
sub_aggregation: iter::once((
"stats".to_string(),
Aggregation::Metric(MetricAggregation::Stats(
StatsAggregation::from_field_name("score".to_string()),
)),
))
.collect(),
}),
),
]
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
assert_eq!(
res["stats"],
json!({
"avg": 12.142857142857142,
"count": 7,
"max": 44.0,
"min": 1.0,
"standard_deviation": 13.65313748796613,
"sum": 85.0
})
);
assert_eq!(
res["stats_i64"],
json!({
"avg": 12.142857142857142,
"count": 7,
"max": 44.0,
"min": 1.0,
"standard_deviation": 13.65313748796613,
"sum": 85.0
})
);
assert_eq!(
res["stats_f64"],
json!({
"avg": 12.214285714285714,
"count": 7,
"max": 44.5,
"min": 1.0,
"standard_deviation": 13.819905785437443,
"sum": 85.5
})
);
assert_eq!(
res["range"]["buckets"][2]["stats"],
json!({
"avg": 10.666666666666666,
"count": 3,
"max": 14.0,
"min": 7.0,
"standard_deviation": 2.867441755680877,
"sum": 32.0
})
);
assert_eq!(
res["range"]["buckets"][3]["stats"],
json!({
"avg": serde_json::Value::Null,
"count": 0,
"max": serde_json::Value::Null,
"min": serde_json::Value::Null,
"standard_deviation": serde_json::Value::Null,
"sum": 0.0,
})
);
Ok(())
}
}

src/aggregation/mod.rs
File diff suppressed because it is too large

@@ -0,0 +1,313 @@
//! Contains the aggregation tree that is used during collection in a segment.
//! This tree contains data structures optimized for fast collection.
//! The tree can be converted to an intermediate tree, which contains data structures optimized
//! for merging.
use std::fmt::Debug;
use std::rc::Rc;
use std::sync::atomic::AtomicU32;
use super::agg_req::MetricAggregation;
use super::agg_req_with_accessor::{
AggregationsWithAccessor, BucketAggregationWithAccessor, MetricAggregationWithAccessor,
};
use super::bucket::{SegmentHistogramCollector, SegmentRangeCollector, SegmentTermCollector};
use super::collector::MAX_BUCKET_COUNT;
use super::intermediate_agg_result::{IntermediateAggregationResults, IntermediateBucketResult};
use super::metric::{
AverageAggregation, SegmentAverageCollector, SegmentStatsCollector, StatsAggregation,
};
use super::VecWithNames;
use crate::aggregation::agg_req::BucketAggregationType;
use crate::{DocId, TantivyError};
pub(crate) const DOC_BLOCK_SIZE: usize = 64;
pub(crate) type DocBlock = [DocId; DOC_BLOCK_SIZE];
#[derive(Clone, PartialEq)]
pub(crate) struct SegmentAggregationResultsCollector {
pub(crate) metrics: Option<VecWithNames<SegmentMetricResultCollector>>,
pub(crate) buckets: Option<VecWithNames<SegmentBucketResultCollector>>,
staged_docs: DocBlock,
num_staged_docs: usize,
}
impl Default for SegmentAggregationResultsCollector {
fn default() -> Self {
Self {
metrics: Default::default(),
buckets: Default::default(),
staged_docs: [0; DOC_BLOCK_SIZE],
num_staged_docs: Default::default(),
}
}
}
impl Debug for SegmentAggregationResultsCollector {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SegmentAggregationResultsCollector")
.field("metrics", &self.metrics)
.field("buckets", &self.buckets)
.field("staged_docs", &&self.staged_docs[..self.num_staged_docs])
.field("num_staged_docs", &self.num_staged_docs)
.finish()
}
}
impl SegmentAggregationResultsCollector {
pub fn into_intermediate_aggregations_result(
self,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<IntermediateAggregationResults> {
let buckets = if let Some(buckets) = self.buckets {
let entries = buckets
.into_iter()
.zip(agg_with_accessor.buckets.values())
.map(|((key, bucket), acc)| Ok((key, bucket.into_intermediate_bucket_result(acc)?)))
.collect::<crate::Result<Vec<(String, _)>>>()?;
Some(VecWithNames::from_entries(entries))
} else {
None
};
let metrics = self.metrics.map(VecWithNames::from_other);
Ok(IntermediateAggregationResults { metrics, buckets })
}
pub(crate) fn from_req_and_validate(req: &AggregationsWithAccessor) -> crate::Result<Self> {
let buckets = req
.buckets
.entries()
.map(|(key, req)| {
Ok((
key.to_string(),
SegmentBucketResultCollector::from_req_and_validate(req)?,
))
})
.collect::<crate::Result<Vec<(String, _)>>>()?;
let metrics = req
.metrics
.entries()
.map(|(key, req)| {
Ok((
key.to_string(),
SegmentMetricResultCollector::from_req_and_validate(req)?,
))
})
.collect::<crate::Result<Vec<(String, _)>>>()?;
let metrics = if metrics.is_empty() {
None
} else {
Some(VecWithNames::from_entries(metrics))
};
let buckets = if buckets.is_empty() {
None
} else {
Some(VecWithNames::from_entries(buckets))
};
Ok(SegmentAggregationResultsCollector {
metrics,
buckets,
staged_docs: [0; DOC_BLOCK_SIZE],
num_staged_docs: 0,
})
}
#[inline]
pub(crate) fn collect(
&mut self,
doc: crate::DocId,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> {
self.staged_docs[self.num_staged_docs] = doc;
self.num_staged_docs += 1;
if self.num_staged_docs == self.staged_docs.len() {
self.flush_staged_docs(agg_with_accessor, false)?;
}
Ok(())
}
pub(crate) fn flush_staged_docs(
&mut self,
agg_with_accessor: &AggregationsWithAccessor,
force_flush: bool,
) -> crate::Result<()> {
if self.num_staged_docs == 0 {
return Ok(());
}
if let Some(metrics) = &mut self.metrics {
for (collector, agg_with_accessor) in
metrics.values_mut().zip(agg_with_accessor.metrics.values())
{
collector
.collect_block(&self.staged_docs[..self.num_staged_docs], agg_with_accessor);
}
}
if let Some(buckets) = &mut self.buckets {
for (collector, agg_with_accessor) in
buckets.values_mut().zip(agg_with_accessor.buckets.values())
{
collector.collect_block(
&self.staged_docs[..self.num_staged_docs],
agg_with_accessor,
force_flush,
)?;
}
}
self.num_staged_docs = 0;
Ok(())
}
}
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SegmentMetricResultCollector {
Average(SegmentAverageCollector),
Stats(SegmentStatsCollector),
}
impl SegmentMetricResultCollector {
pub fn from_req_and_validate(req: &MetricAggregationWithAccessor) -> crate::Result<Self> {
match &req.metric {
MetricAggregation::Average(AverageAggregation { field: _ }) => {
Ok(SegmentMetricResultCollector::Average(
SegmentAverageCollector::from_req(req.field_type),
))
}
MetricAggregation::Stats(StatsAggregation { field: _ }) => {
Ok(SegmentMetricResultCollector::Stats(
SegmentStatsCollector::from_req(req.field_type),
))
}
}
}
pub(crate) fn collect_block(&mut self, doc: &[DocId], metric: &MetricAggregationWithAccessor) {
match self {
SegmentMetricResultCollector::Average(avg_collector) => {
avg_collector.collect_block(doc, &metric.accessor);
}
SegmentMetricResultCollector::Stats(stats_collector) => {
stats_collector.collect_block(doc, &metric.accessor);
}
}
}
}
/// `SegmentBucketResultCollector` provides specialized bucket collectors for collection inside a
/// segment.
/// The typical `Map<Key, Bucket>` structure is not suitable during collection, for performance
/// reasons.
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SegmentBucketResultCollector {
Range(SegmentRangeCollector),
Histogram(Box<SegmentHistogramCollector>),
Terms(Box<SegmentTermCollector>),
}
impl SegmentBucketResultCollector {
pub fn into_intermediate_bucket_result(
self,
agg_with_accessor: &BucketAggregationWithAccessor,
) -> crate::Result<IntermediateBucketResult> {
match self {
SegmentBucketResultCollector::Terms(terms) => {
terms.into_intermediate_bucket_result(agg_with_accessor)
}
SegmentBucketResultCollector::Range(range) => {
range.into_intermediate_bucket_result(agg_with_accessor)
}
SegmentBucketResultCollector::Histogram(histogram) => {
histogram.into_intermediate_bucket_result(agg_with_accessor)
}
}
}
pub fn from_req_and_validate(req: &BucketAggregationWithAccessor) -> crate::Result<Self> {
match &req.bucket_agg {
BucketAggregationType::Terms(terms_req) => Ok(Self::Terms(Box::new(
SegmentTermCollector::from_req_and_validate(
terms_req,
&req.sub_aggregation,
req.field_type,
req.accessor
.as_multi()
.expect("unexpected fast field cardinality"),
)?,
))),
BucketAggregationType::Range(range_req) => {
Ok(Self::Range(SegmentRangeCollector::from_req_and_validate(
range_req,
&req.sub_aggregation,
&req.bucket_count,
req.field_type,
)?))
}
BucketAggregationType::Histogram(histogram) => Ok(Self::Histogram(Box::new(
SegmentHistogramCollector::from_req_and_validate(
histogram,
&req.sub_aggregation,
req.field_type,
req.accessor
.as_single()
.expect("unexpected fast field cardinality"),
)?,
))),
}
}
#[inline]
pub(crate) fn collect_block(
&mut self,
doc: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool,
) -> crate::Result<()> {
match self {
SegmentBucketResultCollector::Range(range) => {
range.collect_block(doc, bucket_with_accessor, force_flush)?;
}
SegmentBucketResultCollector::Histogram(histogram) => {
histogram.collect_block(doc, bucket_with_accessor, force_flush)?;
}
SegmentBucketResultCollector::Terms(terms) => {
terms.collect_block(doc, bucket_with_accessor, force_flush)?;
}
}
Ok(())
}
}
#[derive(Clone)]
pub(crate) struct BucketCount {
/// The counter which is shared between the aggregations for one request.
pub(crate) bucket_count: Rc<AtomicU32>,
pub(crate) max_bucket_count: u32,
}
impl Default for BucketCount {
fn default() -> Self {
Self {
bucket_count: Default::default(),
max_bucket_count: MAX_BUCKET_COUNT,
}
}
}
impl BucketCount {
pub(crate) fn validate_bucket_count(&self) -> crate::Result<()> {
if self.get_count() > self.max_bucket_count {
return Err(TantivyError::InvalidArgument(
"Aborting aggregation because too many buckets were created".to_string(),
));
}
Ok(())
}
pub(crate) fn add_count(&self, count: u32) {
self.bucket_count
.fetch_add(count as u32, std::sync::atomic::Ordering::Relaxed);
}
pub(crate) fn get_count(&self) -> u32 {
self.bucket_count.load(std::sync::atomic::Ordering::Relaxed)
}
}
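
The collect/flush pattern above is the hot loop of segment aggregation: doc ids are staged into a fixed DOC_BLOCK_SIZE array and only handed to the metric and bucket collectors one block at a time. A standalone sketch of the same staging idea follows; the `Sink`, `Staged` and `CountSink` names are made up for illustration, while the real code flushes into `SegmentMetricResultCollector` and `SegmentBucketResultCollector`.

const DOC_BLOCK_SIZE: usize = 64;
type DocId = u32;

trait Sink {
    fn collect_block(&mut self, docs: &[DocId]);
}

struct Staged<S: Sink> {
    staged_docs: [DocId; DOC_BLOCK_SIZE],
    num_staged_docs: usize,
    sink: S,
}

impl<S: Sink> Staged<S> {
    fn new(sink: S) -> Self {
        Staged { staged_docs: [0; DOC_BLOCK_SIZE], num_staged_docs: 0, sink }
    }

    // Stage one doc id; flush the block to the sink once it is full.
    fn collect(&mut self, doc: DocId) {
        self.staged_docs[self.num_staged_docs] = doc;
        self.num_staged_docs += 1;
        if self.num_staged_docs == self.staged_docs.len() {
            self.flush();
        }
    }

    // Counterpart of flush_staged_docs: hand the staged block to the sink.
    fn flush(&mut self) {
        if self.num_staged_docs > 0 {
            self.sink.collect_block(&self.staged_docs[..self.num_staged_docs]);
            self.num_staged_docs = 0;
        }
    }
}

struct CountSink(u64);

impl Sink for CountSink {
    fn collect_block(&mut self, docs: &[DocId]) {
        self.0 += docs.len() as u64;
    }
}

fn main() {
    let mut staged = Staged::new(CountSink(0));
    for doc in 0..200u32 {
        staged.collect(doc);
    }
    staged.flush(); // the final, forced flush at the end of a segment
    assert_eq!(staged.sink.0, 200);
}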


@@ -1,58 +1,37 @@
use super::Collector;
use crate::collector::SegmentCollector;
use crate::DocId;
use crate::Result;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
use crate::{DocId, Score, SegmentOrdinal, SegmentReader};
/// `CountCollector` collector only counts how many
/// documents match the query.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::Count;
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
/// let mut index_writer = index.writer(3_000_000).unwrap();
/// index_writer.add_document(doc!(title => "The Name of the Wind")).unwrap();
/// index_writer.add_document(doc!(title => "The Diary of Muadib")).unwrap();
/// index_writer.add_document(doc!(title => "A Dairy Cow")).unwrap();
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl")).unwrap();
/// assert!(index_writer.commit().is_ok());
///
/// {
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let count = searcher.search(&query, &Count).unwrap();
/// let reader = index.reader().unwrap();
/// let searcher = reader.searcher();
///
/// assert_eq!(count, 2);
/// }
/// // Here comes the important part
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary").unwrap();
/// let count = searcher.search(&query, &Count).unwrap();
///
/// Ok(())
/// }
/// assert_eq!(count, 2);
/// ```
pub struct Count;
@@ -61,7 +40,11 @@ impl Collector for Count {
type Child = SegmentCountCollector;
fn for_segment(&self, _: SegmentLocalId, _: &SegmentReader) -> Result<SegmentCountCollector> {
fn for_segment(
&self,
_: SegmentOrdinal,
_: &SegmentReader,
) -> crate::Result<SegmentCountCollector> {
Ok(SegmentCountCollector::default())
}
@@ -69,7 +52,7 @@ impl Collector for Count {
false
}
fn merge_fruits(&self, segment_counts: Vec<usize>) -> Result<usize> {
fn merge_fruits(&self, segment_counts: Vec<usize>) -> crate::Result<usize> {
Ok(segment_counts.into_iter().sum())
}
}
@@ -94,8 +77,7 @@ impl SegmentCollector for SegmentCountCollector {
#[cfg(test)]
mod tests {
use super::{Count, SegmentCountCollector};
use crate::collector::Collector;
use crate::collector::SegmentCollector;
use crate::collector::{Collector, SegmentCollector};
#[test]
fn test_count_collect_does_not_requires_scoring() {
@@ -110,20 +92,19 @@ mod tests {
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1f32);
count_collector.collect(0u32, 1.0);
assert_eq!(count_collector.harvest(), 1);
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1f32);
count_collector.collect(0u32, 1.0);
assert_eq!(count_collector.harvest(), 1);
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1f32);
count_collector.collect(1u32, 1f32);
count_collector.collect(0u32, 1.0);
count_collector.collect(1u32, 1.0);
assert_eq!(count_collector.harvest(), 2);
}
}
}


@@ -1,6 +1,5 @@
use crate::collector::top_collector::{TopCollector, TopSegmentCollector};
use crate::collector::{Collector, SegmentCollector};
use crate::Result;
use crate::{DocAddress, DocId, Score, SegmentReader};
pub(crate) struct CustomScoreTopCollector<TCustomScorer, TScore = Score> {
@@ -9,16 +8,15 @@ pub(crate) struct CustomScoreTopCollector<TCustomScorer, TScore = Score> {
}
impl<TCustomScorer, TScore> CustomScoreTopCollector<TCustomScorer, TScore>
where
TScore: Clone + PartialOrd,
where TScore: Clone + PartialOrd
{
pub fn new(
pub(crate) fn new(
custom_scorer: TCustomScorer,
limit: usize,
collector: TopCollector<TScore>,
) -> CustomScoreTopCollector<TCustomScorer, TScore> {
CustomScoreTopCollector {
custom_scorer,
collector: TopCollector::with_limit(limit),
collector,
}
}
}
@@ -29,7 +27,7 @@ where
/// It is the segment local version of the [`CustomScorer`](./trait.CustomScorer.html).
pub trait CustomSegmentScorer<TScore>: 'static {
/// Computes the score of a specific `doc`.
fn score(&self, doc: DocId) -> TScore;
fn score(&mut self, doc: DocId) -> TScore;
}
/// `CustomScorer` makes it possible to define any kind of score.
@@ -42,12 +40,12 @@ pub trait CustomScorer<TScore>: Sync {
type Child: CustomSegmentScorer<TScore>;
/// Builds a child scorer for a specific segment. The child scorer is associated to
/// a specific segment.
fn segment_scorer(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child>;
}
impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
where
TCustomScorer: CustomScorer<TScore>,
TCustomScorer: CustomScorer<TScore> + Send + Sync,
TScore: 'static + PartialOrd + Clone + Send + Sync,
{
type Fruit = Vec<(TScore, DocAddress)>;
@@ -58,11 +56,9 @@ where
&self,
segment_local_id: u32,
segment_reader: &SegmentReader,
) -> Result<Self::Child> {
) -> crate::Result<Self::Child> {
let segment_collector = self.collector.for_segment(segment_local_id, segment_reader);
let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
let segment_collector = self
.collector
.for_segment(segment_local_id, segment_reader)?;
Ok(CustomScoreTopSegmentCollector {
segment_collector,
segment_scorer,
@@ -73,7 +69,7 @@ where
false
}
fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> Result<Self::Fruit> {
fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
self.collector.merge_fruits(segment_fruits)
}
}
@@ -111,16 +107,15 @@ where
{
type Child = T;
fn segment_scorer(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
Ok((self)(segment_reader))
}
}
impl<F, TScore> CustomSegmentScorer<TScore> for F
where
F: 'static + Sync + Send + Fn(DocId) -> TScore,
where F: 'static + FnMut(DocId) -> TScore
{
fn score(&self, doc: DocId) -> TScore {
fn score(&mut self, doc: DocId) -> TScore {
(self)(doc)
}
}
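
The blanket impls above are what make closures usable as custom scorers: an outer `Fn(&SegmentReader) -> T` acts as the `CustomScorer` factory, and the inner `FnMut(DocId) -> TScore` closure is the per-segment `CustomSegmentScorer`. A hedged usage sketch, assuming `TopDocs::with_limit(..).custom_score(..)` is the public entry point that wraps this pub(crate) collector (check the `TopDocs` docs of your tantivy version):

use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, DocId, Index, SegmentReader};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let mut writer = index.writer(3_000_000)?;
    writer.add_document(doc!(title => "The Diary of Muadib"))?;
    writer.add_document(doc!(title => "The Diary of a Young Girl"))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;

    // Outer closure: Fn(&SegmentReader) -> per-segment scorer (CustomScorer).
    // Inner closure: FnMut(DocId) -> TScore (CustomSegmentScorer).
    // The "score" here is simply the doc id, which keeps the sketch trivial.
    let collector = TopDocs::with_limit(2)
        .custom_score(|_segment_reader: &SegmentReader| move |doc: DocId| doc);
    let hits: Vec<(DocId, _)> = searcher.search(&query, &collector)?;
    assert_eq!(hits.len(), 2);
    Ok(())
}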


@@ -0,0 +1,60 @@
use std::collections::HashSet;
use super::{Collector, SegmentCollector};
use crate::{DocAddress, DocId, Score};
/// Collector that returns the set of `DocAddress` that match the query.
///
/// This collector is mostly useful for tests.
pub struct DocSetCollector;
impl Collector for DocSetCollector {
type Fruit = HashSet<DocAddress>;
type Child = DocSetChildCollector;
fn for_segment(
&self,
segment_local_id: crate::SegmentOrdinal,
_segment: &crate::SegmentReader,
) -> crate::Result<Self::Child> {
Ok(DocSetChildCollector {
segment_local_id,
docs: HashSet::new(),
})
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(
&self,
segment_fruits: Vec<(u32, HashSet<DocId>)>,
) -> crate::Result<Self::Fruit> {
let len: usize = segment_fruits.iter().map(|(_, docset)| docset.len()).sum();
let mut result = HashSet::with_capacity(len);
for (segment_local_id, docs) in segment_fruits {
for doc in docs {
result.insert(DocAddress::new(segment_local_id, doc));
}
}
Ok(result)
}
}
pub struct DocSetChildCollector {
segment_local_id: u32,
docs: HashSet<DocId>,
}
impl SegmentCollector for DocSetChildCollector {
type Fruit = (u32, HashSet<DocId>);
fn collect(&mut self, doc: crate::DocId, _score: Score) {
self.docs.insert(doc);
}
fn harvest(self) -> (u32, HashSet<DocId>) {
(self.segment_local_id, self.docs)
}
}
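
A minimal usage sketch for the collector above: its fruit is the full `HashSet<DocAddress>` of matches, which is handy in tests. It assumes `DocSetCollector` is re-exported from `tantivy::collector` like the other collectors.

use std::collections::HashSet;

use tantivy::collector::DocSetCollector;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, DocAddress, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let mut writer = index.writer(3_000_000)?;
    writer.add_document(doc!(title => "The Diary of Muadib"))?;
    writer.add_document(doc!(title => "A Dairy Cow"))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;

    // Every matching document comes back as a (segment, doc) address.
    let docs: HashSet<DocAddress> = searcher.search(&query, &DocSetCollector)?;
    assert_eq!(docs.len(), 1);
    Ok(())
}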


@@ -1,24 +1,14 @@
use crate::collector::Collector;
use crate::collector::SegmentCollector;
use crate::docset::SkipResult;
use crate::fastfield::FacetReader;
use crate::schema::Facet;
use crate::schema::Field;
use crate::DocId;
use crate::Result;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
use crate::TantivyError;
use std::cmp::Ordering;
use std::collections::btree_map;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use std::collections::BinaryHeap;
use std::collections::Bound;
use std::collections::{btree_map, BTreeMap, BTreeSet, BinaryHeap};
use std::iter::Peekable;
use std::ops::Bound;
use std::{u64, usize};
use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::FacetReader;
use crate::schema::{Facet, Field};
use crate::{DocId, Score, SegmentOrdinal, SegmentReader};
struct Hit<'a> {
count: u64,
facet: &'a Facet,
@@ -40,7 +30,10 @@ impl<'a> PartialOrd<Hit<'a>> for Hit<'a> {
impl<'a> Ord for Hit<'a> {
fn cmp(&self, other: &Self) -> Ordering {
other.count.cmp(&self.count)
other
.count
.cmp(&self.count)
.then(self.facet.cmp(other.facet))
}
}
@@ -81,21 +74,18 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
///
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::FacetCollector;
/// use tantivy::query::AllQuery;
/// use tantivy::schema::{Facet, Schema, FacetOptions, TEXT};
/// use tantivy::{doc, Index};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// fn example() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
///
/// // Facet have their own specific type.
/// // It is not a bad practise to put all of your
/// // facet information in the same field.
/// let facet = schema_builder.add_facet_field("facet");
/// let facet = schema_builder.add_facet_field("facet", FacetOptions::default());
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
@@ -106,30 +96,30 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// title => "The Name of the Wind",
/// facet => Facet::from("/lang/en"),
/// facet => Facet::from("/category/fiction/fantasy")
/// ));
/// ))?;
/// index_writer.add_document(doc!(
/// title => "Dune",
/// facet => Facet::from("/lang/en"),
/// facet => Facet::from("/category/fiction/sci-fi")
/// ));
/// ))?;
/// index_writer.add_document(doc!(
/// title => "La Vénus d'Ille",
/// facet => Facet::from("/lang/fr"),
/// facet => Facet::from("/category/fiction/fantasy"),
/// facet => Facet::from("/category/fiction/horror")
/// ));
/// ))?;
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// facet => Facet::from("/lang/en"),
/// facet => Facet::from("/category/biography")
/// ));
/// ))?;
/// index_writer.commit()?;
/// }
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/lang");
/// facet_collector.add_facet("/category");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
@@ -145,7 +135,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// }
///
/// {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
///
@@ -160,8 +150,8 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// ]);
/// }
///
/// {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
///
@@ -174,6 +164,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
///
/// Ok(())
/// }
/// # assert!(example().is_ok());
/// ```
pub struct FacetCollector {
field: Field,
@@ -191,6 +182,11 @@ pub struct FacetSegmentCollector {
collapse_facet_ords: Vec<u64>,
}
enum SkipResult {
Found,
NotFound,
}
fn skip<'a, I: Iterator<Item = &'a Facet>>(
target: &[u8],
collapse_it: &mut Peekable<I>,
@@ -200,14 +196,14 @@ fn skip<'a, I: Iterator<Item = &'a Facet>>(
Some(facet_bytes) => match facet_bytes.encoded_str().as_bytes().cmp(target) {
Ordering::Less => {}
Ordering::Greater => {
return SkipResult::OverStep;
return SkipResult::NotFound;
}
Ordering::Equal => {
return SkipResult::Reached;
return SkipResult::Found;
}
},
None => {
return SkipResult::End;
return SkipResult::NotFound;
}
}
collapse_it.next();
@@ -237,9 +233,7 @@ impl FacetCollector {
/// If you need the correct number of unique documents for two such facets,
/// just add them in separate `FacetCollector`.
pub fn add_facet<T>(&mut self, facet_from: T)
where
Facet: From<T>,
{
where Facet: From<T> {
let facet = Facet::from(facet_from);
for old_facet in &self.facets {
assert!(
@@ -262,13 +256,10 @@ impl Collector for FacetCollector {
fn for_segment(
&self,
_: SegmentLocalId,
_: SegmentOrdinal,
reader: &SegmentReader,
) -> Result<FacetSegmentCollector> {
let field_name = reader.schema().get_field_name(self.field);
let facet_reader = reader.facet_reader(self.field).ok_or_else(|| {
TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name))
})?;
) -> crate::Result<FacetSegmentCollector> {
let facet_reader = reader.facet_reader(self.field)?;
let mut collapse_mapping = Vec::new();
let mut counts = Vec::new();
@@ -277,14 +268,14 @@ impl Collector for FacetCollector {
let mut collapse_facet_it = self.facets.iter().peekable();
collapse_facet_ords.push(0);
{
let mut facet_streamer = facet_reader.facet_dict().range().into_stream();
let mut facet_streamer = facet_reader.facet_dict().range().into_stream()?;
if facet_streamer.advance() {
'outer: loop {
// at the begining of this loop, facet_streamer
// is positionned on a term that has not been processed yet.
// at the beginning of this loop, facet_streamer
// is positioned on a term that has not been processed yet.
let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
match skip_result {
SkipResult::Reached => {
SkipResult::Found => {
// we reach a facet we decided to collapse.
let collapse_depth = facet_depth(facet_streamer.key());
let mut collapsed_id = 0;
@@ -297,14 +288,12 @@ impl Collector for FacetCollector {
if depth == collapse_depth + 1 {
collapsed_id = collapse_facet_ords.len();
collapse_facet_ords.push(facet_streamer.term_ord());
collapse_mapping.push(collapsed_id);
} else {
collapse_mapping.push(collapsed_id);
}
collapse_mapping.push(collapsed_id);
}
break;
}
SkipResult::End | SkipResult::OverStep => {
SkipResult::NotFound => {
collapse_mapping.push(0);
if !facet_streamer.advance() {
break;
@@ -330,7 +319,7 @@ impl Collector for FacetCollector {
false
}
fn merge_fruits(&self, segments_facet_counts: Vec<FacetCounts>) -> Result<FacetCounts> {
fn merge_fruits(&self, segments_facet_counts: Vec<FacetCounts>) -> crate::Result<FacetCounts> {
let mut facet_counts: BTreeMap<Facet, u64> = BTreeMap::new();
for segment_facet_counts in segments_facet_counts {
for (facet, count) in segment_facet_counts.facet_counts {
@@ -371,9 +360,12 @@ impl SegmentCollector for FacetSegmentCollector {
}
let mut facet = vec![];
let facet_ord = self.collapse_facet_ords[collapsed_facet_ord];
facet_dict.ord_to_term(facet_ord as u64, &mut facet);
// TODO
facet_counts.insert(Facet::from_encoded(facet).unwrap(), count);
// TODO handle errors.
if facet_dict.ord_to_term(facet_ord as u64, &mut facet).is_ok() {
if let Ok(facet) = Facet::from_encoded(facet) {
facet_counts.insert(facet, count);
}
}
}
FacetCounts { facet_counts }
}
@@ -398,10 +390,10 @@ impl<'a> Iterator for FacetChildIterator<'a> {
}
impl FacetCounts {
/// Returns an iterator over all of the facet count pairs inside this result.
/// See the documentation for [FacetCollector] for a usage example.
pub fn get<T>(&self, facet_from: T) -> FacetChildIterator<'_>
where
Facet: From<T>,
{
where Facet: From<T> {
let facet = Facet::from(facet_from);
let left_bound = Bound::Excluded(facet.clone());
let right_bound = if facet.is_root() {
@@ -417,10 +409,10 @@ impl FacetCounts {
FacetChildIterator { underlying }
}
/// Returns a vector of top `k` facets with their counts, sorted highest-to-lowest by counts.
/// See the documentation for [FacetCollector] for a usage example.
pub fn top_k<T>(&self, facet: T, k: usize) -> Vec<(&Facet, u64)>
where
Facet: From<T>,
{
where Facet: From<T> {
let mut heap = BinaryHeap::with_capacity(k);
let mut it = self.get(facet);
@@ -453,23 +445,27 @@ impl FacetCounts {
#[cfg(test)]
mod tests {
use super::{FacetCollector, FacetCounts};
use crate::core::Index;
use crate::query::AllQuery;
use crate::schema::{Document, Facet, Field, Schema};
use std::iter;
use rand::distributions::Uniform;
use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng};
use std::iter;
use super::{FacetCollector, FacetCounts};
use crate::collector::Count;
use crate::core::Index;
use crate::query::{AllQuery, QueryParser, TermQuery};
use crate::schema::{Document, Facet, FacetOptions, Field, IndexRecordOption, Schema};
use crate::Term;
#[test]
fn test_facet_collector_drilldown() {
fn test_facet_collector_drilldown() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet");
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut index_writer = index.writer_for_tests()?;
let num_facets: usize = 3 * 4 * 5;
let facets: Vec<Facet> = (0..num_facets)
.map(|mut n| {
@@ -484,14 +480,14 @@ mod tests {
for i in 0..num_facets * 10 {
let mut doc = Document::new();
doc.add_facet(facet_field, facets[i % num_facets].clone());
index_writer.add_document(doc);
index_writer.add_document(doc)?;
}
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet(Facet::from("/top1"));
let counts = searcher.search(&AllQuery, &facet_collector).unwrap();
let counts = searcher.search(&AllQuery, &facet_collector)?;
{
let facets: Vec<(String, u64)> = counts
@@ -511,44 +507,97 @@ mod tests {
.collect::<Vec<_>>()
);
}
Ok(())
}
#[test]
#[should_panic(expected = "Tried to add a facet which is a descendant of \
an already added facet.")]
#[should_panic(
expected = "Tried to add a facet which is a descendant of an already added facet."
)]
fn test_misused_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field(0));
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));
facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/country/europe"));
}
#[test]
fn test_doc_unsorted_multifacet() {
fn test_doc_unsorted_multifacet() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facets");
let facet_field = schema_builder.add_facet_field("facets", FacetOptions::default());
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/subjects/A/a"),
facet_field => Facet::from_text(&"/subjects/B/a"),
facet_field => Facet::from_text(&"/subjects/A/b"),
facet_field => Facet::from_text(&"/subjects/B/b"),
));
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
facet_field => Facet::from_text(&"/subjects/A/a").unwrap(),
facet_field => Facet::from_text(&"/subjects/B/a").unwrap(),
facet_field => Facet::from_text(&"/subjects/A/b").unwrap(),
facet_field => Facet::from_text(&"/subjects/B/b").unwrap(),
))?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 1);
let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet("/subjects");
let counts = searcher.search(&AllQuery, &facet_collector).unwrap();
let counts = searcher.search(&AllQuery, &facet_collector)?;
let facets: Vec<(&Facet, u64)> = counts.get("/subjects").collect();
assert_eq!(facets[0].1, 1);
Ok(())
}
#[test]
fn test_doc_search_by_facet() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/A/A").unwrap(),
))?;
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/A/B").unwrap(),
))?;
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/A/C/A").unwrap(),
))?;
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/D/C/A").unwrap(),
))?;
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 4);
let count_facet = |facet_str: &str| {
let term = Term::from_facet(facet_field, &Facet::from_text(facet_str).unwrap());
searcher
.search(&TermQuery::new(term, IndexRecordOption::Basic), &Count)
.unwrap()
};
assert_eq!(count_facet("/"), 4);
assert_eq!(count_facet("/A"), 3);
assert_eq!(count_facet("/A/B"), 1);
assert_eq!(count_facet("/A/C"), 1);
assert_eq!(count_facet("/A/C/A"), 1);
assert_eq!(count_facet("/C/A"), 0);
let query_parser = QueryParser::for_index(&index, vec![]);
{
let query = query_parser.parse_query("facet:/A/B")?;
assert_eq!(1, searcher.search(&query, &Count).unwrap());
}
{
let query = query_parser.parse_query("facet:/A")?;
assert_eq!(3, searcher.search(&query, &Count)?);
}
Ok(())
}
#[test]
fn test_non_used_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field(0));
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));
facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/countryeurope"));
}
@@ -556,7 +605,7 @@ mod tests {
#[test]
fn test_facet_collector_topk() {
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet");
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -578,9 +627,9 @@ mod tests {
.collect();
docs[..].shuffle(&mut thread_rng());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut index_writer = index.writer_for_tests().unwrap();
for doc in docs {
index_writer.add_document(doc);
index_writer.add_document(doc).unwrap();
}
index_writer.commit().unwrap();
let searcher = index.reader().unwrap().searcher();
@@ -602,23 +651,58 @@ mod tests {
}
}
#[test]
fn test_facet_collector_topk_tie_break() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let docs: Vec<Document> = vec![("b", 2), ("a", 2), ("c", 4)]
.into_iter()
.flat_map(|(c, count)| {
let facet = Facet::from(&format!("/facet/{}", c));
let doc = doc!(facet_field => facet);
iter::repeat(doc).take(count)
})
.collect();
let mut index_writer = index.writer_for_tests()?;
for doc in docs {
index_writer.add_document(doc)?;
}
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet("/facet");
let counts: FacetCounts = searcher.search(&AllQuery, &facet_collector)?;
let facets: Vec<(&Facet, u64)> = counts.top_k("/facet", 2);
assert_eq!(
facets,
vec![(&Facet::from("/facet/c"), 4), (&Facet::from("/facet/a"), 2)]
);
Ok(())
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use collector::FacetCollector;
use query::AllQuery;
use rand::{thread_rng, Rng};
use schema::Facet;
use schema::Schema;
use rand::seq::SliceRandom;
use rand::thread_rng;
use test::Bencher;
use Index;
use crate::collector::FacetCollector;
use crate::query::AllQuery;
use crate::schema::{Facet, Schema, INDEXED};
use crate::Index;
#[bench]
fn bench_facet_collector(b: &mut Bencher) {
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet");
let facet_field = schema_builder.add_facet_field("facet", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -630,16 +714,16 @@ mod bench {
}
}
// 40425 docs
thread_rng().shuffle(&mut docs[..]);
docs[..].shuffle(&mut thread_rng());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut index_writer = index.writer_for_tests().unwrap();
for doc in docs {
index_writer.add_document(doc);
index_writer.add_document(doc).unwrap();
}
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let searcher = index.searcher();
let searcher = reader.searcher();
let facet_collector = FacetCollector::for_field(facet_field);
searcher.search(&AllQuery, &facet_collector).unwrap();
});
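
As the tie-break test above demonstrates, `top_k` orders facets by descending count and breaks ties by facet order, so `/facet/a` beats `/facet/b` at equal counts. A standalone sketch of that ordering rule; the `top_k` helper here is illustrative only, whereas the real implementation keeps the top entries in a bounded `BinaryHeap`.

use std::cmp::Reverse;

// Primary key: count descending. Tie-break: facet (name) ascending.
fn top_k(mut counts: Vec<(&str, u64)>, k: usize) -> Vec<(&str, u64)> {
    counts.sort_by_key(|&(facet, count)| (Reverse(count), facet));
    counts.truncate(k);
    counts
}

fn main() {
    let counts = vec![("/facet/b", 2), ("/facet/a", 2), ("/facet/c", 4)];
    assert_eq!(top_k(counts, 2), vec![("/facet/c", 4), ("/facet/a", 2)]);
}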


@@ -0,0 +1,186 @@
// # FilterCollector
//
// This wrapper collector evaluates a predicate over a fast field value and
// only forwards the documents that pass it to the wrapped collector.
//
// For more collector examples, have a look at tantivy's built-in collectors
// such as `Count` and `TopDocs`.
use std::marker::PhantomData;
use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::schema::Field;
use crate::{Score, SegmentReader, TantivyError};
/// The `FilterCollector` filters docs using a fast field value and a predicate.
/// Only the documents for which the predicate returned "true" will be passed on to the next
/// collector.
///
/// ```rust
/// use tantivy::collector::{TopDocs, FilterCollector};
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT, INDEXED, FAST};
/// use tantivy::{doc, DocAddress, Index};
///
/// # fn main() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let price = schema_builder.add_u64_field("price", INDEXED | FAST);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
///
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
/// index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64))?;
/// index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64))?;
/// index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64))?;
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64))?;
/// index_writer.commit()?;
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let no_filter_collector = FilterCollector::new(price, &|value: u64| value > 20_120u64, TopDocs::with_limit(2));
/// let top_docs = searcher.search(&query, &no_filter_collector)?;
///
/// assert_eq!(top_docs.len(), 1);
/// assert_eq!(top_docs[0].1, DocAddress::new(0, 1));
///
/// let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new(price, &|value| value < 5u64, TopDocs::with_limit(2));
/// let filtered_top_docs = searcher.search(&query, &filter_all_collector)?;
///
/// assert_eq!(filtered_top_docs.len(), 0);
/// # Ok(())
/// # }
/// ```
pub struct FilterCollector<TCollector, TPredicate, TPredicateValue: FastValue>
where TPredicate: 'static + Clone
{
field: Field,
collector: TCollector,
predicate: TPredicate,
t_predicate_value: PhantomData<TPredicateValue>,
}
impl<TCollector, TPredicate, TPredicateValue: FastValue>
FilterCollector<TCollector, TPredicate, TPredicateValue>
where
TCollector: Collector + Send + Sync,
TPredicate: Fn(TPredicateValue) -> bool + Send + Sync + Clone,
{
/// Create a new FilterCollector.
pub fn new(
field: Field,
predicate: TPredicate,
collector: TCollector,
) -> FilterCollector<TCollector, TPredicate, TPredicateValue> {
FilterCollector {
field,
predicate,
collector,
t_predicate_value: PhantomData,
}
}
}
impl<TCollector, TPredicate, TPredicateValue: FastValue> Collector
for FilterCollector<TCollector, TPredicate, TPredicateValue>
where
TCollector: Collector + Send + Sync,
TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync + Clone,
TPredicateValue: FastValue,
{
// The fruit is simply whatever the wrapped collector produces.
type Fruit = TCollector::Fruit;
type Child = FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>;
fn for_segment(
&self,
segment_local_id: u32,
segment_reader: &SegmentReader,
) -> crate::Result<FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>> {
let schema = segment_reader.schema();
let field_entry = schema.get_field_entry(self.field);
if !field_entry.is_fast() {
return Err(TantivyError::SchemaError(format!(
"Field {:?} is not a fast field.",
field_entry.name()
)));
}
let requested_type = TPredicateValue::to_type();
let field_schema_type = field_entry.field_type().value_type();
if requested_type != field_schema_type {
return Err(TantivyError::SchemaError(format!(
"Field {:?} is of type {:?}!={:?}",
field_entry.name(),
requested_type,
field_schema_type
)));
}
let fast_field_reader = segment_reader
.fast_fields()
.typed_fast_field_reader(self.field)?;
let segment_collector = self
.collector
.for_segment(segment_local_id, segment_reader)?;
Ok(FilterSegmentCollector {
fast_field_reader,
segment_collector,
predicate: self.predicate.clone(),
t_predicate_value: PhantomData,
})
}
fn requires_scoring(&self) -> bool {
self.collector.requires_scoring()
}
fn merge_fruits(
&self,
segment_fruits: Vec<<TCollector::Child as SegmentCollector>::Fruit>,
) -> crate::Result<TCollector::Fruit> {
self.collector.merge_fruits(segment_fruits)
}
}
pub struct FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
where
TPredicate: 'static,
TPredicateValue: FastValue,
{
fast_field_reader: DynamicFastFieldReader<TPredicateValue>,
segment_collector: TSegmentCollector,
predicate: TPredicate,
t_predicate_value: PhantomData<TPredicateValue>,
}
impl<TSegmentCollector, TPredicate, TPredicateValue> SegmentCollector
for FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
where
TSegmentCollector: SegmentCollector,
TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync,
TPredicateValue: FastValue,
{
type Fruit = TSegmentCollector::Fruit;
fn collect(&mut self, doc: u32, score: Score) {
let value = self.fast_field_reader.get(doc);
if (self.predicate)(value) {
self.segment_collector.collect(doc, score)
}
}
fn harvest(self) -> <TSegmentCollector as SegmentCollector>::Fruit {
self.segment_collector.harvest()
}
}
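
The wrapped collector is a generic parameter, so the same `FilterCollector` can wrap `Count` instead of `TopDocs`. A sketch extrapolated from the doctest above; the schema, documents and price threshold are invented for illustration.

use tantivy::collector::{Count, FilterCollector};
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let price = schema_builder.add_u64_field("price", INDEXED | FAST);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let mut writer = index.writer(3_000_000)?;
    writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64))?;
    writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;

    // Count only the matches whose `price` fast field passes the predicate.
    let filtered_count =
        FilterCollector::new(price, &|value: u64| value > 21_000u64, Count);
    let count = searcher.search(&query, &filtered_count)?;
    assert_eq!(count, 1);
    Ok(())
}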


@@ -0,0 +1,296 @@
use fastdivide::DividerU64;
use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::schema::{Field, Type};
use crate::{DocId, Score};
/// Histogram builds a histogram of the values of a fast field for the
/// collected DocSet.
///
/// At construction, it is given parameters that define a partition of an interval
/// [min_val, max_val) into N buckets with the same width.
/// The ith bucket is then defined by `[min_val + i * bucket_width, min_val + (i+1) * bucket_width)`
///
/// A histogram is then defined as a `Vec<u64>` of length `num_buckets`, containing a count of
/// documents for each value bucket.
///
/// See also [`HistogramCollector::new()`].
///
/// # Warning
///
/// f64 fields are not supported.
#[derive(Clone)]
pub struct HistogramCollector {
min_value: u64,
num_buckets: usize,
divider: DividerU64,
field: Field,
}
impl HistogramCollector {
/// Builds a new HistogramCollector.
///
/// The scale/range of the histogram is not dynamic. It must be defined at
/// construction by supplying the following parameters:
/// - `min_value`: the minimum value that can be recorded in the histogram.
/// - `bucket_width`: the width of the interval associated with each bucket.
/// - `num_buckets`: the overall number of buckets.
///
/// Together, these parameters define a partition of `[min_value, min_value + num_buckets *
/// bucket_width)` into `num_buckets` intervals of width `bucket_width`, called buckets.
///
/// # Disclaimer
/// This function panics if the given field is of type f64.
pub fn new<TFastValue: FastValue>(
field: Field,
min_value: TFastValue,
bucket_width: u64,
num_buckets: usize,
) -> HistogramCollector {
let fast_type = TFastValue::to_type();
assert!(fast_type == Type::U64 || fast_type == Type::I64 || fast_type == Type::Date);
HistogramCollector {
min_value: min_value.to_u64(),
num_buckets,
field,
divider: DividerU64::divide_by(bucket_width),
}
}
}
struct HistogramComputer {
counts: Vec<u64>,
min_value: u64,
divider: DividerU64,
}
impl HistogramComputer {
#[inline]
pub(crate) fn add_value(&mut self, value: u64) {
if value < self.min_value {
return;
}
let delta = value - self.min_value;
let bucket_id: usize = self.divider.divide(delta) as usize;
if bucket_id < self.counts.len() {
self.counts[bucket_id] += 1;
}
}
fn harvest(self) -> Vec<u64> {
self.counts
}
}
pub struct SegmentHistogramCollector {
histogram_computer: HistogramComputer,
ff_reader: DynamicFastFieldReader<u64>,
}
impl SegmentCollector for SegmentHistogramCollector {
type Fruit = Vec<u64>;
fn collect(&mut self, doc: DocId, _score: Score) {
let value = self.ff_reader.get(doc);
self.histogram_computer.add_value(value);
}
fn harvest(self) -> Self::Fruit {
self.histogram_computer.harvest()
}
}
impl Collector for HistogramCollector {
type Fruit = Vec<u64>;
type Child = SegmentHistogramCollector;
fn for_segment(
&self,
_segment_local_id: crate::SegmentOrdinal,
segment: &crate::SegmentReader,
) -> crate::Result<Self::Child> {
let ff_reader = segment.fast_fields().u64_lenient(self.field)?;
Ok(SegmentHistogramCollector {
histogram_computer: HistogramComputer {
counts: vec![0; self.num_buckets],
min_value: self.min_value,
divider: self.divider,
},
ff_reader,
})
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(&self, child_histograms: Vec<Vec<u64>>) -> crate::Result<Vec<u64>> {
Ok(add_vecs(child_histograms, self.num_buckets))
}
}
pub fn add_arrays_into(acc: &mut [u64], add: &[u64]) {
assert_eq!(acc.len(), add.len());
for (dest_bucket, bucket_count) in acc.iter_mut().zip(add) {
*dest_bucket += bucket_count;
}
}
fn add_vecs(mut vals_list: Vec<Vec<u64>>, len: usize) -> Vec<u64> {
let mut acc = vals_list.pop().unwrap_or_else(|| vec![0u64; len]);
assert_eq!(acc.len(), len);
for vals in vals_list {
add_arrays_into(&mut acc, &vals);
}
acc
}
#[cfg(test)]
mod tests {
use fastdivide::DividerU64;
use query::AllQuery;
use super::{add_vecs, HistogramCollector, HistogramComputer};
use crate::schema::{Schema, FAST};
use crate::time::{Date, Month};
use crate::{doc, query, DateTime, Index};
#[test]
fn test_add_histograms_simple() {
assert_eq!(
add_vecs(vec![vec![1, 0, 3], vec![11, 2, 3], vec![0, 0, 1]], 3),
vec![12, 2, 7]
)
}
#[test]
fn test_add_histograms_empty() {
assert_eq!(add_vecs(vec![], 3), vec![0, 0, 0])
}
#[test]
fn test_histogram_builder_simple() {
// [1..3)
// [3..5)
// ..
// [9..11)
let mut histogram_computer = HistogramComputer {
counts: vec![0; 5],
min_value: 1,
divider: DividerU64::divide_by(2),
};
histogram_computer.add_value(1);
histogram_computer.add_value(7);
assert_eq!(histogram_computer.harvest(), vec![1, 0, 0, 1, 0]);
}
#[test]
fn test_histogram_too_low_is_ignored() {
let mut histogram_computer = HistogramComputer {
counts: vec![0; 5],
min_value: 2,
divider: DividerU64::divide_by(2),
};
histogram_computer.add_value(0);
assert_eq!(histogram_computer.harvest(), vec![0, 0, 0, 0, 0]);
}
#[test]
fn test_histogram_too_high_is_ignored() {
let mut histogram_computer = HistogramComputer {
counts: vec![0u64; 5],
min_value: 0,
divider: DividerU64::divide_by(2),
};
histogram_computer.add_value(10);
assert_eq!(histogram_computer.harvest(), vec![0, 0, 0, 0, 0]);
}
#[test]
fn test_no_segments() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let val_field = schema_builder.add_u64_field("val_field", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let reader = index.reader()?;
let searcher = reader.searcher();
let all_query = AllQuery;
let histogram_collector = HistogramCollector::new(val_field, 0u64, 2, 5);
let histogram = searcher.search(&all_query, &histogram_collector)?;
assert_eq!(histogram, vec![0; 5]);
Ok(())
}
#[test]
fn test_histogram_i64() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let val_field = schema_builder.add_i64_field("val_field", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut writer = index.writer_with_num_threads(1, 4_000_000)?;
writer.add_document(doc!(val_field=>12i64))?;
writer.add_document(doc!(val_field=>-30i64))?;
writer.add_document(doc!(val_field=>-12i64))?;
writer.add_document(doc!(val_field=>-10i64))?;
writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let all_query = AllQuery;
let histogram_collector = HistogramCollector::new(val_field, -20i64, 10u64, 4);
let histogram = searcher.search(&all_query, &histogram_collector)?;
assert_eq!(histogram, vec![1, 1, 0, 1]);
Ok(())
}
#[test]
fn test_histogram_merge() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let val_field = schema_builder.add_i64_field("val_field", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut writer = index.writer_with_num_threads(1, 4_000_000)?;
writer.add_document(doc!(val_field=>12i64))?;
writer.commit()?;
writer.add_document(doc!(val_field=>-30i64))?;
writer.commit()?;
writer.add_document(doc!(val_field=>-12i64))?;
writer.commit()?;
writer.add_document(doc!(val_field=>-10i64))?;
writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let all_query = AllQuery;
let histogram_collector = HistogramCollector::new(val_field, -20i64, 10u64, 4);
let histogram = searcher.search(&all_query, &histogram_collector)?;
assert_eq!(histogram, vec![1, 1, 0, 1]);
Ok(())
}
#[test]
fn test_histogram_dates() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date_field", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut writer = index.writer_with_num_threads(1, 4_000_000)?;
writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1982, Month::September, 17)?.with_hms(0, 0, 0)?)))?;
writer.add_document(
doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1986, Month::March, 9)?.with_hms(0, 0, 0)?)),
)?;
writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1983, Month::September, 27)?.with_hms(0, 0, 0)?)))?;
writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let all_query = AllQuery;
let week_histogram_collector = HistogramCollector::new(
date_field,
DateTime::from_primitive(
Date::from_calendar_date(1980, Month::January, 1)?.with_hms(0, 0, 0)?,
),
3_600_000_000 * 24 * 365, // it is just for a unit test... sorry leap years.
10,
);
let week_histogram = searcher.search(&all_query, &week_histogram_collector)?;
assert_eq!(week_histogram, vec![0, 0, 1, 1, 0, 0, 1, 0, 0, 0]);
Ok(())
}
}
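
A standalone arithmetic sketch of the bucket mapping implemented by `HistogramComputer` above: `bucket_id = (value - min_value) / bucket_width`, with values below `min_value` or at/after the last bucket silently ignored. The `bucket_id` helper is illustrative only.

fn bucket_id(value: u64, min_value: u64, bucket_width: u64, num_buckets: usize) -> Option<usize> {
    if value < min_value {
        return None; // below the histogram range
    }
    let id = ((value - min_value) / bucket_width) as usize;
    if id < num_buckets {
        Some(id)
    } else {
        None // above the histogram range
    }
}

fn main() {
    // Same partition as test_histogram_builder_simple: min = 1, width = 2,
    // five buckets covering [1..3), [3..5), [5..7), [7..9), [9..11).
    assert_eq!(bucket_id(1, 1, 2, 5), Some(0));
    assert_eq!(bucket_id(7, 1, 2, 5), Some(3));
    assert_eq!(bucket_id(0, 1, 2, 5), None); // too low: ignored
    assert_eq!(bucket_id(11, 1, 2, 5), None); // too high: ignored
}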


@@ -1,127 +0,0 @@
use std::cmp::Eq;
use std::collections::HashMap;
use std::hash::Hash;
use collector::Collector;
use fastfield::FastFieldReader;
use schema::Field;
use DocId;
use Result;
use Score;
use SegmentReader;
use SegmentLocalId;
/// Facet collector for i64/u64 fast field
pub struct IntFacetCollector<T>
where
T: FastFieldReader,
T::ValueType: Eq + Hash,
{
counters: HashMap<T::ValueType, u64>,
field: Field,
ff_reader: Option<T>,
}
impl<T> IntFacetCollector<T>
where
T: FastFieldReader,
T::ValueType: Eq + Hash,
{
/// Creates a new facet collector for aggregating a given field.
pub fn new(field: Field) -> IntFacetCollector<T> {
IntFacetCollector {
counters: HashMap::new(),
field: field,
ff_reader: None,
}
}
}
impl<T> Collector for IntFacetCollector<T>
where
T: FastFieldReader,
T::ValueType: Eq + Hash,
{
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
Ok(())
}
fn collect(&mut self, doc: DocId, _: Score) {
let val = self.ff_reader
.as_ref()
.expect(
"collect() was called before set_segment. \
This should never happen.",
)
.get(doc);
*(self.counters.entry(val).or_insert(0)) += 1;
}
}
#[cfg(test)]
mod tests {
use collector::{chain, IntFacetCollector};
use query::QueryParser;
use fastfield::{I64FastFieldReader, U64FastFieldReader};
use schema::{self, FAST, STRING};
use Index;
#[test]
// create 10 documents, set num field value to 0 or 1 for even/odd ones
// make sure we have facet counters correctly filled
fn test_facet_collector_results() {
let mut schema_builder = schema::Schema::builder();
let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
let num_field_f64 = schema_builder.add_f64_field("num_f64", FAST);
let text_field = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
{
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
for i in 0u64..10u64 {
index_writer.add_document(doc!(
num_field_i64 => ((i as i64) % 3i64) as i64,
num_field_u64 => (i % 2u64) as u64,
num_field_f64 => (i % 4u64) as f64,
text_field => "text"
));
}
}
assert_eq!(index_writer.commit().unwrap(), 10u64);
}
let searcher = index.reader().searcher();
let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64);
let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64);
let mut ffvf_f64: IntFacetCollector<F64FastFieldReader> = IntFacetCollector::new(num_field_f64);
{
// perform the query
let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64).push(&mut ffvf_f64);
let mut query_parser = QueryParser::for_index(index, vec![text_field]);
let query = query_parser.parse_query("text:text").unwrap();
query.search(&searcher, &mut facet_collectors).unwrap();
}
assert_eq!(ffvf_u64.counters[&0], 5);
assert_eq!(ffvf_u64.counters[&1], 5);
assert_eq!(ffvf_i64.counters[&0], 4);
assert_eq!(ffvf_i64.counters[&1], 3);
assert_eq!(ffvf_f64.counters[&0.0], 3);
assert_eq!(ffvf_f64.counters[&2.0], 2);
}
}

Some files were not shown because too many files have changed in this diff.