Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2025-12-28 21:12:54 +00:00)
Compare commits
209 Commits
Commit SHA1 prefixes, in listing order:

232ca5c06c, 743ae102f1, e78af20375, 30637f7a7f, 0107fe886b, 1d9566e73c, 8006f1df11, ffa03bad71, 98cf4ba63a, 4d65771e04, 9712a75399, 3ae03b91ae, 238b02ce7d, 3091459777, b7f8884246, e22f767fda, 3ecfc36e53, 1c9450174e, cde4c391cd, 6d47634616, 39b182c24b, baaae3f4ec, 63064601a7, 07a8023a3a, 59639cd311, b0e5e1f61d, 234a902470, 75d130f1ce, 410187dd24, 88303d4833, a26b0ff4a2, d4ed86f13a, fc8902353c, a2ee988304, 97b7984200, 8683718159, 0cf274135b, a3b44773bb, ec7c582109, ee7ab72fb1, 2c20759829, 23387b0ed0, e82859f2e6, be830b03c5, 1b94a3e382, c3fbc4c8fa, 4ee2db25a0, e423784fd0, fdb9c3c516, 6fb114224a, 2c3e33895a, d512b53688, c8afd2b55d, 3fd6d7125b, de6a3987a9, 3dedc465fa, f16cc6367e, 4026fc5fb1, 43742a93ef, 2a843d86cb, 9a706c296a, 5ff8123b7a, 6061158506, 4e8b0e89d9, 0540ebb49e, ef94582203, 2f242d5f52, da3d372e6e, 42fd3fe5c7, 5dae6e6bbc, e608e0a1df, 6c8c90d348, eb50e92ec4, 20bede9462, 4640ab4e65, cd51ed0f9f, 6676fe5717, 292bb17346, 0300e7272b, 8760899fa2, c89d570a79, 1da06d867b, 76e8db6ed3, 31e5580bfa, 930d3db2f7, 1593e1dc6f, e0189fc9e6, ffdb4ef0a7, 58845344c2, 548ec9ecca, 86b700fa93, e95c49e749, f3033a8469, c4125bda59, a7ffc0e610, 9370427ae2, 1fc7afa90a, 6a104e4f69, 920f086e1d, 13aaca7e11, df53dc4ceb, dd028841e8, eb84b8a60d, c05f46ad0e, 435ff9d524, fdd5dd8496, fb5476d5de, dd8332c327, 63d201150b, b78efdc59f, 5cb08f7996, 1947a19700, 271b019420, 340693184f, 97782a9511, 930010aa88, 7f5b07d4e7, 3edb3dce6a, 1edaf7a312, 137906ff29, 143a143cde, 4f5ce12a77, 813efa4ab3, c3b6c1dc0b, 6f5e0ef6f4, 7224f58895, 49519c3f61, cb11b92505, 7b2dcfbd91, d2e30e6681, ef109927b3, 44e5c4dfd3, 6f223253ea, f7b0392bd5, 442bc9a1b8, db7d784573, 79132e803a, 9e132b7dde, 1e55189db1, 8b1b389a76, 46f3ec87a5, f24e5f405e, 2589be3984, a02a9294e4, 8023445b63, 05ce093f97, 6937e23a56, 974c321153, f30ec9b36b, acd7c1ea2d, aaeeda2bc5, ac4d433fad, a298c084e6, 185a72b341, bb41ae76f9, 74d32e522a, 927dd1ee6f, 2c9302290f, 426cc436da, 68d42c9cf2, ca49d6130f, 3588ca0561, 7c6cdcd876, 71366b9a56, a3247ebcfb, 3ec13a8719, f8593c76d5, f8710bd4b0, 8d05b8f7b2, fc25516b7a, 5b1e71947f, 69351fb4a5, 3d0082d020, 8e450c770a, a757902aed, b3a8074826, 4289625348, 850f10c1fe, d7f9bfdfc5, d0d5db4515, 303fc7e820, 744edb2c5c, 2d70efb7b0, eb5b2ffdcc, 38513014d5, 9cb7a0f6e6, 8d466b8a76, 413d0e1719, 0eb3c872fd, f9203228be, 8f377b92d0, 1e89f86267, d1f61a50c1, 2bb85ed575, 236fa74767, 63b35dd87b, efb910f4e8, aff7e64d4e, 92a3f3981f, 447a9361d8, 5f59139484, 27c373d26d, 80ae136646, b05b5f5487, 4fe96483bc, 09e27740e2, f26874557e, a7d10b65ae, e120e3b7aa
.gitignore (vendored) — 3 lines changed

```
@@ -1,3 +1,4 @@
*.swp
target
target/debug
.vscode
@@ -8,4 +9,4 @@ benchmark
cpp/simdcomp/bitpackingbenchmark
*.bk
.idea
trace.dat
trace.dat
```
.travis.yml — 25 lines changed

```yaml
@@ -1,4 +1,6 @@
language: rust
sudo: required
cache: cargo
rust:
- nightly
env:
@@ -11,6 +13,7 @@ addons:
  apt:
    sources:
    - ubuntu-toolchain-r-test
    - kalakris-cmake
    packages:
    - gcc-4.8
    - g++-4.8
@@ -18,19 +21,17 @@ addons:
    - libelf-dev
    - libdw-dev
    - binutils-dev
    - cmake
before_script:
  - |
    pip install 'travis-cargo<0.2' --user &&
    export PATH=$HOME/.local/bin:$PATH
  - export PATH=$HOME/.cargo/bin:$PATH
  - cargo install cargo-update || echo "cargo-update already installed"
  - cargo install cargo-travis || echo "cargo-travis already installed"
script:
  - |
    travis-cargo build &&
    travis-cargo test &&
    travis-cargo bench &&
    travis-cargo doc
  - cargo build
  - cargo test
  - cargo test -- --ignored
  - cargo run --example simple_search
  - cargo doc
after_success:
  - bash ./script/build-doc.sh
  - travis-cargo doc-upload
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then travis-cargo coveralls --no-sudo --verify; fi
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then ./kcov/build/src/kcov --verify --coveralls-id=$TRAVIS_JOB_ID --include-path=`pwd`/src --exclude-path=`pwd`/cpp --exclude-pattern=/.cargo target/kcov target/debug/tantivy-*; fi
  - cargo coveralls --exclude-pattern src/functional_test.rs
  - cargo doc-upload
```
CHANGELOG.md — 43 lines changed (@@ -1,3 +1,46 @@)

Tantivy 0.5.2
==========================

- Removed C code. Tantivy is now pure Rust.
- BM25
- Approximate field norms encoded over 1 byte.

Tantivy 0.5.1
==========================

- bugfix #254: tantivy failed if no documents in a segment contained a specific field.

Tantivy 0.5
==========================

- Faceting
- RangeQuery
- Configurable tokenization pipeline
- Bugfix in PhraseQuery
- Various query optimisations
- Allowing very large indexes
  - 64-bit file addresses
  - Smarter encoding of the `TermInfo` objects

Tantivy 0.4.3
==========================

- Bugfix race condition when deleting files. (#198)

Tantivy 0.4.2
==========================

- Prevent usage of AVX2 instructions (#201)

Tantivy 0.4.1
==========================

- Bugfix for non-indexed fields. (#199)

Tantivy 0.4.0
==========================
Cargo.toml — 45 lines changed

```toml
@@ -1,8 +1,7 @@
[package]
name = "tantivy"
version = "0.4.1"
version = "0.5.1"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
build = "build.rs"
license = "MIT"
categories = ["database-implementations", "data-structures"]
description = """Tantivy is a search engine library."""
@@ -14,45 +13,42 @@ keywords = ["search", "information", "retrieval"]

[dependencies]
byteorder = "1.0"
memmap = "0.4"
lazy_static = "0.2.1"
tinysegmenter = "0.1.0"
regex = "0.2"
fst = "0.1.37"
atomicwrites = "0.1.3"
tempfile = "2.1"
fst = {version="0.2", default-features=false}
atomicwrites = {version="0.1", optional=true}
log = "0.3.6"
combine = "2.2"
tempdir = "0.3"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
bincode = "0.8"
libc = {version = "0.2.20", optional=true}
num_cpus = "1.2"
itertools = "0.5.9"
lz4 = "1.20"
bit-set = "0.4.0"
time = "0.1"
uuid = { version = "0.5", features = ["v4", "serde"] }
uuid = { version = "0.6", features = ["v4", "serde"] }
chan = "0.1"
version = "2"
crossbeam = "0.2"
crossbeam = "0.3"
futures = "0.1"
futures-cpupool = "0.1"
error-chain = "0.8"
owning_ref = "0.3"
stable_deref_trait = "1.0.0"
rust-stemmers = "0.1.0"
downcast = { version="0.9", features = ["nightly"]}
matches = "0.1"
snap = "0.2"
bitpacking = {path = "../bitpacking"}

[target.'cfg(windows)'.dependencies]
winapi = "0.2"

[dev-dependencies]
rand = "0.3"
tempfile = "2.1"
env_logger = "0.4"

[build-dependencies]
gcc = {version = "0.3", optional=true}

[profile.release]
opt-level = 3
debug = false
@@ -61,10 +57,23 @@ debug-assertions = false

[features]
default = ["simdcompression"]
simdcompression = ["libc", "gcc"]
default = ["mmap"]
streamdict = []
mmap = ["fst/mmap", "atomicwrites"]

[badges]
travis-ci = { repository = "tantivy-search/tantivy" }

[[example]]
name = "simple_search"
required-features = ["mmap"]

[[bin]]
name = "convert_to_static"
path = "./bin/convert_to_static.rs"

[[bin]]
name = "test_static_dir"
path = "./bin/test_static_dir.rs"
```
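The `[features]` section above is what downstream crates toggle. A sketch of the two ways a consumer's own Cargo.toml could depend on this release (the version number is the one this diff sets; everything else is standard Cargo syntax, not something this changeset adds):

```toml
[dependencies]
# Default build: mmap-backed directories (pulls in fst/mmap and atomicwrites).
tantivy = "0.5.1"

# Or, without the mmap feature (e.g. for the StaticDirectory use case below):
# tantivy = { version = "0.5.1", default-features = false }
```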
README.md — 49 lines changed

@@ -5,25 +5,26 @@

[](https://gitter.im/tantivy-search/tantivy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[](https://opensource.org/licenses/MIT)
[](https://ci.appveyor.com/project/fulmicoton/tantivy)

**Tantivy** is a **full text search engine library** written in Rust.

It is strongly inspired by Lucene's design.

# Features

- Configurable indexing (optional term frequency and position indexing)
- Tiny startup time (<10 ms), perfect for command line tools
- tf-idf scoring
- Basic query language
- Phrase queries
- Incremental indexing
- Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop)
- Mmap directory
- Optional SIMD integer compression
- Single valued and multivalued u64 and i64 fast fields (equivalent of doc values in Lucene)
- LZ4 compressed document store
- Range queries
- Faceting
- Cheesy logo with a horse

Tantivy supports Linux, MacOS and Windows.
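For orientation, a minimal indexing-and-search sketch against the 0.5-era API — the same calls that the new bin/test_static_dir.rs further down uses. `create_in_ram`, the 50 MB writer heap, and the sample document are illustrative assumptions, not part of this changeset:

```rust
extern crate tantivy;

use tantivy::collector::TopCollector;
use tantivy::query::QueryParser;
use tantivy::schema::{Document, SchemaBuilder, STORED, TEXT};
use tantivy::Index;

fn run() -> tantivy::Result<()> {
    // Define a schema: a stored title and an indexed-only body.
    let mut schema_builder = SchemaBuilder::default();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let body = schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    // Index a single document, entirely in RAM.
    let index = Index::create_in_ram(schema.clone());
    let mut index_writer = index.writer(50_000_000)?; // 50 MB indexing heap (assumed size)
    let mut doc = Document::default();
    doc.add_text(title, "The Old Man and the Sea");
    doc.add_text(body, "He was an old man who fished alone in a skiff");
    index_writer.add_document(doc);
    index_writer.commit()?;

    // Search it, mirroring the calls in bin/test_static_dir.rs.
    index.load_searchers()?;
    let searcher = index.searcher();
    let query_parser = QueryParser::for_index(&index, vec![title, body]);
    let query = query_parser.parse_query("sea")?;
    let mut top_collector = TopCollector::with_limit(10);
    searcher.search(&*query, &mut top_collector)?;
    for doc_address in top_collector.docs() {
        let retrieved_doc = searcher.doc(&doc_address)?;
        println!("{}", schema.to_json(&retrieved_doc));
    }
    Ok(())
}

fn main() {
    run().unwrap();
}
```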
@@ -38,25 +39,47 @@ It will walk you through getting a wikipedia search engine up and running in a f

- [For the last released version](https://docs.rs/tantivy/)
- [For the last master branch](https://tantivy-search.github.io/tantivy/tantivy/index.html)

# Compiling

## Development

Tantivy requires Rust Nightly because it requires the features [`box_syntax`](https://doc.rust-lang.org/stable/unstable-book/language-features/box-syntax.html), [`optin_builtin_traits`](https://github.com/rust-lang/rfcs/blob/master/text/0019-opt-in-builtin-traits.md), [`conservative_impl_trait`](https://github.com/rust-lang/rfcs/blob/master/text/1522-conservative-impl-trait.md),
and [simd](https://github.com/rust-lang/rust/issues/27731).

To check out and run tests, you can simply run:

    git clone git@github.com:tantivy-search/tantivy.git
    cd tantivy
    cargo +nightly build

## Note on release build and performance

If your project depends on `tantivy`, for better performance, make sure to enable
`sse3` instructions using RUSTFLAGS. (This instruction set is likely to
be available on most `x86_64` CPUs you will encounter.)

For instance,

    RUSTFLAGS='-C target-feature=+sse3'

Or, if you are targeting a specific CPU:

    RUSTFLAGS='-C target-cpu=native' cargo build --release

Regardless of the flags you pass, by default `tantivy` will contain `SSE3` instructions.
If you want to disable those, you can run the following command:

    cargo build --no-default-features
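If you would rather not export `RUSTFLAGS` by hand, the same flags can live in a per-project Cargo config. A sketch (standard Cargo behaviour, nothing specific to this changeset):

```toml
# .cargo/config — applies to every `cargo build` in this project tree.
[build]
rustflags = ["-C", "target-feature=+sse3"]
# Or, to tune for the machine you are building on:
# rustflags = ["-C", "target-cpu=native"]
```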
# Contribute

Send me an email (paul.masurel at gmail.com) if you want to contribute to tantivy.
bin/convert_to_static.rs — new file (20 lines)

```rust
extern crate tantivy;

use std::env;
use std::path::PathBuf;
use std::fs::File;
use std::io::Write;

use tantivy::directory::write_static_from_directory;

fn main() {
    // Read the two positional arguments: the index directory to pack,
    // and the output file that will receive the static blob.
    let mut args = env::args();
    args.next().unwrap(); // skip the program name
    let directory_path = args.next().expect("Expected 2 args: <directory_path> <outputfile>");
    let output_path = args.next().expect("Expected 2 args: <directory_path> <outputfile>");
    println!("{} => {}", directory_path, output_path);
    let buffer = write_static_from_directory(&PathBuf::from(directory_path)).unwrap();
    println!("Read all");
    let mut output = File::create(output_path).unwrap();
    output.write_all(&buffer[..]).unwrap();
    output.flush().unwrap();
}
```
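A plausible way to exercise the two new `[[bin]]` targets declared in Cargo.toml above (the index path is illustrative; `output.bin` must land next to bin/test_static_dir.rs, which embeds it via `include_bytes!`):

```sh
# Pack an existing index directory into one static blob.
cargo run --bin convert_to_static -- /path/to/index bin/output.bin
# Rebuild, then query the index through the embedded StaticDirectory.
cargo run --bin test_static_dir
```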
bin/test_static_dir.rs — new file (51 lines)

```rust
extern crate tantivy;

use tantivy::directory::StaticDirectory;
use tantivy::Index;
use tantivy::query::QueryParser;
use tantivy::collector::TopCollector;

// The static blob produced by convert_to_static, compiled into the binary.
static DATA: &'static [u8] = include_bytes!("output.bin");

fn run() -> tantivy::Result<()> {
    // Open the index directly from the embedded bytes.
    let directory = StaticDirectory::open(DATA).unwrap();
    let index = Index::open_directory(directory).unwrap();
    index.load_searchers().unwrap();
    let searcher = index.searcher();

    let schema = index.schema();
    let title = schema.get_field("title").unwrap();
    let body = schema.get_field("body").unwrap();

    let query_parser = QueryParser::for_index(&index, vec![title, body]);
    let query = query_parser.parse_query("sea whale")?;

    let mut top_collector = TopCollector::with_limit(10);

    searcher.search(&*query, &mut top_collector)?;

    let doc_addresses = top_collector.docs();

    // The actual documents still need to be
    // retrieved from Tantivy's store.
    //
    // Since the body field was not configured as stored,
    // the document returned will only contain
    // a title.
    for doc_address in doc_addresses {
        let retrieved_doc = searcher.doc(&doc_address)?;
        println!("{}", schema.to_json(&retrieved_doc));
    }
    Ok(())
}

fn main() {
    run().unwrap();
}
```
build.rs — 61 lines removed

```rust
#[cfg(feature = "simdcompression")]
mod build {
    extern crate gcc;

    pub fn build() {
        let mut config = gcc::Config::new();
        config
            .include("./cpp/simdcomp/include")
            .file("cpp/simdcomp/src/avxbitpacking.c")
            .file("cpp/simdcomp/src/simdintegratedbitpacking.c")
            .file("cpp/simdcomp/src/simdbitpacking.c")
            .file("cpp/simdcomp/src/simdpackedsearch.c")
            .file("cpp/simdcomp/src/simdcomputil.c")
            .file("cpp/simdcomp/src/simdpackedselect.c")
            .file("cpp/simdcomp/src/simdfor.c")
            .file("cpp/simdcomp_wrapper.c");

        if !cfg!(debug_assertions) {
            config.opt_level(3);

            if cfg!(target_env = "msvc") {
                config
                    .define("NDEBUG", None)
                    .flag("/Gm-")
                    .flag("/GS-")
                    .flag("/Gy")
                    .flag("/Oi")
                    .flag("/GL");
            }
        }

        if !cfg!(target_env = "msvc") {
            config
                .include("./cpp/streamvbyte/include")
                .file("cpp/streamvbyte/src/streamvbyte.c")
                .file("cpp/streamvbyte/src/streamvbytedelta.c")
                .flag("-msse4.1")
                .flag("-march=native")
                .flag("-std=c99");
        }

        config.compile("libsimdcomp.a");

        // Workaround for linking static libraries built with /GL
        // https://github.com/rust-lang/rust/issues/26003
        if !cfg!(debug_assertions) && cfg!(target_env = "msvc") {
            println!("cargo:rustc-link-lib=dylib=simdcomp");
        }

        println!("cargo:rerun-if-changed=cpp");
    }
}

#[cfg(not(feature = "simdcompression"))]
mod build {
    pub fn build() {}
}

fn main() {
    build::build();
}
```
cpp/simdcomp/.gitignore (vendored) — 9 lines removed

```
Makefile.in
lib*
unit*
*.o
src/*.lo
src/*.o
src/.deps
src/.dirstamp
src/.libs
```
simdcomp Travis CI config — 11 lines removed

```yaml
language: c
sudo: false
compiler:
- gcc
- clang

branches:
  only:
    - master

script: make && ./unit
```
simdcomp changelog — 9 lines removed

```
Upcoming
- added missing include
- improved portability (MSVC)
- implemented C89 compatibility
Version 0.0.3 (19 May 2014)
- improved documentation
Version 0.0.2 (6 February 2014)
- added go demo
Version 0.0.1 (5 February 2014)
```
simdcomp LICENSE (BSD) — 27 lines removed

```
Copyright (c) 2014--, The authors
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this
  list of conditions and the following disclaimer in the documentation and/or
  other materials provided with the distribution.

* Neither the name of the {organization} nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
simdcomp README — 137 lines removed

The SIMDComp library
====================
[](https://travis-ci.org/lemire/simdcomp)

A simple C library for compressing lists of integers using binary packing and SIMD instructions.
The assumption is either that you have a list of 32-bit integers where most of them are small, or a list of 32-bit integers where differences between successive integers are small. No software is able to reliably compress an array of 32-bit random numbers.

This library can decode at least 4 billion compressed integers per second on most
desktop or laptop processors. That is, it can decompress data at a rate of 15 GB/s.
This is significantly faster than generic codecs like gzip, LZO, Snappy or LZ4.

On a Skylake Intel processor, it can decode integers at a rate of 0.3 cycles per integer,
which can easily translate into more than 8 billion decoded integers per second.

Contributors: Daniel Lemire, Nathan Kurz, Christoph Rupp, Anatol Belski, Nick White and others

What is it for?
-------------

This is a low-level library for fast integer compression. By design it does not define a compressed
format. It is up to the (sophisticated) user to create a compressed format.

Requirements
-------------

- Your processor should support SSE4.1 (it is supported by most Intel and AMD processors released since 2008).
- It is possible to build the core part of the code if your processor supports SSE2 (Pentium 4 or better).
- C99 compliant compiler (GCC is assumed)
- A Linux-like distribution is assumed by the makefile

For a plain C version that does not use SIMD instructions, see https://github.com/lemire/LittleIntPacker

Usage
-------

Compression works over blocks of 128 integers.

For a complete working example, see example.c (you can build it and
run it with "make example; ./example").

1) Lists of integers in random order.

```C
const uint32_t b = maxbits(datain);// computes bit width
simdpackwithoutmask(datain, buffer, b);//compressed to buffer, compressing 128 32-bit integers down to b*16 bytes
simdunpack(buffer, backbuffer, b);//uncompressed to backbuffer
```

While 128 32-bit integers are read, only b 128-bit words are written. Thus, the compression ratio is 32/b.
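For instance, with b = 7, the 128 input integers (512 bytes) pack into seven 128-bit words (112 bytes), for a ratio of 32/7 ≈ 4.57.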
2) Sorted lists of integers.

We use differential coding: we store the difference between successive integers. For this purpose, we need an initial value (called offset).

```C
uint32_t offset = 0;
uint32_t b1 = simdmaxbitsd1(offset,datain); // bit width
simdpackwithoutmaskd1(offset, datain, buffer, b1);//compressing 128 32-bit integers down to b1*16 bytes
simdunpackd1(offset, buffer, backbuffer, b1);//uncompressed
```

General example for arrays of arbitrary length:
```C
int compress_decompress_demo() {
    size_t k, N = 9999;
    __m128i * endofbuf;
    uint32_t * datain = malloc(N * sizeof(uint32_t));
    uint8_t * buffer;
    uint32_t * backbuffer = malloc(N * sizeof(uint32_t));
    uint32_t b;

    for (k = 0; k < N; ++k){ /* start with k=0, not k=1! */
        datain[k] = k;
    }

    b = maxbits_length(datain, N);
    buffer = malloc(simdpack_compressedbytes(N,b)); // allocate just enough memory
    endofbuf = simdpack_length(datain, N, (__m128i *)buffer, b);
    /* compressed data is stored between buffer and endofbuf using (endofbuf-buffer)*sizeof(__m128i) bytes */
    /* would be safe to do : buffer = realloc(buffer,(endofbuf-(__m128i *)buffer)*sizeof(__m128i)); */
    simdunpack_length((const __m128i *)buffer, N, backbuffer, b);

    for (k = 0; k < N; ++k){
        if(datain[k] != backbuffer[k]) {
            printf("bug\n");
            return -1;
        }
    }
    return 0;
}
```

3) Frame-of-Reference

We also have frame-of-reference (FOR) functions (see the simdfor.h header). They work like the bit packing
routines, but do not use differential coding, so they allow faster search in some cases, at the expense
of compression.
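A sketch of a FOR round trip. Loud assumptions: the `simdpackFOR`/`simdunpackFOR` names and the `(initvalue, in, out, bit)` argument order are assumed to mirror the `d1` routines above; verify against the actual prototypes in simdfor.h. Only `simdmin` and `bits` (declared in simdcomputil.h) are taken from this document:

```C
#include "simdcomp.h"

/* Sketch only: simdpackFOR/simdunpackFOR are assumed to take
 * (initvalue, in, out, bit) like the d1 routines; check simdfor.h. */
void for_roundtrip(const uint32_t datain[128]) {
    __m128i buffer[32];                /* worst case: 32 bits per integer */
    uint32_t backbuffer[128];
    uint32_t minval = simdmin(datain); /* the frame of reference */
    uint32_t maxval = datain[0];
    uint32_t b, i;
    for (i = 1; i < 128; i++) {
        if (datain[i] > maxval) maxval = datain[i];
    }
    b = bits(maxval - minval);         /* bit width of the value range */
    simdpackFOR(minval, datain, buffer, b);
    simdunpackFOR(minval, buffer, backbuffer, b);
}
```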
Setup
---------

    make
    make test

and if you are daring:

    make install

Go
--------

If you are a go user, there is a "go" folder where you will find a simple demo.

Other libraries
----------------

* Fast decoder for VByte-compressed integers https://github.com/lemire/MaskedVByte
* Fast integer compression in C using StreamVByte https://github.com/lemire/streamvbyte
* FastPFOR is a C++ research library well suited to compress unsorted arrays: https://github.com/lemire/FastPFor
* SIMDCompressionAndIntersection is a C++ research library well suited for sorted arrays (differential coding)
  and computing intersections: https://github.com/lemire/SIMDCompressionAndIntersection
* TurboPFor is a C library that offers lots of interesting optimizations. Well worth checking! (GPL license) https://github.com/powturbo/TurboPFor
* Oroch is a C++ library that offers a usable API (MIT license) https://github.com/ademakov/Oroch

References
------------

* Daniel Lemire, Leonid Boytsov, Nathan Kurz, SIMD Compression and the Intersection of Sorted Integers, Software: Practice & Experience 46 (6), 2016. http://arxiv.org/abs/1401.6399
* Daniel Lemire and Leonid Boytsov, Decoding billions of integers per second through vectorization, Software: Practice & Experience 45 (1), 2015. http://arxiv.org/abs/1209.2137 http://onlinelibrary.wiley.com/doi/10.1002/spe.2203/abstract
* Jeff Plaisance, Nathan Kurz, Daniel Lemire, Vectorized VByte Decoding, International Symposium on Web Algorithms, 2015. http://arxiv.org/abs/1503.07387
* Wayne Xin Zhao, Xudong Zhang, Daniel Lemire, Dongdong Shan, Jian-Yun Nie, Hongfei Yan, Ji-Rong Wen, A General SIMD-based Approach to Accelerating Compression Algorithms, ACM Transactions on Information Systems 33 (3), 2015. http://arxiv.org/abs/1502.01916
* T. D. Wu, Bitpacking techniques for indexing genomes: I. Hash tables, Algorithms for Molecular Biology 11 (5), 2016. http://almob.biomedcentral.com/articles/10.1186/s13015-016-0069-5
simdcomp search/select benchmark (C) — 235 lines removed

```C
/**
 * This code is released under a BSD License.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#include "simdcomp.h"

#ifdef _MSC_VER
# include <windows.h>

__int64 freq;

typedef __int64 time_snap_t;

static time_snap_t time_snap(void)
{
    __int64 now;

    QueryPerformanceCounter((LARGE_INTEGER *)&now);

    return (__int64)((now*1000000)/freq);
}
# define TIME_SNAP_FMT "%I64d"
#else
# define time_snap clock
# define TIME_SNAP_FMT "%lu"
typedef clock_t time_snap_t;
#endif


void benchmarkSelect() {
    uint32_t buffer[128];
    uint32_t backbuffer[128];
    uint32_t initial = 33;
    uint32_t b;
    time_snap_t S1, S2, S3;
    int i;
    printf("benchmarking select \n");

    /* this test creates delta encoded buffers with different bits, then
     * performs lower bound searches for each key */
    for (b = 0; b <= 32; b++) {
        uint32_t prev = initial;
        uint32_t out[128];
        /* initialize the buffer */
        for (i = 0; i < 128; i++) {
            buffer[i] = ((uint32_t)(1655765 * i )) ;
            if(b < 32) buffer[i] %= (1<<b);
        }
        for (i = 0; i < 128; i++) {
            buffer[i] = buffer[i] + prev;
            prev = buffer[i];
        }

        for (i = 1; i < 128; i++) {
            if(buffer[i] < buffer[i-1] )
                buffer[i] = buffer[i-1];
        }
        assert(simdmaxbitsd1(initial, buffer)<=b);

        for (i = 0; i < 128; i++) {
            out[i] = 0; /* memset would do too */
        }

        /* delta-encode to 'b' bits */
        simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);

        S1 = time_snap();
        for (i = 0; i < 128 * 10; i++) {
            uint32_t valretrieved = simdselectd1(initial, (__m128i *)out, b, (uint32_t)i % 128);
            assert(valretrieved == buffer[i%128]);
        }
        S2 = time_snap();
        for (i = 0; i < 128 * 10; i++) {
            simdunpackd1(initial, (__m128i *)out, backbuffer, b);
            assert(backbuffer[i % 128] == buffer[i % 128]);
        }
        S3 = time_snap();
        printf("bit width = %d, fast select function time = " TIME_SNAP_FMT ", naive time = " TIME_SNAP_FMT " \n", b, (S2-S1), (S3-S2));
    }
}

int uint32_cmp(const void *a, const void *b)
{
    const uint32_t *ia = (const uint32_t *)a;
    const uint32_t *ib = (const uint32_t *)b;
    if(*ia < *ib)
        return -1;
    else if (*ia > *ib)
        return 1;
    return 0;
}

/* adapted from wikipedia */
int binary_search(uint32_t * A, uint32_t key, int imin, int imax)
{
    int imid;
    imax --;
    while(imin + 1 < imax) {
        imid = imin + ((imax - imin) / 2);

        if (A[imid] > key) {
            imax = imid;
        } else if (A[imid] < key) {
            imin = imid;
        } else {
            return imid;
        }
    }
    return imax;
}


/* adapted from wikipedia */
int lower_bound(uint32_t * A, uint32_t key, int imin, int imax)
{
    int imid;
    imax --;
    while(imin + 1 < imax) {
        imid = imin + ((imax - imin) / 2);

        if (A[imid] >= key) {
            imax = imid;
        } else if (A[imid] < key) {
            imin = imid;
        }
    }
    if(A[imin] >= key) return imin;
    return imax;
}

void benchmarkSearch() {
    uint32_t buffer[128];
    uint32_t backbuffer[128];
    uint32_t out[128];
    uint32_t result, initial = 0;
    uint32_t b, i;
    time_snap_t S1, S2, S3, S4;

    printf("benchmarking search \n");

    /* this test creates delta encoded buffers with different bits, then
     * performs lower bound searches for each key */
    for (b = 0; b <= 32; b++) {
        uint32_t prev = initial;
        /* initialize the buffer */
        for (i = 0; i < 128; i++) {
            buffer[i] = ((uint32_t)rand()) ;
            if(b < 32) buffer[i] %= (1<<b);
        }

        qsort(buffer,128, sizeof(uint32_t), uint32_cmp);

        for (i = 0; i < 128; i++) {
            buffer[i] = buffer[i] + prev;
            prev = buffer[i];
        }
        for (i = 1; i < 128; i++) {
            if(buffer[i] < buffer[i-1] )
                buffer[i] = buffer[i-1];
        }
        assert(simdmaxbitsd1(initial, buffer)<=b);
        for (i = 0; i < 128; i++) {
            out[i] = 0; /* memset would do too */
        }

        /* delta-encode to 'b' bits */
        simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);
        simdunpackd1(initial, (__m128i *)out, backbuffer, b);

        for (i = 0; i < 128; i++) {
            assert(buffer[i] == backbuffer[i]);
        }
        S1 = time_snap();
        for (i = 0; i < 128 * 10; i++) {
            int pos;
            uint32_t pseudorandomkey = buffer[i%128];
            __m128i vecinitial = _mm_set1_epi32(initial);
            pos = simdsearchd1(&vecinitial, (__m128i *)out, b,
                               pseudorandomkey, &result);
            if((result < pseudorandomkey) || (buffer[pos] != result)) {
                printf("bug A.\n");
            } else if (pos > 0) {
                if(buffer[pos-1] >= pseudorandomkey)
                    printf("bug B.\n");
            }
        }
        S2 = time_snap();
        for (i = 0; i < 128 * 10; i++) {
            int pos;
            uint32_t pseudorandomkey = buffer[i%128];
            simdunpackd1(initial, (__m128i *)out, backbuffer, b);
            pos = lower_bound(backbuffer, pseudorandomkey, 0, 128);
            result = backbuffer[pos];

            if((result < pseudorandomkey) || (buffer[pos] != result)) {
                printf("bug C.\n");
            } else if (pos > 0) {
                if(buffer[pos-1] >= pseudorandomkey)
                    printf("bug D.\n");
            }
        }
        S3 = time_snap();
        for (i = 0; i < 128 * 10; i++) {
            int pos;
            uint32_t pseudorandomkey = buffer[i%128];
            pos = simdsearchwithlengthd1(initial, (__m128i *)out, b, 128,
                                         pseudorandomkey, &result);
            if((result < pseudorandomkey) || (buffer[pos] != result)) {
                printf("bug A.\n");
            } else if (pos > 0) {
                if(buffer[pos-1] >= pseudorandomkey)
                    printf("bug B.\n");
            }
        }
        S4 = time_snap();

        printf("bit width = %d, fast search function time = " TIME_SNAP_FMT ", naive time = " TIME_SNAP_FMT " , fast with length time = " TIME_SNAP_FMT " \n", b, (S2-S1), (S3-S2), (S4-S3) );
    }
}


int main() {
#ifdef _MSC_VER
    QueryPerformanceFrequency((LARGE_INTEGER *)&freq);
#endif
    benchmarkSearch();
    benchmarkSelect();
    return 0;
}
```
simdcomp bitpacking benchmark (C) — 205 lines removed

```C
#include <stdio.h>
#include <stdlib.h> /* malloc, free, rand; missing from the original listing */

#include "simdcomp.h"


#define RDTSC_START(cycles) \
    do { \
        register unsigned cyc_high, cyc_low; \
        __asm volatile( \
            "cpuid\n\t" \
            "rdtsc\n\t" \
            "mov %%edx, %0\n\t" \
            "mov %%eax, %1\n\t" \
            : "=r"(cyc_high), "=r"(cyc_low)::"%rax", "%rbx", "%rcx", "%rdx"); \
        (cycles) = ((uint64_t)cyc_high << 32) | cyc_low; \
    } while (0)

#define RDTSC_FINAL(cycles) \
    do { \
        register unsigned cyc_high, cyc_low; \
        __asm volatile( \
            "rdtscp\n\t" \
            "mov %%edx, %0\n\t" \
            "mov %%eax, %1\n\t" \
            "cpuid\n\t" \
            : "=r"(cyc_high), "=r"(cyc_low)::"%rax", "%rbx", "%rcx", "%rdx"); \
        (cycles) = ((uint64_t)cyc_high << 32) | cyc_low; \
    } while (0)


uint32_t * get_random_array_from_bit_width(uint32_t length, uint32_t bit) {
    uint32_t * answer = malloc(sizeof(uint32_t) * length);
    uint32_t mask = (uint32_t) ((UINT64_C(1) << bit) - 1);
    uint32_t i;
    for(i = 0; i < length; ++i) {
        answer[i] = rand() & mask;
    }
    return answer;
}

uint32_t * get_random_array_from_bit_width_d1(uint32_t length, uint32_t bit) {
    uint32_t * answer = malloc(sizeof(uint32_t) * length);
    uint32_t mask = (uint32_t) ((UINT64_C(1) << bit) - 1);
    uint32_t i;
    answer[0] = rand() & mask;
    for(i = 1; i < length; ++i) {
        answer[i] = answer[i-1] + (rand() & mask);
    }
    return answer;
}


void demo128() {
    const uint32_t length = 128;
    uint32_t bit;
    printf("# --- %s\n", __func__);
    printf("# compressing %d integers\n",length);
    printf("# format: bit width, pack in cycles per int, unpack in cycles per int\n");
    for(bit = 1; bit <= 32; ++bit) {
        uint32_t i;

        uint32_t * data = get_random_array_from_bit_width(length, bit);
        __m128i * buffer = malloc(length * sizeof(uint32_t));
        uint32_t * backdata = malloc(length * sizeof(uint32_t));
        uint32_t repeat = 500;
        uint64_t min_diff;
        printf("%d\t",bit);
        min_diff = (uint64_t)-1;
        for (i = 0; i < repeat; i++) {
            uint64_t cycles_start, cycles_final, cycles_diff;
            __asm volatile("" ::: /* pretend to clobber */ "memory");
            RDTSC_START(cycles_start);
            simdpackwithoutmask(data,buffer, bit);
            RDTSC_FINAL(cycles_final);
            cycles_diff = (cycles_final - cycles_start);
            if (cycles_diff < min_diff) min_diff = cycles_diff;
        }
        printf("%.2f\t",min_diff*1.0/length);
        min_diff = (uint64_t)-1;
        for (i = 0; i < repeat; i++) {
            uint64_t cycles_start, cycles_final, cycles_diff;
            __asm volatile("" ::: /* pretend to clobber */ "memory");
            RDTSC_START(cycles_start);
            simdunpack(buffer, backdata,bit);
            RDTSC_FINAL(cycles_final);
            cycles_diff = (cycles_final - cycles_start);
            if (cycles_diff < min_diff) min_diff = cycles_diff;
        }
        printf("%.2f\t",min_diff*1.0/length);

        free(data);
        free(buffer);
        free(backdata);
        printf("\n");
    }
    printf("\n\n"); /* two blank lines are required by gnuplot */
}

void demo128_d1() {
    const uint32_t length = 128;
    uint32_t bit;
    printf("# --- %s\n", __func__);
    printf("# compressing %d integers\n",length);
    printf("# format: bit width, pack in cycles per int, unpack in cycles per int\n");
    for(bit = 1; bit <= 32; ++bit) {
        uint32_t i;

        uint32_t * data = get_random_array_from_bit_width_d1(length, bit);
        __m128i * buffer = malloc(length * sizeof(uint32_t));
        uint32_t * backdata = malloc(length * sizeof(uint32_t));
        uint32_t repeat = 500;
        uint64_t min_diff;
        printf("%d\t",bit);
        min_diff = (uint64_t)-1;
        for (i = 0; i < repeat; i++) {
            uint64_t cycles_start, cycles_final, cycles_diff;
            __asm volatile("" ::: /* pretend to clobber */ "memory");
            RDTSC_START(cycles_start);
            simdpackwithoutmaskd1(0,data,buffer, bit);
            RDTSC_FINAL(cycles_final);
            cycles_diff = (cycles_final - cycles_start);
            if (cycles_diff < min_diff) min_diff = cycles_diff;
        }
        printf("%.2f\t",min_diff*1.0/length);
        min_diff = (uint64_t)-1;
        for (i = 0; i < repeat; i++) {
            uint64_t cycles_start, cycles_final, cycles_diff;
            __asm volatile("" ::: /* pretend to clobber */ "memory");
            RDTSC_START(cycles_start);
            simdunpackd1(0,buffer, backdata,bit);
            RDTSC_FINAL(cycles_final);
            cycles_diff = (cycles_final - cycles_start);
            if (cycles_diff < min_diff) min_diff = cycles_diff;
        }
        printf("%.2f\t",min_diff*1.0/length);

        free(data);
        free(buffer);
        free(backdata);
        printf("\n");
    }
    printf("\n\n"); /* two blank lines are required by gnuplot */
}

#ifdef __AVX2__
void demo256() {
    const uint32_t length = 256;
    uint32_t bit;
    printf("# --- %s\n", __func__);
    printf("# compressing %d integers\n",length);
    printf("# format: bit width, pack in cycles per int, unpack in cycles per int\n");
    for(bit = 1; bit <= 32; ++bit) {
        uint32_t i;

        uint32_t * data = get_random_array_from_bit_width(length, bit);
        __m256i * buffer = malloc(length * sizeof(uint32_t));
        uint32_t * backdata = malloc(length * sizeof(uint32_t));
        uint32_t repeat = 500;
        uint64_t min_diff;
        printf("%d\t",bit);
        min_diff = (uint64_t)-1;
        for (i = 0; i < repeat; i++) {
            uint64_t cycles_start, cycles_final, cycles_diff;
            __asm volatile("" ::: /* pretend to clobber */ "memory");
            RDTSC_START(cycles_start);
            avxpackwithoutmask(data,buffer, bit);
            RDTSC_FINAL(cycles_final);
            cycles_diff = (cycles_final - cycles_start);
            if (cycles_diff < min_diff) min_diff = cycles_diff;
        }
        printf("%.2f\t",min_diff*1.0/length);
        min_diff = (uint64_t)-1;
        for (i = 0; i < repeat; i++) {
            uint64_t cycles_start, cycles_final, cycles_diff;
            __asm volatile("" ::: /* pretend to clobber */ "memory");
            RDTSC_START(cycles_start);
            avxunpack(buffer, backdata,bit);
            RDTSC_FINAL(cycles_final);
            cycles_diff = (cycles_final - cycles_start);
            if (cycles_diff < min_diff) min_diff = cycles_diff;
        }
        printf("%.2f\t",min_diff*1.0/length);

        free(data);
        free(buffer);
        free(backdata);
        printf("\n");
    }
    printf("\n\n"); /* two blank lines are required by gnuplot */
}
#endif /* avx 2 */


int main() {
    demo128();
    demo128_d1();
#ifdef __AVX2__
    demo256();
#endif
    return 0;
}
```
cpp/simdcomp example.c — 195 lines removed

```C
/* Type "make example" to build this example program. */
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include "simdcomp.h"

/**
We provide several different code examples.
**/


/* very simple test to illustrate a simple application */
int compress_decompress_demo() {
    size_t k, N = 9999;
    __m128i * endofbuf;
    int howmanybytes;
    float compratio;
    uint32_t * datain = malloc(N * sizeof(uint32_t));
    uint8_t * buffer;
    uint32_t * backbuffer = malloc(N * sizeof(uint32_t));
    uint32_t b;
    printf("== simple test\n");

    for (k = 0; k < N; ++k) { /* start with k=0, not k=1! */
        datain[k] = k;
    }

    b = maxbits_length(datain, N);
    buffer = malloc(simdpack_compressedbytes(N,b));
    endofbuf = simdpack_length(datain, N, (__m128i *)buffer, b);
    howmanybytes = (endofbuf-(__m128i *)buffer)*sizeof(__m128i); /* number of compressed bytes */
    compratio = N*sizeof(uint32_t) * 1.0 / howmanybytes;
    /* endofbuf points to the end of the compressed data */
    buffer = realloc(buffer,(endofbuf-(__m128i *)buffer)*sizeof(__m128i)); /* optional but safe. */
    printf("Compressed %d integers down to %d bytes (comp. ratio = %f).\n",(int)N,howmanybytes,compratio);
    /* in actual applications b must be stored and retrieved: caller is responsible for that. */
    simdunpack_length((const __m128i *)buffer, N, backbuffer, b); /* will return a pointer to endofbuf */

    for (k = 0; k < N; ++k) {
        if(datain[k] != backbuffer[k]) {
            printf("bug at %lu \n",(unsigned long)k);
            return -1;
        }
    }
    printf("Code works!\n");
    free(datain);
    free(buffer);
    free(backbuffer);
    return 0;
}


/* compresses data from datain to buffer, returns how many bytes written
   used below in simple_demo */
size_t compress(uint32_t * datain, size_t length, uint8_t * buffer) {
    uint32_t offset;
    uint8_t * initout;
    size_t k;
    if(length/SIMDBlockSize*SIMDBlockSize != length) {
        printf("Data length should be a multiple of %i \n",SIMDBlockSize);
    }
    offset = 0;
    initout = buffer;
    for(k = 0; k < length / SIMDBlockSize; ++k) {
        uint32_t b = simdmaxbitsd1(offset,
                                   datain + k * SIMDBlockSize);
        *buffer++ = b;
        simdpackwithoutmaskd1(offset, datain + k * SIMDBlockSize, (__m128i *) buffer,
                              b);
        offset = datain[k * SIMDBlockSize + SIMDBlockSize - 1];
        buffer += b * sizeof(__m128i);
    }
    return buffer - initout;
}

/* Another illustration ... */
void simple_demo() {
    size_t REPEAT = 10, gap;
    size_t N = 1000 * SIMDBlockSize;/* SIMDBlockSize is 128 */
    uint32_t * datain = malloc(N * sizeof(uint32_t));
    size_t compsize;
    clock_t start, end;
    uint8_t * buffer = malloc(N * sizeof(uint32_t) + N / SIMDBlockSize); /* output buffer */
    uint32_t * backbuffer = malloc(SIMDBlockSize * sizeof(uint32_t));
    printf("== simple demo\n");
    for (gap = 1; gap <= 243; gap *= 3) {
        size_t k, repeat;
        uint32_t offset = 0;
        uint32_t bogus = 0;
        double numberofseconds;

        printf("\n");
        printf(" gap = %lu \n", (unsigned long) gap);
        datain[0] = 0;
        for (k = 1; k < N; ++k)
            datain[k] = datain[k-1] + ( rand() % (gap + 1) );
        compsize = compress(datain,N,buffer);
        printf("compression ratio = %f \n", (N * sizeof(uint32_t))/ (compsize * 1.0 ));
        start = clock();
        for(repeat = 0; repeat < REPEAT; ++repeat) {
            uint8_t * decbuffer = buffer;
            for (k = 0; k * SIMDBlockSize < N; ++k) {
                uint8_t b = *decbuffer++;
                simdunpackd1(offset, (__m128i *) decbuffer, backbuffer, b);
                /* do something here with backbuffer */
                bogus += backbuffer[3];
                decbuffer += b * sizeof(__m128i);
                offset = backbuffer[SIMDBlockSize - 1];
            }
        }
        end = clock();
        numberofseconds = (end-start)/(double)CLOCKS_PER_SEC;
        printf("decoding speed in million of integers per second %f \n",N*REPEAT/(numberofseconds*1000.0*1000.0));
        start = clock();
        for(repeat = 0; repeat < REPEAT; ++repeat) {
            uint8_t * decbuffer = buffer;
            for (k = 0; k * SIMDBlockSize < N; ++k) {
                memcpy(backbuffer,decbuffer+k*SIMDBlockSize,SIMDBlockSize*sizeof(uint32_t));
                bogus += backbuffer[3] - backbuffer[100];
            }
        }
        end = clock();
        numberofseconds = (end-start)/(double)CLOCKS_PER_SEC;
        printf("memcpy speed in million of integers per second %f \n",N*REPEAT/(numberofseconds*1000.0*1000.0));
        printf("ignore me %i \n",bogus);
        printf("All tests are in CPU cache. Avoid out-of-cache decoding in applications.\n");
    }
    free(buffer);
    free(datain);
    free(backbuffer);
}

/* Used below in more_sophisticated_demo ... */
size_t varying_bit_width_compress(uint32_t * datain, size_t length, uint8_t * buffer) {
    uint8_t * initout;
    size_t k;
    if(length/SIMDBlockSize*SIMDBlockSize != length) {
        printf("Data length should be a multiple of %i \n",SIMDBlockSize);
    }
    initout = buffer;
    for(k = 0; k < length / SIMDBlockSize; ++k) {
        uint32_t b = maxbits(datain);
        *buffer++ = b;
        simdpackwithoutmask(datain, (__m128i *)buffer, b);
        datain += SIMDBlockSize;
        buffer += b * sizeof(__m128i);
    }
    return buffer - initout;
}

/* Here we compress the data in blocks of 128 integers with varying bit width */
int varying_bit_width_demo() {
    size_t nn = 128 * 2;
    uint32_t * datainn = malloc(nn * sizeof(uint32_t));
    uint8_t * buffern = malloc(nn * sizeof(uint32_t) + nn / SIMDBlockSize);
    uint8_t * initbuffern = buffern;
    uint32_t * backbuffern = malloc(nn * sizeof(uint32_t));
    size_t k, compsize;
    printf("== varying bit-width demo\n");

    for(k=0; k<nn; ++k) {
        datainn[k] = rand() % (k + 1);
    }

    compsize = varying_bit_width_compress(datainn,nn,buffern);
    printf("encoded size: %u (original size: %u)\n", (unsigned)compsize,
           (unsigned)(nn * sizeof(uint32_t)));

    for (k = 0; k * SIMDBlockSize < nn; ++k) {
        uint32_t b = *buffern;
        buffern++;
        simdunpack((const __m128i *)buffern, backbuffern + k * SIMDBlockSize, b);
        buffern += b * sizeof(__m128i);
    }

    for (k = 0; k < nn; ++k) {
        if(backbuffern[k] != datainn[k]) {
            printf("bug\n");
            return -1;
        }
    }
    printf("Code works!\n");
    free(datainn);
    free(initbuffern);
    free(backbuffern);
    return 0;
}

int main() {
    if(compress_decompress_demo() != 0) return -1;
    if(varying_bit_width_demo() != 0) return -1;
    simple_demo();
    return 0;
}
```
simdcomp Go demo README — 13 lines removed

Simple Go demo
==============

Setup
======

Start by installing the simdcomp library (make && make install).

Then type:

    go run test.go
simdcomp Go demo (test.go) — 71 lines removed

```go
/////////
// This particular file is in the public domain.
// Author: Daniel Lemire
////////

package main

/*
#cgo LDFLAGS: -lsimdcomp
#include <simdcomp.h>
*/
import "C"
import "fmt"

//////////
// For this demo, we pack and unpack blocks of 128 integers
/////////
func main() {
	// I am going to use C types. Alternative might be to use unsafe.Pointer calls, see http://bit.ly/1ndw3W3
	// this is our original data
	var data [128]C.uint32_t
	for i := C.uint32_t(0); i < C.uint32_t(128); i++ {
		data[i] = i
	}

	////////////
	// We first pack without differential coding
	///////////
	// computing how many bits per int. is needed
	b := C.maxbits(&data[0])
	ratio := 32.0 / float64(b)
	fmt.Println("Bit width ", b)
	fmt.Println(fmt.Sprintf("Compression ratio %f ", ratio))
	// we are now going to create a buffer to receive the packed data (each __m128i uses 128 bits)
	out := make([]C.__m128i, b)
	C.simdpackwithoutmask(&data[0], &out[0], b)
	var recovereddata [128]C.uint32_t
	C.simdunpack(&out[0], &recovereddata[0], b)
	for i := 0; i < 128; i++ {
		if data[i] != recovereddata[i] {
			fmt.Println("Bug ")
			return
		}
	}

	///////////
	// Next, we use differential coding
	//////////
	offset := C.uint32_t(0) // if you pack data from K to K + 128, offset should be the value at K-1. When K = 0, choose a default
	b1 := C.simdmaxbitsd1(offset, &data[0])
	ratio1 := 32.0 / float64(b1)
	fmt.Println("Bit width ", b1)
	fmt.Println(fmt.Sprintf("Compression ratio %f ", ratio1))
	// we are now going to create a buffer to receive the packed data (each __m128i uses 128 bits)
	out = make([]C.__m128i, b1)
	C.simdpackwithoutmaskd1(offset, &data[0], &out[0], b1)
	C.simdunpackd1(offset, &out[0], &recovereddata[0], b1)
	for i := 0; i < 128; i++ {
		if data[i] != recovereddata[i] {
			fmt.Println("Bug ")
			return
		}
	}

	fmt.Println("test succesful.")
}
```
simdcomp avxbitpacking.h — 40 lines removed

```C
/**
 * This code is released under a BSD License.
 */

#ifndef INCLUDE_AVXBITPACKING_H_
#define INCLUDE_AVXBITPACKING_H_

#ifdef __AVX2__

#include "portability.h"

/* AVX2 is required */
#include <immintrin.h>
/* for memset */
#include <string.h>

#include "simdcomputil.h"

enum{ AVXBlockSize = 256};

/* max integer logarithm over a range of AVXBlockSize integers (256 integers) */
uint32_t avxmaxbits(const uint32_t * begin);

/* reads 256 values from "in", writes "bit" 256-bit vectors to "out" */
void avxpack(const uint32_t * in,__m256i * out, const uint32_t bit);

/* reads 256 values from "in", writes "bit" 256-bit vectors to "out" */
void avxpackwithoutmask(const uint32_t * in,__m256i * out, const uint32_t bit);

/* reads "bit" 256-bit vectors from "in", writes 256 values to "out" */
void avxunpack(const __m256i * in,uint32_t * out, const uint32_t bit);

#endif /* __AVX2__ */

#endif /* INCLUDE_AVXBITPACKING_H_ */
```
simdcomp portability header — 81 lines removed

```C
/**
 * This code is released under a BSD License.
 */
#ifndef SIMDBITCOMPAT_H_
#define SIMDBITCOMPAT_H_

#include <iso646.h> /* mostly for Microsoft compilers */
#include <string.h>

#if SIMDCOMP_DEBUG
# define SIMDCOMP_ALWAYS_INLINE inline
# define SIMDCOMP_NEVER_INLINE
# define SIMDCOMP_PURE
#else
# if defined(__GNUC__)
#  if __GNUC__ >= 3
#   define SIMDCOMP_ALWAYS_INLINE inline __attribute__((always_inline))
#   define SIMDCOMP_NEVER_INLINE __attribute__((noinline))
#   define SIMDCOMP_PURE __attribute__((pure))
#  else
#   define SIMDCOMP_ALWAYS_INLINE inline
#   define SIMDCOMP_NEVER_INLINE
#   define SIMDCOMP_PURE
#  endif
# elif defined(_MSC_VER)
#  define SIMDCOMP_ALWAYS_INLINE __forceinline
#  define SIMDCOMP_NEVER_INLINE
#  define SIMDCOMP_PURE
# else
#  if __has_attribute(always_inline)
#   define SIMDCOMP_ALWAYS_INLINE inline __attribute__((always_inline))
#  else
#   define SIMDCOMP_ALWAYS_INLINE inline
#  endif
#  if __has_attribute(noinline)
#   define SIMDCOMP_NEVER_INLINE __attribute__((noinline))
#  else
#   define SIMDCOMP_NEVER_INLINE
#  endif
#  if __has_attribute(pure)
#   define SIMDCOMP_PURE __attribute__((pure))
#  else
#   define SIMDCOMP_PURE
#  endif
# endif
#endif

#if defined(_MSC_VER) && _MSC_VER < 1600
typedef unsigned int uint32_t;
typedef unsigned char uint8_t;
typedef signed char int8_t;
#else
#include <stdint.h> /* part of Visual Studio 2010 and better, others likely anyway */
#endif

#if defined(_MSC_VER)
#define SIMDCOMP_ALIGNED(x) __declspec(align(x))
#else
#if defined(__GNUC__)
#define SIMDCOMP_ALIGNED(x) __attribute__ ((aligned(x)))
#endif
#endif

#if defined(_MSC_VER)
# include <intrin.h>
/* 64-bit needs extending */
# define SIMDCOMP_CTZ(result, mask) do { \
    unsigned long index; \
    if (!_BitScanForward(&(index), (mask))) { \
      (result) = 32U; \
    } else { \
      (result) = (uint32_t)(index); \
    } \
  } while (0)
#else
# define SIMDCOMP_CTZ(result, mask) \
    result = __builtin_ctz(mask)
#endif

#endif /* SIMDBITCOMPAT_H_ */
```
@@ -1,72 +0,0 @@
/**
 * This code is released under a BSD License.
 */
#ifndef SIMDBITPACKING_H_
#define SIMDBITPACKING_H_

#include "portability.h"

/* SSE2 is required */
#include <emmintrin.h>
/* for memset */
#include <string.h>

#include "simdcomputil.h"

/***
 * Please see example.c for various examples on how to make good use
 * of these functions.
 */


/* reads 128 values from "in", writes "bit" 128-bit vectors to "out".
 * The input values are masked so that only the least significant "bit" bits are used. */
void simdpack(const uint32_t * in, __m128i * out, const uint32_t bit);

/* reads 128 values from "in", writes "bit" 128-bit vectors to "out".
 * The input values are assumed to be less than 1<<bit. */
void simdpackwithoutmask(const uint32_t * in, __m128i * out, const uint32_t bit);

/* reads "bit" 128-bit vectors from "in", writes 128 values to "out" */
void simdunpack(const __m128i * in, uint32_t * out, const uint32_t bit);


/* how many compressed bytes are needed to compress "length" integers using a bit width of "bit" with
   the simdpack_length function. */
int simdpack_compressedbytes(int length, const uint32_t bit);

/* like simdpack, but supports an undetermined number of inputs.
 * This is useful if you need to pack an array of integers whose length is not divisible by 128.
 * Returns a pointer to the (advanced) compressed array. Compressed data is stored in the memory location between
   the provided (out) pointer and the returned pointer. */
__m128i * simdpack_length(const uint32_t * in, size_t length, __m128i * out, const uint32_t bit);

/* like simdunpack, but supports an undetermined number of inputs.
 * This is useful if you need to unpack an array of integers whose length is not divisible by 128.
 * Returns a pointer to the (advanced) compressed array. The read compressed data is between the provided
   (in) pointer and the returned pointer. */
const __m128i * simdunpack_length(const __m128i * in, size_t length, uint32_t * out, const uint32_t bit);


/* like simdpack, but supports an undetermined small number of inputs. This is useful if you need to pack fewer
   than 128 integers.
 * Note that this function is much slower.
 * Returns a pointer to the (advanced) compressed array. Compressed data is stored in the memory location
   between the provided (out) pointer and the returned pointer. */
__m128i * simdpack_shortlength(const uint32_t * in, int length, __m128i * out, const uint32_t bit);

/* like simdunpack, but supports an undetermined small number of inputs. This is useful if you need to unpack fewer
   than 128 integers.
 * Note that this function is much slower.
 * Returns a pointer to the (advanced) compressed array. The read compressed data is between the provided (in)
   pointer and the returned pointer. */
const __m128i * simdunpack_shortlength(const __m128i * in, int length, uint32_t * out, const uint32_t bit);

/* given a block of 128 packed values, this function sets the value at index "index" to "value" */
void simdfastset(__m128i * in128, uint32_t b, uint32_t value, size_t index);

#endif /* SIMDBITPACKING_H_ */
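The prototypes above describe the fixed 128-integer block API. A minimal round-trip sketch follows, assuming the library's headers and objects are available; the harness itself is hypothetical and mirrors the pattern used in example.c and the unit tests.

/* round-trip one 128-integer block: measure width, pack, unpack, verify */
#include <stdio.h>
#include "simdcomp.h"

int main(void) {
    uint32_t data[128], back[128];
    __m128i buffer[32]; /* worst case bit = 32 -> 32 vectors of 128 bits */
    uint32_t i, b;
    for (i = 0; i < 128; ++i) data[i] = i * 7;
    b = maxbits(data);                    /* bit width needed for the block */
    simdpackwithoutmask(data, buffer, b); /* 128 ints -> b 128-bit vectors */
    simdunpack(buffer, back, b);          /* decode back to 128 ints */
    for (i = 0; i < 128; ++i)
        if (back[i] != data[i]) { printf("mismatch\n"); return 1; }
    printf("ok, bit width = %u\n", b);
    return 0;
}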
@@ -1,22 +0,0 @@
/**
 * This code is released under a BSD License.
 */

#ifndef SIMDCOMP_H_
#define SIMDCOMP_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "simdbitpacking.h"
#include "simdcomputil.h"
#include "simdfor.h"
#include "simdintegratedbitpacking.h"
#include "avxbitpacking.h"

#ifdef __cplusplus
} // extern "C"
#endif

#endif
@@ -1,54 +0,0 @@
/**
 * This code is released under a BSD License.
 */

#ifndef SIMDCOMPUTIL_H_
#define SIMDCOMPUTIL_H_

#include "portability.h"

/* SSE2 is required */
#include <emmintrin.h>


/* returns the integer logarithm of v (bit width) */
uint32_t bits(const uint32_t v);

/* max integer logarithm over a range of SIMDBlockSize integers (128 integers) */
uint32_t maxbits(const uint32_t * begin);

/* same as maxbits, but we specify the number of integers */
uint32_t maxbits_length(const uint32_t * in, uint32_t length);

enum { SIMDBlockSize = 128 };

/* computes (quickly) the minimal value of 128 values */
uint32_t simdmin(const uint32_t * in);

/* computes (quickly) the minimal value of the specified number of values */
uint32_t simdmin_length(const uint32_t * in, uint32_t length);

#ifdef __SSE4_1__
/* computes (quickly) the minimal and maximal value of the specified number of values */
void simdmaxmin_length(const uint32_t * in, uint32_t length, uint32_t * getmin, uint32_t * getmax);

/* computes (quickly) the minimal and maximal value of the 128 values */
void simdmaxmin(const uint32_t * in, uint32_t * getmin, uint32_t * getmax);

#endif

/* like maxbits over 128 integers (SIMDBlockSize) with provided initial value
   and using differential coding */
uint32_t simdmaxbitsd1(uint32_t initvalue, const uint32_t * in);

/* like simdmaxbitsd1, but calculates maxbits over |length| integers
   with provided initial value. |length| can be any arbitrary value. */
uint32_t simdmaxbitsd1_length(uint32_t initvalue, const uint32_t * in,
                uint32_t length);

#endif /* SIMDCOMPUTIL_H_ */
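To make the bits() contract concrete: bits(v) is the smallest b such that v < (1 << b). A scalar reference sketch follows (the library's own implementation uses __builtin_clz or _BitScanReverse instead of a loop):

/* scalar reference for bits(): smallest b with v < (1<<b) */
#include <stdint.h>
#include <stdio.h>

static uint32_t bits_ref(uint32_t v) {
    uint32_t b = 0;
    while (v) { ++b; v >>= 1; }
    return b;
}

int main(void) {
    /* prints 0 1 10: e.g. 1000 >= 1<<9 but 1000 < 1<<10 */
    printf("%u %u %u\n", bits_ref(0), bits_ref(1), bits_ref(1000));
    return 0;
}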
@@ -1,72 +0,0 @@
/**
 * This code is released under a BSD License.
 */
#ifndef INCLUDE_SIMDFOR_H_
#define INCLUDE_SIMDFOR_H_

#include "portability.h"

/* SSE2 is required */
#include <emmintrin.h>

#include "simdcomputil.h"
#include "simdbitpacking.h"

#ifdef __cplusplus
extern "C" {
#endif

/* reads 128 values from "in", writes "bit" 128-bit vectors to "out" */
void simdpackFOR(uint32_t initvalue, const uint32_t * in, __m128i * out, const uint32_t bit);

/* reads "bit" 128-bit vectors from "in", writes 128 values to "out" */
void simdunpackFOR(uint32_t initvalue, const __m128i * in, uint32_t * out, const uint32_t bit);

/* how many compressed bytes are needed to compress "length" integers using a bit width of "bit" with
   the simdpackFOR_length function. */
int simdpackFOR_compressedbytes(int length, const uint32_t bit);

/* like simdpackFOR, but supports an undetermined number of inputs.
   This is useful if you need to pack fewer than 128 integers. Note that this function is much slower.
   Compressed data is stored in the memory location between
   the provided (out) pointer and the returned pointer. */
__m128i * simdpackFOR_length(uint32_t initvalue, const uint32_t * in, int length, __m128i * out, const uint32_t bit);

/* like simdunpackFOR, but supports an undetermined number of inputs.
   This is useful if you need to unpack fewer than 128 integers. Note that this function is much slower.
   The read compressed data is between the provided
   (in) pointer and the returned pointer. */
const __m128i * simdunpackFOR_length(uint32_t initvalue, const __m128i * in, int length, uint32_t * out, const uint32_t bit);

/* returns the value stored at the specified "slot" */
uint32_t simdselectFOR(uint32_t initvalue, const __m128i *in, uint32_t bit,
                int slot);

/* given a block of 128 packed values, this function sets the value at index "index" to "value" */
void simdfastsetFOR(uint32_t initvalue, __m128i * in, uint32_t bit, uint32_t value, size_t index);

/* searches "bit" 128-bit vectors from "in" (= length<=128 encoded integers) for the first encoded uint32 value
 * which is >= |key|, and returns its position. It is assumed that the values
 * stored are in sorted order.
 * The encoded key is stored in "*presult".
 * Only the first "length" integers are decoded; the others are ignored.
 * "length" should be no larger than 128.
 * If no value is larger than or equal to the key, length is returned. */
int simdsearchwithlengthFOR(uint32_t initvalue, const __m128i *in, uint32_t bit,
                int length, uint32_t key, uint32_t *presult);

#ifdef __cplusplus
} // extern "C"
#endif

#endif /* INCLUDE_SIMDFOR_H_ */
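A minimal frame-of-reference (FOR) sketch for one 128-integer block, following the pattern of the library's own testFOR routine; it assumes an SSE4.1 build (simdmaxmin_length is only declared under __SSE4_1__) and the harness is illustrative only.

/* FOR round-trip: offsets from the block minimum are packed at width bits(max-min) */
#include <stdio.h>
#include "simdcomp.h"

int main(void) {
    uint32_t data[128], back[128], mn, mx, b, i;
    __m128i buffer[32];
    for (i = 0; i < 128; ++i) data[i] = 1000 + i * 3;
    simdmaxmin_length(data, SIMDBlockSize, &mn, &mx); /* block range */
    b = bits(mx - mn);                /* width of the offsets */
    simdpackFOR(mn, data, buffer, b); /* store data[i] - mn at width b */
    simdunpackFOR(mn, buffer, back, b);
    for (i = 0; i < 128; ++i)
        if (back[i] != data[i]) return 1;
    printf("FOR ok: min=%u width=%u\n", mn, b);
    return 0;
}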
@@ -1,98 +0,0 @@
/**
 * This code is released under a BSD License.
 */

#ifndef SIMD_INTEGRATED_BITPACKING_H
#define SIMD_INTEGRATED_BITPACKING_H

#include "portability.h"

/* SSE2 is required */
#include <emmintrin.h>

#include "simdcomputil.h"
#include "simdbitpacking.h"

#ifdef __cplusplus
extern "C" {
#endif

/* reads 128 values from "in", writes "bit" 128-bit vectors to "out".
   Integer values should be in sorted order (for best results).
   The differences are masked so that only the least significant "bit" bits are used. */
void simdpackd1(uint32_t initvalue, const uint32_t * in, __m128i * out, const uint32_t bit);

/* reads 128 values from "in", writes "bit" 128-bit vectors to "out".
   Integer values should be in sorted order (for best results).
   The difference values are assumed to be less than 1<<bit. */
void simdpackwithoutmaskd1(uint32_t initvalue, const uint32_t * in, __m128i * out, const uint32_t bit);

/* reads "bit" 128-bit vectors from "in", writes 128 values to "out" */
void simdunpackd1(uint32_t initvalue, const __m128i * in, uint32_t * out, const uint32_t bit);

/* searches "bit" 128-bit vectors from "in" (= 128 encoded integers) for the first encoded uint32 value
 * which is >= |key|, and returns its position. It is assumed that the values
 * stored are in sorted order.
 * The encoded key is stored in "*presult". If no value is larger than or equal to the key,
 * 128 is returned. The pointer initOffset points to the last four values decoded
 * (when starting out, this can be a zero vector or initialized with _mm_set1_epi32(init)),
 * and the vector gets updated.
 **/
int
simdsearchd1(__m128i * initOffset, const __m128i *in, uint32_t bit,
                uint32_t key, uint32_t *presult);

/* searches "bit" 128-bit vectors from "in" (= length<=128 encoded integers) for the first encoded uint32 value
 * which is >= |key|, and returns its position. It is assumed that the values
 * stored are in sorted order.
 * The encoded key is stored in "*presult".
 * Only the first "length" integers are decoded; the others are ignored.
 * "length" should be no larger than 128.
 * If no value is larger than or equal to the key, length is returned. */
int simdsearchwithlengthd1(uint32_t initvalue, const __m128i *in, uint32_t bit,
                int length, uint32_t key, uint32_t *presult);

/* returns the value stored at the specified "slot" */
uint32_t simdselectd1(uint32_t initvalue, const __m128i *in, uint32_t bit,
                int slot);

/* given a block of 128 packed values, this function sets the value at index "index" to "value";
 * you must somehow know the previous value.
 * Because of differential coding, all following values are incremented by the offset between this new
 * value and the old value.
 * This function is useful if you want to modify the last value.
 */
void simdfastsetd1fromprevious(__m128i * in, uint32_t bit, uint32_t previousvalue, uint32_t value, size_t index);

/* given a block of 128 packed values, this function sets the value at index "index" to "value".
 * This function computes the previous value if needed.
 * Because of differential coding, all following values are incremented by the offset between this new
 * value and the old value.
 * This function is useful if you want to modify the last value.
 */
void simdfastsetd1(uint32_t initvalue, __m128i * in, uint32_t bit, uint32_t value, size_t index);

/* Simply scan the data.
 * The pointer initOffset points to the last four values decoded
 * (when starting out, this can be a zero vector or initialized with _mm_set1_epi32(init)),
 * and the vector gets updated.
 */
void
simdscand1(__m128i * initOffset, const __m128i *in, uint32_t bit);

#ifdef __cplusplus
} // extern "C"
#endif

#endif
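A minimal differential-coding (d1) sketch for one sorted 128-integer block, mirroring the sorted-array part of the library's test() routine; the harness is illustrative only.

/* d1 round-trip: deltas between consecutive sorted values are packed */
#include <stdio.h>
#include "simdcomp.h"

int main(void) {
    uint32_t data[128], back[128], b, i, offset = 0;
    __m128i buffer[32];
    for (i = 0; i < 128; ++i) data[i] = i * 5; /* sorted input */
    b = simdmaxbitsd1(offset, data);           /* width of the deltas */
    simdpackwithoutmaskd1(offset, data, buffer, b);
    simdunpackd1(offset, buffer, back, b);
    for (i = 0; i < 128; ++i)
        if (back[i] != data[i]) return 1;
    printf("d1 ok: delta width = %u\n", b);
    return 0;
}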
@@ -1,79 +0,0 @@
# minimalist makefile
.SUFFIXES:
#
.SUFFIXES: .cpp .o .c .h
ifeq ($(DEBUG),1)
CFLAGS = -fPIC -std=c89 -ggdb -msse4.1 -march=native -Wall -Wextra -Wshadow -fsanitize=undefined -fno-omit-frame-pointer -fsanitize=address
else
CFLAGS = -fPIC -std=c89 -O3 -msse4.1 -march=native -Wall -Wextra -Wshadow
endif # debug
LDFLAGS = -shared
LIBNAME=libsimdcomp.so.0.0.3
all: unit unit_chars bitpackingbenchmark $(LIBNAME)
test:
	./unit
	./unit_chars
install: $(OBJECTS)
	cp $(LIBNAME) /usr/local/lib
	ln -s /usr/local/lib/$(LIBNAME) /usr/local/lib/libsimdcomp.so
	ldconfig
	cp $(HEADERS) /usr/local/include

HEADERS=./include/simdbitpacking.h ./include/simdcomputil.h ./include/simdintegratedbitpacking.h ./include/simdcomp.h ./include/simdfor.h ./include/avxbitpacking.h

uninstall:
	for h in $(HEADERS) ; do rm /usr/local/$$h; done
	rm /usr/local/lib/$(LIBNAME)
	rm /usr/local/lib/libsimdcomp.so
	ldconfig

OBJECTS= simdbitpacking.o simdintegratedbitpacking.o simdcomputil.o \
	simdpackedsearch.o simdpackedselect.o simdfor.o avxbitpacking.o

$(LIBNAME): $(OBJECTS)
	$(CC) $(CFLAGS) -o $(LIBNAME) $(OBJECTS) $(LDFLAGS)

avxbitpacking.o: ./src/avxbitpacking.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/avxbitpacking.c -Iinclude

simdfor.o: ./src/simdfor.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/simdfor.c -Iinclude

simdcomputil.o: ./src/simdcomputil.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/simdcomputil.c -Iinclude

simdbitpacking.o: ./src/simdbitpacking.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/simdbitpacking.c -Iinclude

simdintegratedbitpacking.o: ./src/simdintegratedbitpacking.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/simdintegratedbitpacking.c -Iinclude

simdpackedsearch.o: ./src/simdpackedsearch.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/simdpackedsearch.c -Iinclude

simdpackedselect.o: ./src/simdpackedselect.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/simdpackedselect.c -Iinclude

example: ./example.c $(HEADERS) $(OBJECTS)
	$(CC) $(CFLAGS) -o example ./example.c -Iinclude $(OBJECTS)

unit: ./tests/unit.c $(HEADERS) $(OBJECTS)
	$(CC) $(CFLAGS) -o unit ./tests/unit.c -Iinclude $(OBJECTS)

bitpackingbenchmark: ./benchmarks/bitpackingbenchmark.c $(HEADERS) $(OBJECTS)
	$(CC) $(CFLAGS) -o bitpackingbenchmark ./benchmarks/bitpackingbenchmark.c -Iinclude $(OBJECTS)
benchmark: ./benchmarks/benchmark.c $(HEADERS) $(OBJECTS)
	$(CC) $(CFLAGS) -o benchmark ./benchmarks/benchmark.c -Iinclude $(OBJECTS)
dynunit: ./tests/unit.c $(HEADERS) $(LIBNAME)
	$(CC) $(CFLAGS) -o dynunit ./tests/unit.c -Iinclude -lsimdcomp

unit_chars: ./tests/unit_chars.c $(HEADERS) $(OBJECTS)
	$(CC) $(CFLAGS) -o unit_chars ./tests/unit_chars.c -Iinclude $(OBJECTS)
clean:
	rm -f unit *.o $(LIBNAME) example benchmark bitpackingbenchmark dynunit unit_chars
@@ -1,104 +0,0 @@

!IFNDEF MACHINE
!IF "$(PROCESSOR_ARCHITECTURE)"=="AMD64"
MACHINE=x64
!ELSE
MACHINE=x86
!ENDIF
!ENDIF

!IFNDEF DEBUG
DEBUG=no
!ENDIF

!IFNDEF CC
CC=cl.exe
!ENDIF

!IFNDEF AR
AR=lib.exe
!ENDIF

!IFNDEF LINK
LINK=link.exe
!ENDIF

!IFNDEF PGO
PGO=no
!ENDIF

!IFNDEF PGI
PGI=no
!ENDIF

INC = /Iinclude

!IF "$(DEBUG)"=="yes"
CFLAGS = /nologo /MDd /LDd /Od /Zi /D_DEBUG /RTC1 /W3 /GS /Gm
ARFLAGS = /nologo
LDFLAGS = /nologo /debug /nodefaultlib:msvcrt
!ELSE
CFLAGS = /nologo /MD /O2 /Zi /DNDEBUG /W3 /Gm- /GS /Gy /Oi /GL /MP
ARFLAGS = /nologo /LTCG
LDFLAGS = /nologo /LTCG /DYNAMICBASE /incremental:no /debug /opt:ref,icf
!ENDIF

!IF "$(PGI)"=="yes"
LDFLAGS = $(LDFLAGS) /ltcg:pgi
!ENDIF

!IF "$(PGO)"=="yes"
LDFLAGS = $(LDFLAGS) /ltcg:pgo
!ENDIF

LIB_OBJS = simdbitpacking.obj simdintegratedbitpacking.obj simdcomputil.obj \
	simdpackedsearch.obj simdpackedselect.obj simdfor.obj

all: lib dll dynunit unit_chars example benchmark
# need some good use case scenario to train the instrumented build
	@if "$(PGI)"=="yes" echo Running PGO training
	@if "$(PGI)"=="yes" benchmark.exe >nul 2>&1
	@if "$(PGI)"=="yes" example.exe >nul 2>&1

$(LIB_OBJS):
	$(CC) $(INC) $(CFLAGS) /c src/simdbitpacking.c src/simdintegratedbitpacking.c src/simdcomputil.c \
		src/simdpackedsearch.c src/simdpackedselect.c src/simdfor.c

lib: $(LIB_OBJS)
	$(AR) $(ARFLAGS) /OUT:simdcomp_a.lib $(LIB_OBJS)

dll: $(LIB_OBJS)
	$(LINK) /DLL $(LDFLAGS) /OUT:simdcomp.dll /IMPLIB:simdcomp.lib /DEF:simdcomp.def $(LIB_OBJS)

unit: lib
	$(CC) $(INC) $(CFLAGS) /c src/unit.c
	$(LINK) $(LDFLAGS) /OUT:unit.exe unit.obj simdcomp_a.lib

dynunit: dll
	$(CC) $(INC) $(CFLAGS) /c src/unit.c
	$(LINK) $(LDFLAGS) /OUT:unit.exe unit.obj simdcomp.lib

unit_chars: lib
	$(CC) $(INC) $(CFLAGS) /c src/unit_chars.c
	$(LINK) $(LDFLAGS) /OUT:unit_chars.exe unit_chars.obj simdcomp.lib

example: lib
	$(CC) $(INC) $(CFLAGS) /c example.c
	$(LINK) $(LDFLAGS) /OUT:example.exe example.obj simdcomp.lib

benchmark: lib
	$(CC) $(INC) $(CFLAGS) /c src/benchmark.c
	$(LINK) $(LDFLAGS) /OUT:benchmark.exe benchmark.obj simdcomp.lib

clean:
	del /Q *.obj
	del /Q *.lib
	del /Q *.exe
	del /Q *.dll
	del /Q *.pgc
	del /Q *.pgd
	del /Q *.pdb
@@ -1,16 +0,0 @@
{
  "name": "simdcomp",
  "version": "0.0.3",
  "repo": "lemire/simdcomp",
  "description": "A simple C library for compressing lists of integers",
  "license": "BSD-3-Clause",
  "src": [
    "src/simdbitpacking.c",
    "src/simdcomputil.c",
    "src/simdintegratedbitpacking.c",
    "include/simdbitpacking.h",
    "include/simdcomp.h",
    "include/simdcomputil.h",
    "include/simdintegratedbitpacking.h"
  ]
}
@@ -1,182 +0,0 @@
#!/usr/bin/env python
import sys

def howmany(bit):
    """ how many values are we going to pack? """
    return 256

def howmanywords(bit):
    return (howmany(bit) * bit + 255) // 256

def howmanybytes(bit):
    return howmanywords(bit) * 16

print("""
/** code generated by avxpacking.py starts here **/
""")

print("""typedef void (*avxpackblockfnc)(const uint32_t * pin, __m256i * compressed);""")
print("""typedef void (*avxunpackblockfnc)(const __m256i * compressed, uint32_t * pout);""")


def plural(number):
    if number != 1:
        return "s"
    else:
        return ""

print("")
print("static void avxpackblock0(const uint32_t * pin, __m256i * compressed) {")
print(" (void)compressed;")
print(" (void) pin; /* we consumed {0} 32-bit integer{1} */ ".format(howmany(0), plural(howmany(0))))
print("}")
print("")

for bit in range(1, 33):
    print("")
    print("/* we are going to pack {0} {1}-bit values, touching {2} 256-bit words, using {3} bytes */ ".format(howmany(bit), bit, howmanywords(bit), howmanybytes(bit)))
    print("static void avxpackblock{0}(const uint32_t * pin, __m256i * compressed) {{".format(bit))
    print(" const __m256i * in = (const __m256i *) pin;")
    print(" /* we are going to touch {0} 256-bit word{1} */ ".format(howmanywords(bit), plural(howmanywords(bit))))
    if howmanywords(bit) == 1:
        print(" __m256i w0;")
    else:
        print(" __m256i w0, w1;")
    if (bit & (bit - 1)) != 0:
        print(" __m256i tmp; /* used to store inputs at word boundary */")
    oldword = 0
    for j in range(howmany(bit) // 8):
        firstword = j * bit // 32
        if firstword > oldword:
            print(" _mm256_storeu_si256(compressed + {0}, w{1});".format(oldword, oldword % 2))
            oldword = firstword
        secondword = (j * bit + bit - 1) // 32
        firstshift = (j * bit) % 32
        if firstword == secondword:
            if firstshift == 0:
                print(" w{0} = _mm256_lddqu_si256 (in + {1});".format(firstword % 2, j))
            else:
                print(" w{0} = _mm256_or_si256(w{0},_mm256_slli_epi32(_mm256_lddqu_si256 (in + {1}) , {2}));".format(firstword % 2, j, firstshift))
        else:
            print(" tmp = _mm256_lddqu_si256 (in + {0});".format(j))
            print(" w{0} = _mm256_or_si256(w{0},_mm256_slli_epi32(tmp , {2}));".format(firstword % 2, j, firstshift))
            secondshift = 32 - firstshift
            print(" w{0} = _mm256_srli_epi32(tmp,{2});".format(secondword % 2, j, secondshift))
    print(" _mm256_storeu_si256(compressed + {0}, w{1});".format(secondword, secondword % 2))
    print("}")
    print("")


print("")
print("static void avxpackblockmask0(const uint32_t * pin, __m256i * compressed) {")
print(" (void)compressed;")
print(" (void) pin; /* we consumed {0} 32-bit integer{1} */ ".format(howmany(0), plural(howmany(0))))
print("}")
print("")

for bit in range(1, 33):
    print("")
    print("/* we are going to pack {0} {1}-bit values, touching {2} 256-bit words, using {3} bytes */ ".format(howmany(bit), bit, howmanywords(bit), howmanybytes(bit)))
    print("static void avxpackblockmask{0}(const uint32_t * pin, __m256i * compressed) {{".format(bit))
    print(" /* we are going to touch {0} 256-bit word{1} */ ".format(howmanywords(bit), plural(howmanywords(bit))))
    if howmanywords(bit) == 1:
        print(" __m256i w0;")
    else:
        print(" __m256i w0, w1;")
    print(" const __m256i * in = (const __m256i *) pin;")
    if bit < 32:
        print(" const __m256i mask = _mm256_set1_epi32({0});".format((1 << bit) - 1))
    def maskfnc(x):
        if bit == 32:
            return x
        return " _mm256_and_si256 ( mask, {0}) ".format(x)
    if (bit & (bit - 1)) != 0:
        print(" __m256i tmp; /* used to store inputs at word boundary */")
    oldword = 0
    for j in range(howmany(bit) // 8):
        firstword = j * bit // 32
        if firstword > oldword:
            print(" _mm256_storeu_si256(compressed + {0}, w{1});".format(oldword, oldword % 2))
            oldword = firstword
        secondword = (j * bit + bit - 1) // 32
        firstshift = (j * bit) % 32
        loadstr = maskfnc(" _mm256_lddqu_si256 (in + {0}) ".format(j))
        if firstword == secondword:
            if firstshift == 0:
                print(" w{0} = {1};".format(firstword % 2, loadstr))
            else:
                print(" w{0} = _mm256_or_si256(w{0},_mm256_slli_epi32({1} , {2}));".format(firstword % 2, loadstr, firstshift))
        else:
            print(" tmp = {0};".format(loadstr))
            print(" w{0} = _mm256_or_si256(w{0},_mm256_slli_epi32(tmp , {2}));".format(firstword % 2, j, firstshift))
            secondshift = 32 - firstshift
            print(" w{0} = _mm256_srli_epi32(tmp,{2});".format(secondword % 2, j, secondshift))
    print(" _mm256_storeu_si256(compressed + {0}, w{1});".format(secondword, secondword % 2))
    print("}")
    print("")


print("static void avxunpackblock0(const __m256i * compressed, uint32_t * pout) {")
print(" (void) compressed;")
print(" memset(pout,0,{0});".format(howmany(0)))
print("}")
print("")

for bit in range(1, 33):
    print("")
    print("/* we packed {0} {1}-bit values, touching {2} 256-bit words, using {3} bytes */ ".format(howmany(bit), bit, howmanywords(bit), howmanybytes(bit)))
    print("static void avxunpackblock{0}(const __m256i * compressed, uint32_t * pout) {{".format(bit))
    print(" /* we are going to access {0} 256-bit word{1} */ ".format(howmanywords(bit), plural(howmanywords(bit))))
    if howmanywords(bit) == 1:
        print(" __m256i w0;")
    else:
        print(" __m256i w0, w1;")
    print(" __m256i * out = (__m256i *) pout;")
    if bit < 32:
        print(" const __m256i mask = _mm256_set1_epi32({0});".format((1 << bit) - 1))
    maskstr = " _mm256_and_si256 ( mask, {0}) "
    if bit == 32:
        maskstr = " {0} "  # no need
    oldword = 0
    print(" w0 = _mm256_lddqu_si256 (compressed);")
    for j in range(howmany(bit) // 8):
        firstword = j * bit // 32
        secondword = (j * bit + bit - 1) // 32
        if secondword > oldword:
            print(" w{0} = _mm256_lddqu_si256 (compressed + {1});".format(secondword % 2, secondword))
            oldword = secondword
        firstshift = (j * bit) % 32
        firstshiftstr = "_mm256_srli_epi32( w{0} , " + str(firstshift) + ") "
        if firstshift == 0:
            firstshiftstr = " w{0} "  # no need
        wfirst = firstshiftstr.format(firstword % 2)
        if firstword == secondword:
            if firstshift + bit != 32:
                wfirst = maskstr.format(wfirst)
            print(" _mm256_storeu_si256(out + {0}, {1});".format(j, wfirst))
        else:
            secondshift = 32 - firstshift
            wsecond = "_mm256_slli_epi32( w{0} , {1} ) ".format((firstword + 1) % 2, secondshift)
            wfirstorsecond = " _mm256_or_si256 ({0},{1}) ".format(wfirst, wsecond)
            wfirstorsecond = maskstr.format(wfirstorsecond)
            print(" _mm256_storeu_si256(out + {0},\n {1});".format(j, wfirstorsecond))
    print("}")
    print("")


print("static avxpackblockfnc avxfuncPackArr[] = {")
for bit in range(0, 32):
    print("&avxpackblock{0},".format(bit))
print("&avxpackblock32")
print("};")

print("static avxpackblockfnc avxfuncPackMaskArr[] = {")
for bit in range(0, 32):
    print("&avxpackblockmask{0},".format(bit))
print("&avxpackblockmask32")
print("};")

print("static avxunpackblockfnc avxfuncUnpackArr[] = {")
for bit in range(0, 32):
    print("&avxunpackblock{0},".format(bit))
print("&avxunpackblock32")
print("};")
print("/** code generated by avxpacking.py ends here **/")
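The script above emits one specialized packer per bit width plus function-pointer tables (avxfuncPackArr and friends), so the generated code can dispatch on the bit width with a single indirect call rather than a 33-way switch. A self-contained sketch of that dispatch pattern follows; the two toy "packers" are hypothetical stand-ins for the generated bodies, which live as static functions in the generated translation unit.

/* dispatch-by-bit-width sketch (toy packers; compile with AVX enabled) */
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

typedef void (*avxpackblockfnc)(const uint32_t * pin, __m256i * compressed);

static void packer0(const uint32_t *pin, __m256i *c) { (void)pin; (void)c; }
static void packer1(const uint32_t *pin, __m256i *c) {
    /* a real avxpackblock1 would fold 256 1-bit values into one __m256i */
    c[0] = _mm256_loadu_si256((const __m256i *)pin);
}

int main(void) {
    static avxpackblockfnc table[2] = { &packer0, &packer1 };
    uint32_t in[256] = {0};
    __m256i out[1];
    table[1](in, out); /* select the routine for bit width 1 */
    printf("dispatched\n");
    return 0;
}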
@@ -1,152 +0,0 @@
#!/usr/bin/env python3

from math import ceil

print("""
/**
 * Blablabla
 *
 */

""")

def mask(bit):
    return str((1 << bit) - 1)

for length in [32]:
    print("""
static __m128i iunpackFOR0(__m128i initOffset, const __m128i * _in , uint32_t * _out) {
    __m128i *out = (__m128i*)(_out);
    int i;
    (void) _in;
    for (i = 0; i < 8; ++i) {
        _mm_store_si128(out++, initOffset);
        _mm_store_si128(out++, initOffset);
        _mm_store_si128(out++, initOffset);
        _mm_store_si128(out++, initOffset);
    }

    return initOffset;
}

""")
    print("""

static void ipackFOR0(__m128i initOffset , const uint32_t * _in , __m128i * out ) {
    (void) initOffset;
    (void) _in;
    (void) out;
}
""")

for bit in range(1, 33):
    offsetVar = " initOffset"
    print("""
static void ipackFOR""" + str(bit) + """(__m128i """ + offsetVar + """, const uint32_t * _in, __m128i * out) {
    const __m128i *in = (const __m128i*)(_in);
    __m128i OutReg;

""")

    if bit != 32:
        print(" __m128i CurrIn = _mm_load_si128(in);")
        print(" __m128i InReg = _mm_sub_epi32(CurrIn, initOffset);")
    else:
        print(" __m128i InReg = _mm_load_si128(in);")
        print(" (void) initOffset;")

    inwordpointer = 0
    valuecounter = 0
    for k in range(ceil((length * bit) / 32)):
        if valuecounter == length:
            break
        for x in range(inwordpointer, 32, bit):
            if x != 0:
                print(" OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, " + str(x) + "));")
            else:
                print(" OutReg = InReg; ")
            if x + bit >= 32:
                while inwordpointer < 32:
                    inwordpointer += bit
                print(" _mm_store_si128(out, OutReg);")
                print("")
                if valuecounter + 1 < length:
                    print(" ++out;")
                inwordpointer -= 32
                if inwordpointer > 0:
                    print(" OutReg = _mm_srli_epi32(InReg, " + str(bit) + " - " + str(inwordpointer) + ");")
            if valuecounter + 1 < length:
                print(" ++in;")
                if bit != 32:
                    print(" CurrIn = _mm_load_si128(in);")
                    print(" InReg = _mm_sub_epi32(CurrIn, initOffset);")
                else:
                    print(" InReg = _mm_load_si128(in);")
                print("")
            valuecounter = valuecounter + 1
            if valuecounter == length:
                break
    assert valuecounter == length
    print("\n}\n\n")

for bit in range(1, 32):
    offsetVar = " initOffset"
    print("""\n
static __m128i iunpackFOR""" + str(bit) + """(__m128i """ + offsetVar + """, const __m128i* in, uint32_t * _out) {
""")
    print("""    __m128i* out = (__m128i*)(_out);
    __m128i InReg = _mm_load_si128(in);
    __m128i OutReg;
    __m128i tmp;
    const __m128i mask = _mm_set1_epi32((1U<<""" + str(bit) + """)-1);

""")

    MainText = ""
    MainText += "\n"
    inwordpointer = 0
    valuecounter = 0
    for k in range(ceil((length * bit) / 32)):
        for x in range(inwordpointer, 32, bit):
            if valuecounter == length:
                break
            if x > 0:
                MainText += " tmp = _mm_srli_epi32(InReg," + str(x) + ");\n"
            else:
                MainText += " tmp = InReg;\n"
            if x + bit < 32:
                MainText += " OutReg = _mm_and_si128(tmp, mask);\n"
            else:
                MainText += " OutReg = tmp;\n"
            if x + bit >= 32:
                while inwordpointer < 32:
                    inwordpointer += bit
                if valuecounter + 1 < length:
                    MainText += " ++in;"
                    MainText += " InReg = _mm_load_si128(in);\n"
                inwordpointer -= 32
                if inwordpointer > 0:
                    MainText += " OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, " + str(bit) + "-" + str(inwordpointer) + "), mask));\n\n"
            if bit != 32:
                MainText += " OutReg = _mm_add_epi32(OutReg, initOffset);\n"
            MainText += " _mm_store_si128(out++, OutReg);\n\n"
            MainText += ""
            valuecounter = valuecounter + 1
            if valuecounter == length:
                break
    assert valuecounter == length
    print(MainText)
    print(" return initOffset;")
    print("\n}\n\n")

print("""
static __m128i iunpackFOR32(__m128i initvalue , const __m128i* in, uint32_t * _out) {
    __m128i * mout = (__m128i *)_out;
    __m128i invec;
    size_t k;
    for(k = 0; k < 128/4; ++k) {
        invec = _mm_load_si128(in++);
        _mm_store_si128(mout++, invec);
    }
    return invec;
}
""")
@@ -1,40 +0,0 @@
EXPORTS
simdpack
simdpackwithoutmask
simdunpack
bits
maxbits
maxbits_length
simdmin
simdmin_length
simdmaxmin
simdmaxmin_length
simdmaxbitsd1
simdmaxbitsd1_length
simdpackd1
simdpackwithoutmaskd1
simdunpackd1
simdsearchd1
simdsearchwithlengthd1
simdselectd1
simdpackFOR
simdselectFOR
simdsearchwithlengthFOR
simdunpackFOR
simdpack_length
simdpackFOR_length
simdunpackFOR_length
simdpack_shortlength
simdfastsetFOR
simdfastset
simdfastsetd1
simdunpack_length
simdunpack_shortlength
simdscand1
simdfastsetd1fromprevious
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,234 +0,0 @@
/**
 * This code is released under a BSD License.
 */

#include "simdcomputil.h"
#ifdef __SSE4_1__
#include <smmintrin.h>
#endif
#include <assert.h>

#define Delta(curr, prev) \
    _mm_sub_epi32(curr, \
                  _mm_or_si128(_mm_slli_si128(curr, 4), _mm_srli_si128(prev, 12)))

/* returns the integer logarithm of v (bit width) */
uint32_t bits(const uint32_t v) {
#ifdef _MSC_VER
    unsigned long answer;
    if (v == 0) {
        return 0;
    }
    _BitScanReverse(&answer, v);
    return answer + 1;
#else
    return v == 0 ? 0 : 32 - __builtin_clz(v); /* assume GCC-like compiler if not Microsoft */
#endif
}


static uint32_t maxbitas32int(const __m128i accumulator) {
    /* fold the four 32-bit lanes together with OR, then take the bit width */
    const __m128i _tmp1 = _mm_or_si128(_mm_srli_si128(accumulator, 8), accumulator);
    const __m128i _tmp2 = _mm_or_si128(_mm_srli_si128(_tmp1, 4), _tmp1);
    uint32_t ans = _mm_cvtsi128_si32(_tmp2);
    return bits(ans);
}

SIMDCOMP_PURE uint32_t maxbits(const uint32_t * begin) {
    const __m128i* pin = (const __m128i*)(begin);
    __m128i accumulator = _mm_loadu_si128(pin);
    uint32_t k = 1;
    for(; 4*k < SIMDBlockSize; ++k) {
        __m128i newvec = _mm_loadu_si128(pin+k);
        accumulator = _mm_or_si128(accumulator,newvec);
    }
    return maxbitas32int(accumulator);
}
static uint32_t orasint(const __m128i accumulator) {
    /* fold the four 32-bit lanes together with OR */
    const __m128i _tmp1 = _mm_or_si128(_mm_srli_si128(accumulator, 8), accumulator);
    const __m128i _tmp2 = _mm_or_si128(_mm_srli_si128(_tmp1, 4), _tmp1);
    return _mm_cvtsi128_si32(_tmp2);
}

#ifdef __SSE4_1__

static uint32_t minasint(const __m128i accumulator) {
    /* fold the four 32-bit lanes together with an unsigned minimum */
    const __m128i _tmp1 = _mm_min_epu32(_mm_srli_si128(accumulator, 8), accumulator);
    const __m128i _tmp2 = _mm_min_epu32(_mm_srli_si128(_tmp1, 4), _tmp1);
    return _mm_cvtsi128_si32(_tmp2);
}

static uint32_t maxasint(const __m128i accumulator) {
    /* fold the four 32-bit lanes together with an unsigned maximum */
    const __m128i _tmp1 = _mm_max_epu32(_mm_srli_si128(accumulator, 8), accumulator);
    const __m128i _tmp2 = _mm_max_epu32(_mm_srli_si128(_tmp1, 4), _tmp1);
    return _mm_cvtsi128_si32(_tmp2);
}

uint32_t simdmin(const uint32_t * in) {
    const __m128i* pin = (const __m128i*)(in);
    __m128i accumulator = _mm_loadu_si128(pin);
    uint32_t k = 1;
    for(; 4*k < SIMDBlockSize; ++k) {
        __m128i newvec = _mm_loadu_si128(pin+k);
        accumulator = _mm_min_epu32(accumulator,newvec);
    }
    return minasint(accumulator);
}

void simdmaxmin(const uint32_t * in, uint32_t * getmin, uint32_t * getmax) {
    const __m128i* pin = (const __m128i*)(in);
    __m128i minaccumulator = _mm_loadu_si128(pin);
    __m128i maxaccumulator = minaccumulator;
    uint32_t k = 1;
    for(; 4*k < SIMDBlockSize; ++k) {
        __m128i newvec = _mm_loadu_si128(pin+k);
        minaccumulator = _mm_min_epu32(minaccumulator,newvec);
        maxaccumulator = _mm_max_epu32(maxaccumulator,newvec);
    }
    *getmin = minasint(minaccumulator);
    *getmax = maxasint(maxaccumulator);
}


uint32_t simdmin_length(const uint32_t * in, uint32_t length) {
    uint32_t currentmin = 0xFFFFFFFF;
    uint32_t lengthdividedby4 = length / 4;
    uint32_t offset = lengthdividedby4 * 4;
    uint32_t k;
    if (lengthdividedby4 > 0) {
        const __m128i* pin = (const __m128i*)(in);
        __m128i accumulator = _mm_loadu_si128(pin);
        k = 1;
        for(; 4*k < lengthdividedby4 * 4; ++k) {
            __m128i newvec = _mm_loadu_si128(pin+k);
            accumulator = _mm_min_epu32(accumulator,newvec);
        }
        currentmin = minasint(accumulator);
    }
    for (k = offset; k < length; ++k)
        if (in[k] < currentmin)
            currentmin = in[k];
    return currentmin;
}

void simdmaxmin_length(const uint32_t * in, uint32_t length, uint32_t * getmin, uint32_t * getmax) {
    uint32_t lengthdividedby4 = length / 4;
    uint32_t offset = lengthdividedby4 * 4;
    uint32_t k;
    *getmin = 0xFFFFFFFF;
    *getmax = 0;
    if (lengthdividedby4 > 0) {
        const __m128i* pin = (const __m128i*)(in);
        __m128i minaccumulator = _mm_loadu_si128(pin);
        __m128i maxaccumulator = minaccumulator;
        k = 1;
        for(; 4*k < lengthdividedby4 * 4; ++k) {
            __m128i newvec = _mm_loadu_si128(pin+k);
            minaccumulator = _mm_min_epu32(minaccumulator,newvec);
            maxaccumulator = _mm_max_epu32(maxaccumulator,newvec);
        }
        *getmin = minasint(minaccumulator);
        *getmax = maxasint(maxaccumulator);
    }
    for (k = offset; k < length; ++k) {
        if (in[k] < *getmin)
            *getmin = in[k];
        if (in[k] > *getmax)
            *getmax = in[k];
    }
}

#endif

SIMDCOMP_PURE uint32_t maxbits_length(const uint32_t * in, uint32_t length) {
    uint32_t k;
    uint32_t lengthdividedby4 = length / 4;
    uint32_t offset = lengthdividedby4 * 4;
    uint32_t bigor = 0;
    if(lengthdividedby4 > 0) {
        const __m128i* pin = (const __m128i*)(in);
        __m128i accumulator = _mm_loadu_si128(pin);
        k = 1;
        for(; 4*k < 4*lengthdividedby4; ++k) {
            __m128i newvec = _mm_loadu_si128(pin+k);
            accumulator = _mm_or_si128(accumulator,newvec);
        }
        bigor = orasint(accumulator);
    }
    for(k = offset; k < length; ++k)
        bigor |= in[k];
    return bits(bigor);
}


/* maxbits over 128 integers (SIMDBlockSize) with provided initial value */
uint32_t simdmaxbitsd1(uint32_t initvalue, const uint32_t * in) {
    __m128i initoffset = _mm_set1_epi32 (initvalue);
    const __m128i* pin = (const __m128i*)(in);
    __m128i newvec = _mm_loadu_si128(pin);
    __m128i accumulator = Delta(newvec , initoffset);
    __m128i oldvec = newvec;
    uint32_t k = 1;
    for(; 4*k < SIMDBlockSize; ++k) {
        newvec = _mm_loadu_si128(pin+k);
        accumulator = _mm_or_si128(accumulator,Delta(newvec , oldvec));
        oldvec = newvec;
    }
    initoffset = oldvec;
    return maxbitas32int(accumulator);
}


/* maxbits over |length| integers with provided initial value */
uint32_t simdmaxbitsd1_length(uint32_t initvalue, const uint32_t * in,
                uint32_t length) {
    __m128i newvec;
    __m128i oldvec;
    __m128i initoffset;
    __m128i accumulator;
    const __m128i *pin;
    uint32_t tmparray[4];
    uint32_t k = 1;
    uint32_t acc;

    assert(length > 0);

    pin = (const __m128i *)(in);
    initoffset = _mm_set1_epi32(initvalue);
    switch (length) {
      case 1:
        newvec = _mm_set1_epi32(in[0]);
        break;
      case 2:
        newvec = _mm_setr_epi32(in[0], in[1], in[1], in[1]);
        break;
      case 3:
        newvec = _mm_setr_epi32(in[0], in[1], in[2], in[2]);
        break;
      default:
        newvec = _mm_loadu_si128(pin);
        break;
    }
    accumulator = Delta(newvec, initoffset);
    oldvec = newvec;

    /* process 4 integers at a time and build an accumulator */
    while (k * 4 + 4 <= length) {
        newvec = _mm_loadu_si128(pin + k);
        accumulator = _mm_or_si128(accumulator, Delta(newvec, oldvec));
        oldvec = newvec;
        k++;
    }

    /* extract the accumulator as an integer */
    _mm_storeu_si128((__m128i *)(tmparray), accumulator);
    acc = tmparray[0] | tmparray[1] | tmparray[2] | tmparray[3];

    /* now process the remaining integers */
    for (k *= 4; k < length; k++)
        acc |= in[k] - (k == 0 ? initvalue : in[k - 1]);

    /* return the number of bits */
    return bits(acc);
}
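The Delta macro above is the workhorse of the differential routines: shifting curr left by one 32-bit lane and pulling in the top lane of prev lines every value up with its predecessor, so a single subtraction yields four deltas at once. A small sketch checking it against scalar differences (the harness is illustrative, not part of the library):

/* verify Delta(curr, prev) == (c0-p3, c1-c0, c2-c1, c3-c2) lanewise */
#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

#define Delta(curr, prev) \
    _mm_sub_epi32(curr, \
                  _mm_or_si128(_mm_slli_si128(curr, 4), _mm_srli_si128(prev, 12)))

int main(void) {
    uint32_t p[4] = {1, 2, 3, 4}, c[4] = {6, 9, 13, 20}, d[4];
    __m128i prev = _mm_loadu_si128((const __m128i *)p);
    __m128i curr = _mm_loadu_si128((const __m128i *)c);
    _mm_storeu_si128((__m128i *)d, Delta(curr, prev));
    /* expected deltas: 6-4, 9-6, 13-9, 20-13 -> 2 3 4 7 */
    printf("%u %u %u %u\n", d[0], d[1], d[2], d[3]);
    return 0;
}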
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,900 +0,0 @@
|
||||
/**
|
||||
* This code is released under a BSD License.
|
||||
*/
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include "simdcomp.h"
|
||||
|
||||
|
||||
|
||||
int testshortpack() {
|
||||
int bit;
|
||||
size_t i;
|
||||
size_t length;
|
||||
__m128i * bb;
|
||||
srand(0);
|
||||
printf("testshortpack\n");
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
const size_t N = 128;
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));
|
||||
|
||||
for (i = 0; i < N; ++i) {
|
||||
data[i] = rand() & ((1 << bit) - 1);
|
||||
}
|
||||
for (length = 0; length <= N; ++length) {
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
bb = simdpack_shortlength(data, length, (__m128i *) buffer,
|
||||
bit);
|
||||
if((bb - (__m128i *) buffer) * sizeof(__m128i) != (unsigned) simdpack_compressedbytes(length,bit)) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
simdunpack_shortlength((__m128i *) buffer, length,
|
||||
backdata, bit);
|
||||
for (i = 0; i < length; ++i) {
|
||||
|
||||
if (data[i] != backdata[i]) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int testlongpack() {
|
||||
int bit;
|
||||
size_t i;
|
||||
size_t length;
|
||||
__m128i * bb;
|
||||
srand(0);
|
||||
printf("testlongpack\n");
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
const size_t N = 2048;
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));
|
||||
|
||||
for (i = 0; i < N; ++i) {
|
||||
data[i] = rand() & ((1 << bit) - 1);
|
||||
}
|
||||
for (length = 0; length <= N; ++length) {
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
bb = simdpack_length(data, length, (__m128i *) buffer,
|
||||
bit);
|
||||
if((bb - (__m128i *) buffer) * sizeof(__m128i) != (unsigned) simdpack_compressedbytes(length,bit)) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
simdunpack_length((__m128i *) buffer, length,
|
||||
backdata, bit);
|
||||
for (i = 0; i < length; ++i) {
|
||||
|
||||
if (data[i] != backdata[i]) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
int testset() {
|
||||
int bit;
|
||||
size_t i;
|
||||
const size_t N = 128;
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));
|
||||
|
||||
srand(0);
|
||||
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
printf("simple set %d \n",bit);
|
||||
|
||||
for (i = 0; i < N; ++i) {
|
||||
data[i] = rand() & ((1 << bit) - 1);
|
||||
}
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
simdpack(data, (__m128i *) buffer, bit);
|
||||
simdunpack((__m128i *) buffer, backdata, bit);
|
||||
for (i = 0; i < N; ++i) {
|
||||
if (data[i] != backdata[i]) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
for(i = N ; i > 0; i--) {
|
||||
simdfastset((__m128i *) buffer, bit, data[N - i], i - 1);
|
||||
}
|
||||
simdunpack((__m128i *) buffer, backdata, bit);
|
||||
for (i = 0; i < N; ++i) {
|
||||
if (data[i] != backdata[N - i - 1]) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
simdpack(data, (__m128i *) buffer, bit);
|
||||
for(i = 1 ; i <= N; i++) {
|
||||
simdfastset((__m128i *) buffer, bit, data[i - 1], i - 1);
|
||||
}
|
||||
simdunpack((__m128i *) buffer, backdata, bit);
|
||||
for (i = 0; i < N; ++i) {
|
||||
if (data[i] != backdata[i]) {
|
||||
printf("bug\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef __SSE4_1__
|
||||
|
||||
int testsetd1() {
|
||||
int bit;
|
||||
size_t i;
|
||||
uint32_t newvalue;
|
||||
const size_t N = 128;
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * datazeroes = malloc(N * sizeof(uint32_t));
|
||||
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));
|
||||
|
||||
srand(0);
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
printf("simple set d1 %d \n",bit);
|
||||
data[0] = rand() & ((1 << bit) - 1);
|
||||
datazeroes[0] = 0;
|
||||
|
||||
for (i = 1; i < N; ++i) {
|
||||
data[i] = data[i - 1] + (rand() & ((1 << bit) - 1));
|
||||
datazeroes[i] = 0;
|
||||
}
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
simdpackd1(0,datazeroes, (__m128i *) buffer, bit);
|
||||
for(i = 1 ; i <= N; i++) {
|
||||
simdfastsetd1(0,(__m128i *) buffer, bit, data[i - 1], i - 1);
|
||||
newvalue = simdselectd1(0, (const __m128i *) buffer, bit,i - 1);
|
||||
if( newvalue != data[i-1] ) {
|
||||
printf("bad set-select\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
simdunpackd1(0,(__m128i *) buffer, backdata, bit);
|
||||
for (i = 0; i < N; ++i) {
|
||||
if (data[i] != backdata[i])
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
free(datazeroes);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
int testsetFOR() {
|
||||
int bit;
|
||||
size_t i;
|
||||
uint32_t newvalue;
|
||||
const size_t N = 128;
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * datazeroes = malloc(N * sizeof(uint32_t));
|
||||
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));
|
||||
|
||||
srand(0);
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
printf("simple set FOR %d \n",bit);
|
||||
for (i = 0; i < N; ++i) {
|
||||
data[i] = (rand() & ((1 << bit) - 1));
|
||||
datazeroes[i] = 0;
|
||||
}
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
simdpackFOR(0,datazeroes, (__m128i *) buffer, bit);
|
||||
for(i = 1 ; i <= N; i++) {
|
||||
simdfastsetFOR(0,(__m128i *) buffer, bit, data[i - 1], i - 1);
|
||||
newvalue = simdselectFOR(0, (const __m128i *) buffer, bit,i - 1);
|
||||
if( newvalue != data[i-1] ) {
|
||||
printf("bad set-select\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
simdunpackFOR(0,(__m128i *) buffer, backdata, bit);
|
||||
for (i = 0; i < N; ++i) {
|
||||
if (data[i] != backdata[i])
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
free(datazeroes);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int testshortFORpack() {
|
||||
int bit;
|
||||
size_t i;
|
||||
__m128i * rb;
|
||||
size_t length;
|
||||
uint32_t offset = 7;
|
||||
srand(0);
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
const size_t N = 128;
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * buffer = malloc((2 * N + 1024) * sizeof(uint32_t));
|
||||
|
||||
for (i = 0; i < N; ++i) {
|
||||
data[i] = (rand() & ((1 << bit) - 1)) + offset;
|
||||
}
|
||||
for (length = 0; length <= N; ++length) {
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
rb = simdpackFOR_length(offset,data, length, (__m128i *) buffer,
|
||||
bit);
|
||||
if(((rb - (__m128i *) buffer)*sizeof(__m128i)) != (unsigned) simdpackFOR_compressedbytes(length,bit)) {
|
||||
return -1;
|
||||
}
|
||||
simdunpackFOR_length(offset,(__m128i *) buffer, length,
|
||||
backdata, bit);
|
||||
for (i = 0; i < length; ++i) {
|
||||
|
||||
if (data[i] != backdata[i])
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#ifdef __AVX2__
|
||||
|
||||
int testbabyavx() {
|
||||
int bit;
|
||||
int trial;
|
||||
unsigned int i,j;
|
||||
const size_t N = AVXBlockSize;
|
||||
srand(0);
|
||||
printf("testbabyavx\n");
|
||||
printf("bit = ");
|
||||
for (bit = 0; bit < 32; ++bit) {
|
||||
printf(" %d ",bit);
|
||||
fflush(stdout);
|
||||
for(trial = 0; trial < 100; ++trial) {
|
||||
uint32_t * data = malloc(N * sizeof(uint32_t)+ 64 * sizeof(uint32_t));
|
||||
uint32_t * backdata = malloc(N * sizeof(uint32_t) + 64 * sizeof(uint32_t) );
|
||||
__m256i * buffer = malloc((2 * N + 1024) * sizeof(uint32_t) + 32);
|
||||
|
||||
for (i = 0; i < N; ++i) {
|
||||
data[i] = rand() & ((uint32_t)(1 << bit) - 1);
|
||||
}
|
||||
for (i = 0; i < N; ++i) {
|
||||
backdata[i] = 0;
|
||||
}
|
||||
if(avxmaxbits(data) != maxbits_length(data,N)) {
|
||||
printf("avxmaxbits is buggy\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
avxpackwithoutmask(data, buffer, bit);
|
||||
avxunpack(buffer, backdata, bit);
|
||||
for (i = 0; i < AVXBlockSize; ++i) {
|
||||
if (data[i] != backdata[i]) {
|
||||
printf("bug\n");
|
||||
for (j = 0; j < N; ++j) {
|
||||
if (data[j] != backdata[j]) {
|
||||
printf("data[%d]=%d v.s. backdata[%d]=%d\n",j,data[j],j,backdata[j]);
|
||||
} else {
|
||||
printf("data[%d]=%d\n",j,data[j]);
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
free(data);
|
||||
free(backdata);
|
||||
free(buffer);
|
||||
}
|
||||
}
|
||||
printf("\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int testavx2() {
|
||||
int N = 5000 * AVXBlockSize, gap;
|
||||
__m256i * buffer = malloc(AVXBlockSize * sizeof(uint32_t));
|
||||
uint32_t * datain = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backbuffer = malloc(AVXBlockSize * sizeof(uint32_t));
|
||||
for (gap = 1; gap <= 387420489; gap *= 3) {
|
||||
int k;
|
||||
printf(" gap = %u \n", gap);
|
||||
for (k = 0; k < N; ++k)
|
||||
datain[k] = k * gap;
|
||||
for (k = 0; k * AVXBlockSize < N; ++k) {
|
||||
/*
|
||||
First part works for general arrays (sorted or unsorted)
|
||||
*/
|
||||
int j;
|
||||
/* we compute the bit width */
|
||||
const uint32_t b = avxmaxbits(datain + k * AVXBlockSize);
|
||||
if(avxmaxbits(datain + k * AVXBlockSize) != maxbits_length(datain + k * AVXBlockSize,AVXBlockSize)) {
|
||||
printf("avxmaxbits is buggy %d %d \n",
|
||||
avxmaxbits(datain + k * AVXBlockSize),
|
||||
maxbits_length(datain + k * AVXBlockSize,AVXBlockSize));
|
||||
return -1;
|
||||
}
|
||||
printf("bit width = %d\n",b);
|
||||
|
||||
|
||||
/* we read 256 integers at "datain + k * AVXBlockSize" and
|
||||
write b 256-bit vectors at "buffer" */
|
||||
avxpackwithoutmask(datain + k * AVXBlockSize, buffer, b);
|
||||
/* we read back b1 128-bit vectors at "buffer" and write 128 integers at backbuffer */
|
||||
avxunpack(buffer, backbuffer, b);/* uncompressed */
|
||||
for (j = 0; j < AVXBlockSize; ++j) {
|
||||
if (backbuffer[j] != datain[k * AVXBlockSize + j]) {
|
||||
int i;
|
||||
printf("bug in avxpack\n");
|
||||
for(i = 0; i < AVXBlockSize; ++i) {
|
||||
printf("data[%d]=%d got back %d %s\n",i,
|
||||
datain[k * AVXBlockSize + i],backbuffer[i],
|
||||
datain[k * AVXBlockSize + i]!=backbuffer[i]?"bug":"");
|
||||
}
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
free(buffer);
|
||||
free(datain);
|
||||
free(backbuffer);
|
||||
printf("Code looks good.\n");
|
||||
return 0;
|
||||
}
|
||||
#endif /* avx2 */
|
||||
|
||||
int test() {
|
||||
int N = 5000 * SIMDBlockSize, gap;
|
||||
__m128i * buffer = malloc(SIMDBlockSize * sizeof(uint32_t));
|
||||
uint32_t * datain = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backbuffer = malloc(SIMDBlockSize * sizeof(uint32_t));
|
||||
for (gap = 1; gap <= 387420489; gap *= 3) {
|
||||
int k;
|
||||
printf(" gap = %u \n", gap);
|
||||
for (k = 0; k < N; ++k)
|
||||
datain[k] = k * gap;
|
||||
for (k = 0; k * SIMDBlockSize < N; ++k) {
|
||||
/*
|
||||
First part works for general arrays (sorted or unsorted)
|
||||
*/
|
||||
int j;
|
||||
/* we compute the bit width */
|
||||
const uint32_t b = maxbits(datain + k * SIMDBlockSize);
|
||||
/* we read 128 integers at "datain + k * SIMDBlockSize" and
|
||||
write b 128-bit vectors at "buffer" */
|
||||
simdpackwithoutmask(datain + k * SIMDBlockSize, buffer, b);
|
||||
/* we read back b1 128-bit vectors at "buffer" and write 128 integers at backbuffer */
|
||||
simdunpack(buffer, backbuffer, b);/* uncompressed */
|
||||
for (j = 0; j < SIMDBlockSize; ++j) {
|
||||
if (backbuffer[j] != datain[k * SIMDBlockSize + j]) {
|
||||
printf("bug in simdpack\n");
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
/*
|
||||
next part assumes that the data is sorted (uses differential coding)
|
||||
*/
|
||||
uint32_t offset = 0;
|
||||
/* we compute the bit width */
|
||||
const uint32_t b1 = simdmaxbitsd1(offset,
|
||||
datain + k * SIMDBlockSize);
|
||||
/* we read 128 integers at "datain + k * SIMDBlockSize" and
|
||||
write b1 128-bit vectors at "buffer" */
|
||||
simdpackwithoutmaskd1(offset, datain + k * SIMDBlockSize, buffer,
|
||||
b1);
|
||||
/* we read back b1 128-bit vectors at "buffer" and write 128 integers at backbuffer */
|
||||
simdunpackd1(offset, buffer, backbuffer, b1);
|
||||
for (j = 0; j < SIMDBlockSize; ++j) {
|
||||
if (backbuffer[j] != datain[k * SIMDBlockSize + j]) {
|
||||
printf("bug in simdpack d1\n");
|
||||
return -3;
|
||||
}
|
||||
}
|
||||
offset = datain[k * SIMDBlockSize + SIMDBlockSize - 1];
|
||||
}
|
||||
}
|
||||
}
|
||||
free(buffer);
|
||||
free(datain);
|
||||
free(backbuffer);
|
||||
printf("Code looks good.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef __SSE4_1__
|
||||
int testFOR() {
|
||||
int N = 5000 * SIMDBlockSize, gap;
|
||||
__m128i * buffer = malloc(SIMDBlockSize * sizeof(uint32_t));
|
||||
uint32_t * datain = malloc(N * sizeof(uint32_t));
|
||||
uint32_t * backbuffer = malloc(SIMDBlockSize * sizeof(uint32_t));
|
||||
uint32_t tmax, tmin, tb;
|
||||
for (gap = 1; gap <= 387420489; gap *= 2) {
|
||||
int k;
|
||||
printf(" gap = %u \n", gap);
|
||||
for (k = 0; k < N; ++k)
|
||||
datain[k] = k * gap;
|
||||
for (k = 0; k * SIMDBlockSize < N; ++k) {
|
||||
int j;
|
||||
simdmaxmin_length(datain + k * SIMDBlockSize,SIMDBlockSize,&tmin,&tmax);
|
||||
/* we compute the bit width */
|
||||
tb = bits(tmax - tmin);
|
||||
|
||||
|
||||
/* we read 128 integers at "datain + k * SIMDBlockSize" and
|
||||
write b 128-bit vectors at "buffer" */
|
||||
simdpackFOR(tmin,datain + k * SIMDBlockSize, buffer, tb);
|
||||
|
||||
for (j = 0; j < SIMDBlockSize; ++j) {
|
||||
uint32_t selectedvalue = simdselectFOR(tmin,buffer,tb,j);
|
||||
if (selectedvalue != datain[k * SIMDBlockSize + j]) {
|
||||
printf("bug in simdselectFOR\n");
|
||||
return -3;
|
||||
}
|
||||
}
|
||||
/* we read back b1 128-bit vectors at "buffer" and write 128 integers at backbuffer */
|
||||
simdunpackFOR(tmin,buffer, backbuffer, tb);/* uncompressed */
|
||||
for (j = 0; j < SIMDBlockSize; ++j) {
|
||||
if (backbuffer[j] != datain[k * SIMDBlockSize + j]) {
|
||||
printf("bug in simdpackFOR\n");
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
free(buffer);
|
||||
free(datain);
|
||||
free(backbuffer);
|
||||
printf("Code looks good.\n");
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#define MAX 300
int test_simdmaxbitsd1_length() {
    uint32_t result, buffer[MAX + 1];
    int i, j;

    memset(&buffer[0], 0xff, sizeof(buffer));

    /* this test creates buffers of different length; each buffer is
     * initialized to result in the following deltas:
     * length 1: 2
     * length 2: 1 2
     * length 3: 1 1 2
     * length 4: 1 1 1 2
     * length 5: 1 1 1 1 2
     * etc. Each sequence's "maxbits" is 2. */
    for (i = 0; i < MAX; i++) {
        for (j = 0; j < i; j++)
            buffer[j] = j + 1;
        buffer[i] = i + 2;

        result = simdmaxbitsd1_length(0, &buffer[0], i + 1);
        if (result != 2) {
            printf("simdmaxbitsd1_length: unexpected result %u in loop %d\n",
                   result, i);
            return -1;
        }
    }
    printf("simdmaxbitsd1_length: ok\n");
    return 0;
}

int uint32_cmp(const void *a, const void *b)
{
    const uint32_t *ia = (const uint32_t *)a;
    const uint32_t *ib = (const uint32_t *)b;
    if (*ia < *ib)
        return -1;
    else if (*ia > *ib)
        return 1;
    return 0;
}

#ifdef __SSE4_1__
int test_simdpackedsearch() {
    uint32_t buffer[128];
    uint32_t result = 0;
    int b, i;
    uint32_t init = 0;
    __m128i initial = _mm_set1_epi32(init);

    /* initialize the buffer */
    for (i = 0; i < 128; i++)
        buffer[i] = (uint32_t)(i + 1);

    /* this test creates delta encoded buffers with different bit widths, then
     * performs lower bound searches for each key */
    for (b = 1; b <= 32; b++) {
        uint32_t out[128];
        /* delta-encode to 'b' bits */
        simdpackwithoutmaskd1(init, buffer, (__m128i *)out, b);
        printf("simdsearchd1: %d bits\n", b);

        /* now perform the searches */
        initial = _mm_set1_epi32(init);
        assert(simdsearchd1(&initial, (__m128i *)out, b, 0, &result) == 0);
        assert(result > 0);

        for (i = 1; i <= 128; i++) {
            initial = _mm_set1_epi32(init);
            assert(simdsearchd1(&initial, (__m128i *)out, b,
                                (uint32_t)i, &result) == i - 1);
            assert(result == (unsigned)i);
        }
        initial = _mm_set1_epi32(init);
        assert(simdsearchd1(&initial, (__m128i *)out, b, 200, &result)
               == 128);
        assert(result > 200);
    }
    printf("simdsearchd1: ok\n");
    return 0;
}

int test_simdpackedsearchFOR() {
    uint32_t buffer[128];
    uint32_t result = 0;
    int b;
    uint32_t i;
    uint32_t maxv, tmin, tmax, tb;
    uint32_t out[128];

    /* this test creates FOR-encoded buffers with different bit widths, then
     * performs lower bound searches for each key */
    for (b = 1; b <= 32; b++) {
        /* initialize the buffer */
        maxv = (b == 32)
               ? 0xFFFFFFFF
               : ((1U << b) - 1);
        for (i = 0; i < 128; i++)
            buffer[i] = maxv * (i + 1) / 128;
        simdmaxmin_length(buffer, SIMDBlockSize, &tmin, &tmax);
        /* we compute the bit width */
        tb = bits(tmax - tmin);
        /* FOR-encode to 'tb' bits */
        simdpackFOR(tmin, buffer, (__m128i *)out, tb);
        printf("simdsearchFOR: %d bits\n", b);

        /* now perform the searches */
        for (i = 0; i < 128; i++) {
            assert(buffer[i] == simdselectFOR(tmin, (__m128i *)out, tb, i));
        }
        for (i = 0; i < 128; i++) {
            int x = simdsearchwithlengthFOR(tmin, (__m128i *)out, tb,
                                            128, buffer[i], &result);
            assert(simdselectFOR(tmin, (__m128i *)out, tb, x) == buffer[x]);
            assert(simdselectFOR(tmin, (__m128i *)out, tb, x) == result);
            assert(buffer[x] == result);
            assert(result == buffer[i]);
            assert(buffer[x] == buffer[i]);
        }
    }
    printf("simdsearchFOR: ok\n");
    return 0;
}

int test_simdpackedsearch_advanced() {
    uint32_t buffer[128];
    uint32_t backbuffer[128];
    uint32_t out[128];
    uint32_t result = 0;
    uint32_t b, i;
    uint32_t init = 0;
    __m128i initial = _mm_set1_epi32(init);

    /* this test creates delta encoded buffers with different bit widths, then
     * performs lower bound searches for each key */
    for (b = 0; b <= 32; b++) {
        uint32_t prev = init;
        /* initialize the buffer */
        for (i = 0; i < 128; i++) {
            buffer[i] = ((uint32_t)(1431655765 * i + 0xFFFFFFFF));
            if (b < 32) buffer[i] %= (1U << b);
        }

        qsort(buffer, 128, sizeof(uint32_t), uint32_cmp);

        for (i = 0; i < 128; i++) {
            buffer[i] = buffer[i] + prev;
            prev = buffer[i];
        }
        for (i = 1; i < 128; i++) {
            if (buffer[i] < buffer[i - 1])
                buffer[i] = buffer[i - 1];
        }
        assert(simdmaxbitsd1(init, buffer) <= b);
        for (i = 0; i < 128; i++) {
            out[i] = 0; /* memset would do too */
        }

        /* delta-encode to 'b' bits */
        simdpackwithoutmaskd1(init, buffer, (__m128i *)out, b);
        simdunpackd1(init, (__m128i *)out, backbuffer, b);

        for (i = 0; i < 128; i++) {
            assert(buffer[i] == backbuffer[i]);
        }

        printf("advanced simdsearchd1: %u bits\n", b);

        for (i = 0; i < 128; i++) {
            int pos;
            initial = _mm_set1_epi32(init);
            pos = simdsearchd1(&initial, (__m128i *)out, b,
                               buffer[i], &result);
            assert(pos == simdsearchwithlengthd1(init, (__m128i *)out, b, 128,
                                                 buffer[i], &result));
            assert(buffer[pos] == buffer[i]);
            if (pos > 0)
                assert(buffer[pos - 1] < buffer[i]);
            assert(result == buffer[i]);
        }
        for (i = 0; i < 128; i++) {
            int pos;
            if (buffer[i] == 0) continue;
            initial = _mm_set1_epi32(init);
            pos = simdsearchd1(&initial, (__m128i *)out, b,
                               buffer[i] - 1, &result);
            assert(pos == simdsearchwithlengthd1(init, (__m128i *)out, b, 128,
                                                 buffer[i] - 1, &result));
            assert(buffer[pos] >= buffer[i] - 1);
            if (pos > 0)
                assert(buffer[pos - 1] < buffer[i] - 1);
            assert(result == buffer[pos]);
        }
        for (i = 0; i < 128; i++) {
            int pos;
            if (buffer[i] + 1 == 0)
                continue;
            initial = _mm_set1_epi32(init);
            pos = simdsearchd1(&initial, (__m128i *) out, b,
                               buffer[i] + 1, &result);
            assert(pos == simdsearchwithlengthd1(init, (__m128i *)out, b, 128,
                                                 buffer[i] + 1, &result));
            if (pos == 128) {
                assert(buffer[i] == buffer[127]);
            } else {
                assert(buffer[pos] >= buffer[i] + 1);
                if (pos > 0)
                    assert(buffer[pos - 1] < buffer[i] + 1);
                assert(result == buffer[pos]);
            }
        }
    }
    printf("advanced simdsearchd1: ok\n");
    return 0;
}

int test_simdpackedselect() {
    uint32_t buffer[128];
    uint32_t initial = 33;
    int b, i;

    /* initialize the buffer */
    for (i = 0; i < 128; i++)
        buffer[i] = (uint32_t)(initial + i);

    /* this test creates delta encoded buffers with different bit widths, then
     * selects each value in turn */
    for (b = 1; b <= 32; b++) {
        uint32_t out[128];
        /* delta-encode to 'b' bits */
        simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);

        printf("simdselectd1: %d bits\n", b);

        /* now perform the selects */
        for (i = 0; i < 128; i++) {
            assert(simdselectd1(initial, (__m128i *)out, b, (uint32_t)i)
                   == initial + i);
        }
    }
    printf("simdselectd1: ok\n");
    return 0;
}

int test_simdpackedselect_advanced() {
    uint32_t buffer[128];
    uint32_t initial = 33;
    uint32_t b;
    int i;

    /* this test creates delta encoded buffers with different bit widths, then
     * selects each value in turn */
    for (b = 0; b <= 32; b++) {
        uint32_t prev = initial;
        uint32_t out[128];
        /* initialize the buffer */
        for (i = 0; i < 128; i++) {
            buffer[i] = ((uint32_t)(165576 * i));
            if (b < 32) buffer[i] %= (1U << b);
        }
        for (i = 0; i < 128; i++) {
            buffer[i] = buffer[i] + prev;
            prev = buffer[i];
        }

        for (i = 1; i < 128; i++) {
            if (buffer[i] < buffer[i - 1])
                buffer[i] = buffer[i - 1];
        }
        assert(simdmaxbitsd1(initial, buffer) <= b);

        for (i = 0; i < 128; i++) {
            out[i] = 0; /* memset would do too */
        }

        /* delta-encode to 'b' bits */
        simdpackwithoutmaskd1(initial, buffer, (__m128i *)out, b);

        printf("simdselectd1: %u bits\n", b);

        /* now perform the selects */
        for (i = 0; i < 128; i++) {
            uint32_t valretrieved = simdselectd1(initial, (__m128i *)out, b, (uint32_t)i);
            assert(valretrieved == buffer[i]);
        }
    }
    printf("advanced simdselectd1: ok\n");
    return 0;
}
#endif


int main() {
    int r;
    r = testsetFOR();
    if (r) {
        printf("test failure 1\n");
        return r;
    }

#ifdef __SSE4_1__
    r = testsetd1();
    if (r) {
        printf("test failure 2\n");
        return r;
    }
#endif
    r = testset();
    if (r) {
        printf("test failure 3\n");
        return r;
    }

    r = testshortFORpack();
    if (r) {
        printf("test failure 4\n");
        return r;
    }
    r = testshortpack();
    if (r) {
        printf("test failure 5\n");
        return r;
    }
    r = testlongpack();
    if (r) {
        printf("test failure 6\n");
        return r;
    }
#ifdef __SSE4_1__
    r = test_simdpackedsearchFOR();
    if (r) {
        printf("test failure 7\n");
        return r;
    }

    r = testFOR();
    if (r) {
        printf("test failure 8\n");
        return r;
    }
#endif
#ifdef __AVX2__
    r = testbabyavx();
    if (r) {
        printf("test failure baby avx\n");
        return r;
    }

    r = testavx2();
    if (r) {
        printf("test failure 9 avx\n");
        return r;
    }
#endif
    r = test();
    if (r) {
        printf("test failure 9\n");
        return r;
    }

    r = test_simdmaxbitsd1_length();
    if (r) {
        printf("test failure 10\n");
        return r;
    }
#ifdef __SSE4_1__
    r = test_simdpackedsearch();
    if (r) {
        printf("test failure 11\n");
        return r;
    }

    r = test_simdpackedsearch_advanced();
    if (r) {
        printf("test failure 12\n");
        return r;
    }

    r = test_simdpackedselect();
    if (r) {
        printf("test failure 13\n");
        return r;
    }

    r = test_simdpackedselect_advanced();
    if (r) {
        printf("test failure 14\n");
        return r;
    }
#endif
    printf("All tests OK!\n");

    return 0;
}
@@ -1,102 +0,0 @@
/**
 * This code is released under a BSD License.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "simdcomp.h"


#define get_random_char() ((uint8_t)(rand() % 256))


int main() {
    int N = 5000 * SIMDBlockSize, gap;
    __m128i * buffer = malloc(SIMDBlockSize * sizeof(uint32_t));
    uint32_t * datain = malloc(N * sizeof(uint32_t));
    uint32_t * backbuffer = malloc(SIMDBlockSize * sizeof(uint32_t));

    srand(time(NULL));

    for (gap = 1; gap <= 387420489; gap *= 3) {
        int k;
        printf(" gap = %d \n", gap);

        /* simulate some random character string, don't care about endianness */
        for (k = 0; k < N; ++k) {
            uint8_t _tmp[4];

            _tmp[0] = get_random_char();
            _tmp[1] = get_random_char();
            _tmp[2] = get_random_char();
            _tmp[3] = get_random_char();

            memmove(&datain[k], _tmp, 4);
        }
        for (k = 0; k * SIMDBlockSize < N; ++k) {
            /*
               First part works for general arrays (sorted or unsorted)
            */
            int j;
            /* we compute the bit width */
            const uint32_t b = maxbits(datain + k * SIMDBlockSize);
            /* we read 128 integers at "datain + k * SIMDBlockSize" and
               write b 128-bit vectors at "buffer" */
            simdpackwithoutmask(datain + k * SIMDBlockSize, buffer, b);
            /* we read back b 128-bit vectors at "buffer" and write 128 integers at backbuffer */
            simdunpack(buffer, backbuffer, b); /* decompress */
            for (j = 0; j < SIMDBlockSize; ++j) {
                uint8_t chars_back[4];
                uint8_t chars_in[4];

                memmove(chars_back, &backbuffer[j], 4);
                memmove(chars_in, &datain[k * SIMDBlockSize + j], 4);

                if (chars_in[0] != chars_back[0]
                    || chars_in[1] != chars_back[1]
                    || chars_in[2] != chars_back[2]
                    || chars_in[3] != chars_back[3]) {
                    printf("bug in simdpack\n");
                    return -2;
                }
            }

            {
                /*
                   next part assumes that the data is sorted (uses differential coding)
                */
                uint32_t offset = 0;
                /* we compute the bit width */
                const uint32_t b1 = simdmaxbitsd1(offset,
                                                  datain + k * SIMDBlockSize);
                /* we read 128 integers at "datain + k * SIMDBlockSize" and
                   write b1 128-bit vectors at "buffer" */
                simdpackwithoutmaskd1(offset, datain + k * SIMDBlockSize, buffer,
                                      b1);
                /* we read back b1 128-bit vectors at "buffer" and write 128 integers at backbuffer */
                simdunpackd1(offset, buffer, backbuffer, b1);
                for (j = 0; j < SIMDBlockSize; ++j) {
                    uint8_t chars_back[4];
                    uint8_t chars_in[4];

                    memmove(chars_back, &backbuffer[j], 4);
                    memmove(chars_in, &datain[k * SIMDBlockSize + j], 4);

                    if (chars_in[0] != chars_back[0]
                        || chars_in[1] != chars_back[1]
                        || chars_in[2] != chars_back[2]
                        || chars_in[3] != chars_back[3]) {
                        printf("bug in simdpack d1\n");
                        return -3;
                    }
                }
                offset = datain[k * SIMDBlockSize + SIMDBlockSize - 1];
            }
        }
    }
    free(buffer);
    free(datain);
    free(backbuffer);
    printf("Code looks good.\n");
    return 0;
}
cpp/simdcomp_wrapper.c (42 lines, vendored)
@@ -1,42 +0,0 @@
#include "simdcomp.h"
#include "simdcomputil.h"

// assumes datain has a size of 128 uint32
// and that output is large enough to host the compressed data.
size_t compress_sorted(
        const uint32_t* datain,
        uint8_t* output,
        const uint32_t offset) {
    const uint32_t b = simdmaxbitsd1(offset, datain);
    *output++ = b;
    simdpackwithoutmaskd1(offset, datain, (__m128i *) output, b);
    return 1 + b * sizeof(__m128i);
}

// assumes the compressed data holds 128 uint32
// and that output is large enough to host them.
size_t uncompress_sorted(
        const uint8_t* compressed_data,
        uint32_t* output,
        uint32_t offset) {
    const uint32_t b = *compressed_data++;
    simdunpackd1(offset, (__m128i *)compressed_data, output, b);
    return 1 + b * sizeof(__m128i);
}

size_t compress_unsorted(
        const uint32_t* datain,
        uint8_t* output) {
    const uint32_t b = maxbits(datain);
    *output++ = b;
    simdpackwithoutmask(datain, (__m128i *) output, b);
    return 1 + b * sizeof(__m128i);
}

size_t uncompress_unsorted(
        const uint8_t* compressed_data,
        uint32_t* output) {
    const uint32_t b = *compressed_data++;
    simdunpack((__m128i *)compressed_data, output, b);
    return 1 + b * sizeof(__m128i);
}
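
/* Editor's sketch: a minimal round-trip through the wrappers above. The
   function name roundtrip_one_block is hypothetical and not part of the
   original file. A block is 128 sorted uint32 values; the compressed form is
   one bit-width byte followed by b 128-bit vectors, so 1 + 32 * sizeof(__m128i)
   bytes is always a safe output size. */
#include <assert.h>
static void roundtrip_one_block(const uint32_t sorted_block[128]) {
    uint8_t compressed[1 + 32 * sizeof(__m128i)]; /* worst case: b = 32 */
    uint32_t recovered[128];
    const uint32_t offset = 0; /* last value of the previous block; 0 for the first block */
    size_t written = compress_sorted(sorted_block, compressed, offset);
    size_t read = uncompress_sorted(compressed, recovered, offset);
    assert(written == read); /* both sides derive 1 + b * 16 from the same b */
    for (int j = 0; j < 128; j++)
        assert(recovered[j] == sorted_block[j]);
}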
cpp/streamvbyte/.gitignore (32 lines, vendored)
@@ -1,32 +0,0 @@
# Object files
*.o
*.ko
*.obj
*.elf

# Precompiled Headers
*.gch
*.pch

# Libraries
*.lib
*.a
*.la
*.lo

# Shared objects (inc. Windows DLLs)
*.dll
*.so
*.so.*
*.dylib

# Executables
*.exe
*.out
*.app
*.i*86
*.x86_64
*.hex

# Debug files
*.dSYM/
@@ -1,7 +0,0 @@
language: c
sudo: false
compiler:
- gcc
- clang

script: make && ./unit
@@ -1,202 +0,0 @@
[Apache License, Version 2.0, January 2004 (http://www.apache.org/licenses/): the standard license text, reproduced verbatim in the original file.]

@@ -1,60 +0,0 @@
streamvbyte
===========
[Build Status](https://travis-ci.org/lemire/streamvbyte)

StreamVByte is a new integer compression technique that applies SIMD instructions (vectorization) to
Google's Group Varint approach. The net result is faster than other byte-oriented compression
techniques.

The approach is patent-free and the code is available under the Apache License.

It includes fast differential coding.

It assumes a recent Intel processor (e.g., Haswell or better).

The code should build using most standard-compliant C99 compilers. The provided makefile
expects a Linux-like system.

Usage:

make
./unit

See example.c for an example.

Short code sample:
```C
// suppose that datain is an array of uint32_t integers
size_t compsize = streamvbyte_encode(datain, N, compressedbuffer); // encoding
// here the result is stored in compressedbuffer using compsize bytes
streamvbyte_decode(compressedbuffer, recovdata, N); // decoding (fast)
```

If the values are sorted, then it might be preferable to use differential coding:
```C
// suppose that datain is an array of uint32_t integers
size_t compsize = streamvbyte_delta_encode(datain, N, compressedbuffer, 0); // encoding
// here the result is stored in compressedbuffer using compsize bytes
streamvbyte_delta_decode(compressedbuffer, recovdata, N, 0); // decoding (fast)
```
You have to know how many integers were coded when you decompress. You can store this
information along with the compressed stream.

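One way to do that (an editor's sketch, not from the original README; the helper names write_block and read_block are hypothetical) is to prepend the count to the stream:
```C
#include <string.h>
#include "streamvbyte.h"

// Store the element count first, so the stream is self-describing.
size_t write_block(const uint32_t *in, uint32_t n, uint8_t *out) {
    memcpy(out, &n, sizeof(n));
    return sizeof(n) + streamvbyte_encode(in, n, out + sizeof(n));
}

// Recover the count, then decode that many integers.
size_t read_block(const uint8_t *in, uint32_t *out, uint32_t *n) {
    memcpy(n, in, sizeof(*n));
    return sizeof(*n) + streamvbyte_decode(in + sizeof(*n), out, *n);
}
```
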
See also
--------
* SIMDCompressionAndIntersection: A C++ library to compress and intersect sorted lists of integers using SIMD instructions https://github.com/lemire/SIMDCompressionAndIntersect
* The FastPFOR C++ library: Fast integer compression https://github.com/lemire/FastPFor
* High-performance dictionary coding https://github.com/lemire/dictionary
* LittleIntPacker: C library to pack and unpack short arrays of integers as fast as possible https://github.com/lemire/LittleIntPacker
* The SIMDComp library: A simple C library for compressing lists of integers using binary packing https://github.com/lemire/simdcomp
* MaskedVByte: Fast decoder for VByte-compressed integers https://github.com/lemire/MaskedVByte
* CSharpFastPFOR: A C# integer compression library https://github.com/Genbox/CSharpFastPFOR
* JavaFastPFOR: A Java integer compression library https://github.com/lemire/JavaFastPFOR
* Encoding: Integer compression libraries for Go https://github.com/zhenjl/encoding
* FrameOfReference: A C++ library dedicated to frame-of-reference (FOR) compression https://github.com/lemire/FrameOfReference
* libvbyte: A fast implementation for varbyte 32-bit/64-bit integer compression https://github.com/cruppstahl/libvbyte
* TurboPFor: A C library that offers lots of interesting optimizations; well worth checking (GPL license) https://github.com/powturbo/TurboPFor
* Oroch: A C++ library that offers a usable API (MIT license) https://github.com/ademakov/Oroch

@@ -1,24 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include "streamvbyte.h"

int main() {
    int N = 5000;
    uint32_t * datain = malloc(N * sizeof(uint32_t));
    uint8_t * compressedbuffer = malloc(N * sizeof(uint32_t));
    uint32_t * recovdata = malloc(N * sizeof(uint32_t));
    for (int k = 0; k < N; ++k)
        datain[k] = 120;
    size_t compsize = streamvbyte_encode(datain, N, compressedbuffer); // encoding
    // here the result is stored in compressedbuffer using compsize bytes
    size_t compsize2 = streamvbyte_decode(compressedbuffer, recovdata,
                                          N); // decoding (fast)
    assert(compsize == compsize2);
    free(datain);
    free(compressedbuffer);
    free(recovdata);
    printf("Compressed %d integers down to %d bytes.\n", N, (int) compsize);
    return 0;
}
@@ -1,19 +0,0 @@

#ifndef VARINTDECODE_H_
#define VARINTDECODE_H_
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
#include <stdint.h> // please use a C99-compatible compiler
#include <stddef.h>

// Encode an array of a given length read from "in" to "out" in varint format.
// Returns the number of bytes written.
size_t streamvbyte_encode(const uint32_t *in, uint32_t length, uint8_t *out);

// Read "length" 32-bit integers in varint format from "in", storing the result in "out".
// Returns the number of bytes read.
size_t streamvbyte_decode(const uint8_t* in, uint32_t* out, uint32_t length);

#endif /* VARINTDECODE_H_ */
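/* Editor's sketch: a safe output-buffer bound for streamvbyte_encode, derived
   from the format (one 2-bit code per value, packed four per key byte, plus
   at most 4 data bytes per value). The helper name is hypothetical and is not
   part of the original header. */
static inline size_t streamvbyte_max_compressedbytes(uint32_t length) {
    return (length + 3) / 4 + (size_t)length * sizeof(uint32_t);
}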
@@ -1,24 +0,0 @@
/*
 * streamvbytedelta.h
 *
 *  Created on: Apr 14, 2016
 *      Author: lemire
 */

#ifndef INCLUDE_STREAMVBYTEDELTA_H_
#define INCLUDE_STREAMVBYTEDELTA_H_

// Encode an array of a given length read from "in" to "out" in StreamVByte format.
// Returns the number of bytes written.
// This version uses differential coding (coding differences between values) starting at prev (you can often set prev to zero).
size_t streamvbyte_delta_encode(const uint32_t *in, uint32_t length, uint8_t *out, uint32_t prev);

// Read "length" 32-bit integers in StreamVByte format from "in", storing the result in "out".
// Returns the number of bytes read.
// This version uses differential coding (coding differences between values) starting at prev (you can often set prev to zero).
size_t streamvbyte_delta_decode(const uint8_t* in, uint32_t* out, uint32_t length, uint32_t prev);

#endif /* INCLUDE_STREAMVBYTEDELTA_H_ */
@@ -1,58 +0,0 @@
# minimalist makefile
.SUFFIXES:
#
.SUFFIXES: .cpp .o .c .h

CFLAGS = -fPIC -march=native -std=c99 -O3 -Wall -Wextra -pedantic -Wshadow
LDFLAGS = -shared
LIBNAME=libstreamvbyte.so.0.0.1
all: unit $(LIBNAME)
test:
	./unit
install: $(OBJECTS)
	cp $(LIBNAME) /usr/local/lib
	ln -s /usr/local/lib/$(LIBNAME) /usr/local/lib/libstreamvbyte.so
	ldconfig
	cp $(HEADERS) /usr/local/include

HEADERS=./include/streamvbyte.h ./include/streamvbytedelta.h

uninstall:
	for h in $(HEADERS) ; do rm /usr/local/$$h; done
	rm /usr/local/lib/$(LIBNAME)
	rm /usr/local/lib/libstreamvbyte.so
	ldconfig

OBJECTS= streamvbyte.o streamvbytedelta.o

streamvbytedelta.o: ./src/streamvbytedelta.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/streamvbytedelta.c -Iinclude

streamvbyte.o: ./src/streamvbyte.c $(HEADERS)
	$(CC) $(CFLAGS) -c ./src/streamvbyte.c -Iinclude

$(LIBNAME): $(OBJECTS)
	$(CC) $(CFLAGS) -o $(LIBNAME) $(OBJECTS) $(LDFLAGS)

example: ./example.c $(HEADERS) $(OBJECTS)
	$(CC) $(CFLAGS) -o example ./example.c -Iinclude $(OBJECTS)

unit: ./tests/unit.c $(HEADERS) $(OBJECTS)
	$(CC) $(CFLAGS) -o unit ./tests/unit.c -Iinclude $(OBJECTS)

dynunit: ./tests/unit.c $(HEADERS) $(LIBNAME)
	$(CC) $(CFLAGS) -o dynunit ./tests/unit.c -Iinclude -lstreamvbyte

clean:
	rm -f unit *.o $(LIBNAME) example
@@ -1,495 +0,0 @@
#include "streamvbyte.h"
#if defined(_MSC_VER)
/* Microsoft C/C++-compatible compiler */
#include <intrin.h>
#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
/* GCC-compatible compiler, targeting x86/x86-64 */
#include <x86intrin.h>
#elif defined(__GNUC__) && defined(__ARM_NEON__)
/* GCC-compatible compiler, targeting ARM with NEON */
#include <arm_neon.h>
#elif defined(__GNUC__) && defined(__IWMMXT__)
/* GCC-compatible compiler, targeting ARM with WMMX */
#include <mmintrin.h>
#elif (defined(__GNUC__) || defined(__xlC__)) && (defined(__VEC__) || defined(__ALTIVEC__))
/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
#include <altivec.h>
#elif defined(__GNUC__) && defined(__SPE__)
/* GCC-compatible compiler, targeting PowerPC with SPE */
#include <spe.h>
#endif

static uint8_t lengthTable[256] = { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9,
    10, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10, 11, 6, 7, 8, 9, 7, 8,
    9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10,
    11, 12, 10, 11, 12, 13, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10,
    11, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 7, 8, 9, 10,
    8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 8, 9, 10, 11, 9, 10, 11,
    12, 10, 11, 12, 13, 11, 12, 13, 14, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10,
    11, 9, 10, 11, 12, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12,
    13, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 9, 10,
    11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15, 7, 8, 9, 10, 8,
    9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 8, 9, 10, 11, 9, 10, 11, 12,
    10, 11, 12, 13, 11, 12, 13, 14, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12,
    13, 14, 12, 13, 14, 15, 10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15,
    13, 14, 15, 16 };

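/* Editor's note: lengthTable[key] is the total number of data bytes consumed
   by a group of four values whose 2-bit codes are packed into "key": each
   2-bit code c contributes c + 1 bytes, so entries range from 4 (key 0x00,
   four 1-byte values) to 16 (key 0xFF, four 4-byte values). For example,
   key 0x01 (codes 1,0,0,0) gives 2 + 1 + 1 + 1 = 5. */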
static uint8_t shuffleTable[256][16] = {
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1 }, // 1111
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1 }, // 2111
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 3111
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 4111
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1 }, // 1211
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 2211
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 3211
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, -1, -1, -1 }, // 4211
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 1311
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 2311
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, -1, -1, -1 }, // 3311
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, -1, -1, -1 }, // 4311
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, -1, -1, -1 }, // 1411
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, -1, -1, -1 }, // 2411
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, -1, -1, -1 }, // 3411
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, -1, -1, -1 }, // 4411
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1 }, // 1121
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, -1, -1, -1 }, // 2121
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 3121
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 4121
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, -1, -1, -1 }, // 1221
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 2221
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 3221
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, -1, -1, -1 }, // 4221
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 1321
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 2321
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, -1, -1, -1 }, // 3321
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, -1, -1, -1 }, // 4321
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, -1, -1, -1 }, // 1421
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, -1, -1, -1 }, // 2421
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, -1, -1, -1 }, // 3421
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, -1, -1, -1 }, // 4421
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1 }, // 1131
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, -1, -1, -1 }, // 2131
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 3131
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 4131
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, -1, -1, -1 }, // 1231
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 2231
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 3231
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, -1, -1, -1 }, // 4231
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 1331
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 2331
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, -1, -1, -1 }, // 3331
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, -1, -1, -1 }, // 4331
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, -1, -1 }, // 1431
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, -1, -1, -1 }, // 2431
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, -1, -1, -1 }, // 3431
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, -1, -1, -1 }, // 4431
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1 }, // 1141
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, -1, -1, -1 }, // 2141
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 3141
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 4141
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, -1, -1, -1 }, // 1241
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 2241
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 3241
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, -1, -1, -1 }, // 4241
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 1341
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 2341
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, -1, -1, -1 }, // 3341
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, -1, -1, -1 }, // 4341
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1 }, // 1441
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, -1, -1 }, // 2441
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1, -1 }, // 3441
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1, -1 }, // 4441
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1 }, // 1112
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, -1, -1 }, // 2112
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 3112
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 4112
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, -1, -1 }, // 1212
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 2212
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 3212
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, -1, -1 }, // 4212
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 1312
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 2312
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, -1, -1 }, // 3312
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, -1, -1 }, // 4312
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, -1, -1 }, // 1412
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, -1, -1 }, // 2412
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, -1, -1 }, // 3412
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, -1, -1 }, // 4412
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1 }, // 1122
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, -1, -1 }, // 2122
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 3122
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 4122
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, -1, -1 }, // 1222
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 2222
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 3222
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, -1, -1 }, // 4222
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 1322
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 2322
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, -1, -1 }, // 3322
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, -1, -1 }, // 4322
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, -1, -1 }, // 1422
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, -1, -1 }, // 2422
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, -1, -1 }, // 3422
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, -1, -1 }, // 4422
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1 }, // 1132
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, -1, -1 }, // 2132
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 3132
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 4132
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, -1, -1 }, // 1232
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 2232
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 3232
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, -1, -1 }, // 4232
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 1332
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 2332
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, -1, -1 }, // 3332
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, -1, -1 }, // 4332
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, -1, -1 }, // 1432
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, -1, -1 }, // 2432
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, -1, -1 }, // 3432
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, -1, -1 }, // 4432
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1 }, // 1142
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, -1, -1 }, // 2142
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 3142
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 4142
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, -1, -1 }, // 1242
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 2242
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 3242
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, -1, -1 }, // 4242
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 1342
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 2342
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, -1, -1 }, // 3342
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, -1, -1 }, // 4342
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, -1 }, // 1442
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1 }, // 2442
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1 }, // 3442
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1, -1 }, // 4442
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1 }, // 1113
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, 6, -1 }, // 2113
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 3113
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 4113
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, 6, -1 }, // 1213
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 2213
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 3213
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, 9, -1 }, // 4213
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 1313
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 2313
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, 9, -1 }, // 3313
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, 10, -1 }, // 4313
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, 8, -1 }, // 1413
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, 9, -1 }, // 2413
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, 10, -1 }, // 3413
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, 11, -1 }, // 4413
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1 }, // 1123
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, 7, -1 }, // 2123
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 3123
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 4123
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, 7, -1 }, // 1223
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 2223
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 3223
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, 10, -1 }, // 4223
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 1323
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 2323
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, 10, -1 }, // 3323
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, 11, -1 }, // 4323
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, 9, -1 }, // 1423
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, -1 }, // 2423
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, 11, -1 }, // 3423
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, 12, -1 }, // 4423
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1 }, // 1133
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, 8, -1 }, // 2133
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 3133
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 4133
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, 8, -1 }, // 1233
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 2233
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 3233
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, 11, -1 }, // 4233
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 1333
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 2333
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, -1 }, // 3333
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, 12, -1 }, // 4333
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, 10, -1 }, // 1433
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, 11, -1 }, // 2433
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, 12, -1 }, // 3433
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13, -1 }, // 4433
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1 }, // 1143
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, 9, -1 }, // 2143
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 3143
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 4143
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, 9, -1 }, // 1243
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 2243
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 3243
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, -1 }, // 4243
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 1343
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 2343
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, 12, -1 }, // 3343
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, 13, -1 }, // 4343
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1 }, // 1443
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1 }, // 2443
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1 }, // 3443
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -1 }, // 4443
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6 }, // 1114
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, 6, 7 }, // 2114
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 3114
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 4114
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, 6, 7 }, // 1214
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 2214
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 3214
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, 9, 10 }, // 4214
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 1314
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 2314
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, 9, 10 }, // 3314
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, 10, 11 }, // 4314
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, 8, 9 }, // 1414
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, 9, 10 }, // 2414
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, 10, 11 }, // 3414
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, 11, 12 }, // 4414
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7 }, // 1124
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, 7, 8 }, // 2124
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 3124
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 4124
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, 7, 8 }, // 1224
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 2224
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 3224
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, 10, 11 }, // 4224
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 1324
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 2324
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, 10, 11 }, // 3324
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, 11, 12 }, // 4324
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, 9, 10 }, // 1424
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11 }, // 2424
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, 11, 12 }, // 3424
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, 12, 13 }, // 4424
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8 }, // 1134
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, 8, 9 }, // 2134
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 3134
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 4134
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, 8, 9 }, // 1234
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 2234
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 3234
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, 11, 12 }, // 4234
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 1334
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 2334
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, 12 }, // 3334
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, 12, 13 }, // 4334
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, 10, 11 }, // 1434
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, 11, 12 }, // 2434
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, 12, 13 }, // 3434
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13, 14 }, // 4434
    { 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9 }, // 1144
    { 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, 9, 10 }, // 2144
    { 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 3144
    { 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 4144
    { 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, 9, 10 }, // 1244
    { 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 2244
    { 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 3244
    { 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13 }, // 4244
    { 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 1344
    { 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 2344
    { 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, 12, 13 }, // 3344
    { 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, 13, 14 }, // 4344
    { 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }, // 1444
    { 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, // 2444
    { 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }, // 3444
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } // 4444
};

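/* Editor's note: each shuffleTable row is a PSHUFB mask that gathers the
   variable-length little-endian bytes of four encoded values into four 32-bit
   lanes; -1 (stored as 0xFF in this uint8_t table) has its high bit set, which
   makes _mm_shuffle_epi8 write a zero byte in that position. */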
static uint8_t _encode_data(uint32_t val, uint8_t *__restrict__ *dataPtrPtr) {
    uint8_t *dataPtr = *dataPtrPtr;
    uint8_t code;

    if (val < (1 << 8)) { // 1 byte
        *dataPtr = (uint8_t)(val);
        *dataPtrPtr += 1;
        code = 0;
    } else if (val < (1 << 16)) { // 2 bytes
        *(uint16_t *) dataPtr = (uint16_t)(val);
        *dataPtrPtr += 2;
        code = 1;
    } else if (val < (1 << 24)) { // 3 bytes
        *(uint16_t *) dataPtr = (uint16_t)(val);
        *(dataPtr + 2) = (uint8_t)(val >> 16);
        *dataPtrPtr += 3;
        code = 2;
    } else { // 4 bytes
        *(uint32_t *) dataPtr = val;
        *dataPtrPtr += 4;
        code = 3;
    }

    return code;
}

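As a concrete illustration (a sketch, not from the original source): encoding val = 0x012345 takes the 3-byte branch, so the returned code is 2 and the bytes land in little-endian order.

#include <assert.h>

static void encode_data_example(void) {
    uint8_t buf[4];
    uint8_t *p = buf;
    uint8_t code = _encode_data(0x012345, &p);
    assert(code == 2);    // 3 bytes used; the code stores length - 1
    assert(p - buf == 3); // data pointer advanced by 3
    assert(buf[0] == 0x45 && buf[1] == 0x23 && buf[2] == 0x01); // little-endian
}
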
static uint8_t *svb_encode_scalar(const uint32_t *in,
        uint8_t *__restrict__ keyPtr, uint8_t *__restrict__ dataPtr,
        uint32_t count) {
    if (count == 0)
        return dataPtr; // exit immediately if no data

    uint8_t shift = 0; // cycles 0, 2, 4, 6, 0, 2, 4, 6, ...
    uint8_t key = 0;
    for (uint32_t c = 0; c < count; c++) {
        if (shift == 8) {
            shift = 0;
            *keyPtr++ = key;
            key = 0;
        }
        uint32_t val = in[c];
        uint8_t code = _encode_data(val, &dataPtr);
        key |= code << shift;
        shift += 2;
    }

    *keyPtr = key;  // write last key (no increment needed)
    return dataPtr; // pointer to first unused data byte
}

// Encode an array of a given length, read from in and written to out in
// streamvbyte format. Returns the number of bytes written.
size_t streamvbyte_encode(const uint32_t *in, uint32_t count, uint8_t *out) {
    uint8_t *keyPtr = out;
    uint32_t keyLen = (count + 3) / 4; // 2 bits per integer, rounded up to a full byte
    uint8_t *dataPtr = keyPtr + keyLen; // variable byte data after all keys
    return svb_encode_scalar(in, keyPtr, dataPtr, count) - out;
}

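A usage sketch (names and values are illustrative, not from the original file): the output buffer must hold one key byte per four integers plus up to four data bytes per integer.

#include <stdio.h>
#include <stdlib.h>

int encode_example(void) {
    uint32_t values[6] = { 7, 300, 70000, 1u << 25, 0, 42 };
    uint32_t count = 6;
    size_t maxsize = (count + 3) / 4 + 4 * (size_t) count; // worst case: keys + data
    uint8_t *buf = malloc(maxsize);
    if (buf == NULL)
        return -1;
    size_t written = streamvbyte_encode(values, count, buf);
    printf("compressed %u integers into %zu bytes\n", count, written);
    free(buf);
    return 0;
}
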
static inline __m128i _decode_avx(uint32_t key,
        const uint8_t *__restrict__ *dataPtrPtr) {
    uint8_t len = lengthTable[key];
    __m128i Data = _mm_loadu_si128((__m128i *) *dataPtrPtr);
    __m128i Shuf = *(__m128i *) &shuffleTable[key];

    Data = _mm_shuffle_epi8(Data, Shuf);
    *dataPtrPtr += len;
    return Data;
}

static inline void _write_avx(uint32_t *out, __m128i Vec) {
    _mm_storeu_si128((__m128i *) out, Vec);
}

static inline uint32_t _decode_data(const uint8_t **dataPtrPtr, uint8_t code) {
    const uint8_t *dataPtr = *dataPtrPtr;
    uint32_t val;

    if (code == 0) { // 1 byte
        val = (uint32_t) *dataPtr;
        dataPtr += 1;
    } else if (code == 1) { // 2 bytes
        val = (uint32_t) *(uint16_t *) dataPtr;
        dataPtr += 2;
    } else if (code == 2) { // 3 bytes
        val = (uint32_t) *(uint16_t *) dataPtr;
        val |= *(dataPtr + 2) << 16;
        dataPtr += 3;
    } else { // code == 3
        val = *(uint32_t *) dataPtr; // 4 bytes
        dataPtr += 4;
    }

    *dataPtrPtr = dataPtr;
    return val;
}

static const uint8_t *svb_decode_scalar(uint32_t *outPtr, const uint8_t *keyPtr,
        const uint8_t *dataPtr, uint32_t count) {
    if (count == 0)
        return dataPtr; // no reads or writes if no data

    uint8_t shift = 0;
    uint32_t key = *keyPtr++;
    for (uint32_t c = 0; c < count; c++) {
        if (shift == 8) {
            shift = 0;
            key = *keyPtr++;
        }
        uint32_t val = _decode_data(&dataPtr, (key >> shift) & 0x3);
        *outPtr++ = val;
        shift += 2;
    }

    return dataPtr; // pointer to first unused byte after end
}

const uint8_t *svb_decode_avx_simple(uint32_t *out,
        const uint8_t *__restrict__ keyPtr, const uint8_t *__restrict__ dataPtr,
        uint64_t count) {

    uint64_t keybytes = count / 4; // number of key bytes
    __m128i Data;
    if (keybytes >= 8) {

        int64_t Offset = -(int64_t) keybytes / 8 + 1;

        const uint64_t *keyPtr64 = (const uint64_t *) keyPtr - Offset;
        uint64_t nextkeys = keyPtr64[Offset];
        for (; Offset != 0; ++Offset) {
            uint64_t keys = nextkeys;
            nextkeys = keyPtr64[Offset + 1];

            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 4, Data);

            keys >>= 16;
            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out + 8, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 12, Data);

            keys >>= 16;
            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out + 16, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 20, Data);

            keys >>= 16;
            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out + 24, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 28, Data);

            out += 32;
        }
        {
            uint64_t keys = nextkeys;

            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 4, Data);

            keys >>= 16;
            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out + 8, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 12, Data);

            keys >>= 16;
            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out + 16, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 20, Data);

            keys >>= 16;
            Data = _decode_avx((keys & 0xFF), &dataPtr);
            _write_avx(out + 24, Data);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            _write_avx(out + 28, Data);

            out += 32;
        }
    }
    uint64_t consumedkeys = keybytes - (keybytes & 7);
    return svb_decode_scalar(out, keyPtr + consumedkeys, dataPtr, count & 31);
}

// Read count 32-bit integers in streamvbyte format from in, storing the result
// in out. Returns the number of bytes read.
size_t streamvbyte_decode(const uint8_t *in, uint32_t *out, uint32_t count) {
    if (count == 0)
        return 0;
    const uint8_t *keyPtr = in; // full list of keys is next
    uint32_t keyLen = ((count + 3) / 4); // 2 bits per key (rounded up)
    const uint8_t *dataPtr = keyPtr + keyLen; // data starts at end of keys
    return svb_decode_avx_simple(out, keyPtr, dataPtr, count) - in;
}

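A round-trip sketch (the helper name is hypothetical). One assumption worth noting: the SSE decoder above loads 16 bytes at a time with _mm_loadu_si128, so it can read a little past the end of the compressed data; padding the buffer avoids touching unowned memory.

#include <stdlib.h>
#include <string.h>

int roundtrip_example(const uint32_t *values, uint32_t count) {
    size_t maxsize = (count + 3) / 4 + 4 * (size_t) count + 16; // keys + data + SSE slack
    uint8_t *buf = malloc(maxsize);
    uint32_t *back = malloc(count * sizeof(uint32_t));
    if (buf == NULL || back == NULL) {
        free(buf);
        free(back);
        return -1;
    }
    size_t written = streamvbyte_encode(values, count, buf);
    size_t read = streamvbyte_decode(buf, back, count);
    int ok = (written == read)
          && memcmp(values, back, count * sizeof(uint32_t)) == 0;
    free(buf);
    free(back);
    return ok ? 0 : -1;
}
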
@@ -1,575 +0,0 @@

#include "streamvbyte.h"
#if defined(_MSC_VER)
/* Microsoft C/C++-compatible compiler */
#include <intrin.h>
#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
/* GCC-compatible compiler, targeting x86/x86-64 */
#include <x86intrin.h>
#elif defined(__GNUC__) && defined(__ARM_NEON__)
/* GCC-compatible compiler, targeting ARM with NEON */
#include <arm_neon.h>
#elif defined(__GNUC__) && defined(__IWMMXT__)
/* GCC-compatible compiler, targeting ARM with WMMX */
#include <mmintrin.h>
#elif (defined(__GNUC__) || defined(__xlC__)) && (defined(__VEC__) || defined(__ALTIVEC__))
/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
#include <altivec.h>
#elif defined(__GNUC__) && defined(__SPE__)
/* GCC-compatible compiler, targeting PowerPC with SPE */
#include <spe.h>
#endif

static uint8_t lengthTable[256] = {
     4,  5,  6,  7,  5,  6,  7,  8,  6,  7,  8,  9,  7,  8,  9, 10,
     5,  6,  7,  8,  6,  7,  8,  9,  7,  8,  9, 10,  8,  9, 10, 11,
     6,  7,  8,  9,  7,  8,  9, 10,  8,  9, 10, 11,  9, 10, 11, 12,
     7,  8,  9, 10,  8,  9, 10, 11,  9, 10, 11, 12, 10, 11, 12, 13,
     5,  6,  7,  8,  6,  7,  8,  9,  7,  8,  9, 10,  8,  9, 10, 11,
     6,  7,  8,  9,  7,  8,  9, 10,  8,  9, 10, 11,  9, 10, 11, 12,
     7,  8,  9, 10,  8,  9, 10, 11,  9, 10, 11, 12, 10, 11, 12, 13,
     8,  9, 10, 11,  9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14,
     6,  7,  8,  9,  7,  8,  9, 10,  8,  9, 10, 11,  9, 10, 11, 12,
     7,  8,  9, 10,  8,  9, 10, 11,  9, 10, 11, 12, 10, 11, 12, 13,
     8,  9, 10, 11,  9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14,
     9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15,
     7,  8,  9, 10,  8,  9, 10, 11,  9, 10, 11, 12, 10, 11, 12, 13,
     8,  9, 10, 11,  9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14,
     9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15,
    10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15, 13, 14, 15, 16
};

static uint8_t shuffleTable[256][16] = {
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1 }, // 1111
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1 }, // 2111
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 3111
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 4111
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1 }, // 1211
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 2211
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 3211
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, -1, -1, -1 }, // 4211
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, -1, -1, -1 }, // 1311
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, -1, -1, -1 }, // 2311
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, -1, -1, -1 }, // 3311
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, -1, -1, -1 }, // 4311
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, -1, -1, -1 }, // 1411
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, -1, -1, -1 }, // 2411
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, -1, -1, -1 }, // 3411
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, -1, -1, -1 }, // 4411
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1 }, // 1121
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, -1, -1, -1 }, // 2121
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 3121
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 4121
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, -1, -1, -1 }, // 1221
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 2221
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 3221
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, -1, -1, -1 }, // 4221
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, -1, -1, -1 }, // 1321
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, -1, -1, -1 }, // 2321
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, -1, -1, -1 }, // 3321
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, -1, -1, -1 }, // 4321
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, -1, -1, -1 }, // 1421
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, -1, -1, -1 }, // 2421
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, -1, -1, -1 }, // 3421
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, -1, -1, -1 }, // 4421
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1 }, // 1131
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, -1, -1, -1 }, // 2131
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 3131
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 4131
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, -1, -1, -1 }, // 1231
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 2231
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 3231
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, -1, -1, -1 }, // 4231
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, -1, -1, -1 }, // 1331
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, -1, -1, -1 }, // 2331
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, -1, -1, -1 }, // 3331
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, -1, -1, -1 }, // 4331
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, -1, -1 }, // 1431
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, -1, -1, -1 }, // 2431
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, -1, -1, -1 }, // 3431
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, -1, -1, -1 }, // 4431
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1 }, // 1141
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, -1, -1, -1 }, // 2141
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 3141
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 4141
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, -1, -1, -1 }, // 1241
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 2241
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 3241
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, -1, -1, -1 }, // 4241
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, -1, -1, -1 }, // 1341
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, -1, -1, -1 }, // 2341
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, -1, -1, -1 }, // 3341
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, -1, -1, -1 }, // 4341
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1 }, // 1441
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, -1, -1 }, // 2441
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1, -1 }, // 3441
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1, -1 }, // 4441
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1 }, // 1112
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, -1, -1 }, // 2112
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 3112
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 4112
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, -1, -1 }, // 1212
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 2212
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 3212
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, -1, -1 }, // 4212
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, -1, -1 }, // 1312
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, -1, -1 }, // 2312
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, -1, -1 }, // 3312
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, -1, -1 }, // 4312
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, -1, -1 }, // 1412
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, -1, -1 }, // 2412
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, -1, -1 }, // 3412
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, -1, -1 }, // 4412
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1 }, // 1122
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, -1, -1 }, // 2122
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 3122
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 4122
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, -1, -1 }, // 1222
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 2222
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 3222
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, -1, -1 }, // 4222
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, -1, -1 }, // 1322
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, -1, -1 }, // 2322
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, -1, -1 }, // 3322
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, -1, -1 }, // 4322
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, -1, -1 }, // 1422
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, -1, -1 }, // 2422
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, -1, -1 }, // 3422
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, -1, -1 }, // 4422
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1 }, // 1132
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, -1, -1 }, // 2132
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 3132
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 4132
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, -1, -1 }, // 1232
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 2232
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 3232
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, -1, -1 }, // 4232
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, -1, -1 }, // 1332
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, -1, -1 }, // 2332
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, -1, -1 }, // 3332
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, -1, -1 }, // 4332
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, -1, -1 }, // 1432
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, -1, -1 }, // 2432
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, -1, -1 }, // 3432
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, -1, -1 }, // 4432
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1 }, // 1142
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, -1, -1 }, // 2142
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 3142
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 4142
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, -1, -1 }, // 1242
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 2242
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 3242
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, -1, -1 }, // 4242
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, -1, -1 }, // 1342
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, -1, -1 }, // 2342
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, -1, -1 }, // 3342
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, -1, -1 }, // 4342
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, -1 }, // 1442
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1 }, // 2442
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1 }, // 3442
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1, -1 }, // 4442
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1 }, // 1113
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, 6, -1 }, // 2113
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 3113
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 4113
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, 6, -1 }, // 1213
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 2213
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 3213
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, 9, -1 }, // 4213
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, 7, -1 }, // 1313
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, 8, -1 }, // 2313
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, 9, -1 }, // 3313
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, 10, -1 }, // 4313
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, 8, -1 }, // 1413
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, 9, -1 }, // 2413
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, 10, -1 }, // 3413
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, 11, -1 }, // 4413
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1 }, // 1123
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, 7, -1 }, // 2123
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 3123
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 4123
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, 7, -1 }, // 1223
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 2223
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 3223
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, 10, -1 }, // 4223
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, 8, -1 }, // 1323
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, 9, -1 }, // 2323
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, 10, -1 }, // 3323
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, 11, -1 }, // 4323
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, 9, -1 }, // 1423
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, -1 }, // 2423
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, 11, -1 }, // 3423
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, 12, -1 }, // 4423
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1 }, // 1133
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, 8, -1 }, // 2133
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 3133
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 4133
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, 8, -1 }, // 1233
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 2233
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 3233
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, 11, -1 }, // 4233
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, 9, -1 }, // 1333
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, 10, -1 }, // 2333
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, -1 }, // 3333
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, 12, -1 }, // 4333
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, 10, -1 }, // 1433
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, 11, -1 }, // 2433
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, 12, -1 }, // 3433
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13, -1 }, // 4433
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1 }, // 1143
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, 9, -1 }, // 2143
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 3143
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 4143
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, 9, -1 }, // 1243
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 2243
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 3243
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, -1 }, // 4243
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, 10, -1 }, // 1343
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, 11, -1 }, // 2343
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, 12, -1 }, // 3343
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, 13, -1 }, // 4343
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1 }, // 1443
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1 }, // 2443
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1 }, // 3443
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -1 }, // 4443
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6 }, // 1114
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1, 4, 5, 6, 7 }, // 2114
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 3114
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 4114
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, -1, -1, -1, 4, 5, 6, 7 }, // 1214
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 2214
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 3214
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1, -1, 7, 8, 9, 10 }, // 4214
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, -1, -1, -1, 5, 6, 7, 8 }, // 1314
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, -1, -1, -1, 6, 7, 8, 9 }, // 2314
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, -1, -1, -1, 7, 8, 9, 10 }, // 3314
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, -1, -1, -1, 8, 9, 10, 11 }, // 4314
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, -1, -1, 6, 7, 8, 9 }, // 1414
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, -1, -1, -1, 7, 8, 9, 10 }, // 2414
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, -1, -1, -1, 8, 9, 10, 11 }, // 3414
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, 9, 10, 11, 12 }, // 4414
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7 }, // 1124
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, -1, -1, 5, 6, 7, 8 }, // 2124
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 3124
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 4124
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, -1, -1, 5, 6, 7, 8 }, // 1224
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 2224
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 3224
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, -1, -1, 8, 9, 10, 11 }, // 4224
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, -1, -1, 6, 7, 8, 9 }, // 1324
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, -1, -1, 7, 8, 9, 10 }, // 2324
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, -1, -1, 8, 9, 10, 11 }, // 3324
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, -1, -1, 9, 10, 11, 12 }, // 4324
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, -1, -1, 7, 8, 9, 10 }, // 1424
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11 }, // 2424
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, -1, -1, 9, 10, 11, 12 }, // 3424
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, 10, 11, 12, 13 }, // 4424
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8 }, // 1134
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, -1, 6, 7, 8, 9 }, // 2134
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 3134
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 4134
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, -1, 6, 7, 8, 9 }, // 1234
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 2234
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 3234
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, -1, 9, 10, 11, 12 }, // 4234
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, 9, 10 }, // 1334
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, -1, 8, 9, 10, 11 }, // 2334
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, 12 }, // 3334
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, -1, 10, 11, 12, 13 }, // 4334
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, -1, 8, 9, 10, 11 }, // 1434
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, -1, 9, 10, 11, 12 }, // 2434
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, -1, 10, 11, 12, 13 }, // 3434
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, 11, 12, 13, 14 }, // 4434
{ 0, -1, -1, -1, 1, -1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9 }, // 1144
{ 0, 1, -1, -1, 2, -1, -1, -1, 3, 4, 5, 6, 7, 8, 9, 10 }, // 2144
{ 0, 1, 2, -1, 3, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 3144
{ 0, 1, 2, 3, 4, -1, -1, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 4144
{ 0, -1, -1, -1, 1, 2, -1, -1, 3, 4, 5, 6, 7, 8, 9, 10 }, // 1244
{ 0, 1, -1, -1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 2244
{ 0, 1, 2, -1, 3, 4, -1, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 3244
{ 0, 1, 2, 3, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13 }, // 4244
{ 0, -1, -1, -1, 1, 2, 3, -1, 4, 5, 6, 7, 8, 9, 10, 11 }, // 1344
{ 0, 1, -1, -1, 2, 3, 4, -1, 5, 6, 7, 8, 9, 10, 11, 12 }, // 2344
{ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, 9, 10, 11, 12, 13 }, // 3344
{ 0, 1, 2, 3, 4, 5, 6, -1, 7, 8, 9, 10, 11, 12, 13, 14 }, // 4344
{ 0, -1, -1, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }, // 1444
{ 0, 1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, // 2444
{ 0, 1, 2, -1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }, // 3444
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }  // 4444
};

static uint8_t _encode_data(uint32_t val, uint8_t *__restrict__ *dataPtrPtr) {
    uint8_t *dataPtr = *dataPtrPtr;
    uint8_t code;

    if (val < (1 << 8)) { // 1 byte
        *dataPtr = (uint8_t)(val);
        *dataPtrPtr += 1;
        code = 0;
    } else if (val < (1 << 16)) { // 2 bytes
        *(uint16_t *) dataPtr = (uint16_t)(val);
        *dataPtrPtr += 2;
        code = 1;
    } else if (val < (1 << 24)) { // 3 bytes
        *(uint16_t *) dataPtr = (uint16_t)(val);
        *(dataPtr + 2) = (uint8_t)(val >> 16);
        *dataPtrPtr += 3;
        code = 2;
    } else { // 4 bytes
        *(uint32_t *) dataPtr = val;
        *dataPtrPtr += 4;
        code = 3;
    }

    return code;
}

static uint8_t *svb_encode_scalar_d1_init(const uint32_t *in,
        uint8_t *__restrict__ keyPtr, uint8_t *__restrict__ dataPtr,
        uint32_t count, uint32_t prev) {
    if (count == 0)
        return dataPtr; // exit immediately if no data

    uint8_t shift = 0; // cycles 0, 2, 4, 6, 0, 2, 4, 6, ...
    uint8_t key = 0;
    for (uint32_t c = 0; c < count; c++) {
        if (shift == 8) {
            shift = 0;
            *keyPtr++ = key;
            key = 0;
        }
        uint32_t val = in[c] - prev;
        prev = in[c];
        uint8_t code = _encode_data(val, &dataPtr);
        key |= code << shift;
        shift += 2;
    }

    *keyPtr = key;  // write last key (no increment needed)
    return dataPtr; // pointer to first unused data byte
}

size_t streamvbyte_delta_encode(const uint32_t *in, uint32_t count, uint8_t *out,
        uint32_t prev) {
    uint8_t *keyPtr = out; // keys are written at the head of the output
    uint32_t keyLen = (count + 3) / 4; // 2 bits per integer, rounded up to a full byte
    uint8_t *dataPtr = keyPtr + keyLen; // variable byte data after all keys

    return svb_encode_scalar_d1_init(in, keyPtr, dataPtr, count, prev) - out;
}

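A usage sketch (values are illustrative, not from the original file): delta coding pays off on non-decreasing sequences such as sorted document ids, where each difference fits in one byte even when the absolute values do not. The prev argument seeds the first difference, so the decoder must later be called with the same seed.

#include <stdio.h>

int delta_encode_example(void) {
    uint32_t docids[5] = { 1000000, 1000003, 1000003, 1000010, 1000100 };
    uint8_t buf[2 + 5 * 4]; // worst case: 2 key bytes + 4 data bytes per integer
    // Deltas are 1000000 (from the seed 0), then 3, 0, 7, 90, so only the
    // first integer needs the wide 3-byte encoding.
    size_t written = streamvbyte_delta_encode(docids, 5, buf, 0);
    printf("5 deltas stored in %zu bytes\n", written);
    return 0;
}
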
static inline __m128i _decode_avx(uint32_t key, const uint8_t *__restrict__ *dataPtrPtr) {
    uint8_t len = lengthTable[key];
    __m128i Data = _mm_loadu_si128((__m128i *) *dataPtrPtr);
    __m128i Shuf = *(__m128i *) &shuffleTable[key];

    Data = _mm_shuffle_epi8(Data, Shuf);
    *dataPtrPtr += len;

    return Data;
}
#define BroadcastLastXMM 0xFF // _mm_shuffle_epi32 mask: broadcast the last (highest) 32-bit lane

static inline void _write_avx(uint32_t *out, __m128i Vec) {
    _mm_storeu_si128((__m128i *) out, Vec);
}

static __m128i _write_avx_d1(uint32_t *out, __m128i Vec, __m128i Prev) {
    __m128i Add = _mm_slli_si128(Vec, 4);             // Cycle 1: [- A B C]
    Prev = _mm_shuffle_epi32(Prev, BroadcastLastXMM); // Cycle 2: [P P P P]
    Vec = _mm_add_epi32(Vec, Add);                    // Cycle 2: [A AB BC CD]
    Add = _mm_slli_si128(Vec, 8);                     // Cycle 3: [- - A AB]
    Vec = _mm_add_epi32(Vec, Prev);                   // Cycle 3: [PA PAB PBC PCD]
    Vec = _mm_add_epi32(Vec, Add);                    // Cycle 4: [PA PAB PABC PABCD]

    _write_avx(out, Vec);
    return Vec;
}

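_write_avx_d1 is a SIMD inclusive prefix sum: it turns four stored deltas back into absolute values and returns the vector so its last lane can seed the next group. A scalar sketch of the same computation (illustrative, not part of the original file):

static uint32_t prefix_sum_4(uint32_t *out, const uint32_t delta[4], uint32_t prev) {
    for (int i = 0; i < 4; i++) {
        prev += delta[i]; // running sum: P+A, P+A+B, P+A+B+C, P+A+B+C+D
        out[i] = prev;
    }
    return prev; // corresponds to the last lane of the returned vector
}
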
#ifndef _MSC_VER
static __m128i High16To32 = {0xFFFF0B0AFFFF0908, 0xFFFF0F0EFFFF0D0C};
#else
static __m128i High16To32 = {8, 9, -1, -1, 10, 11, -1, -1,
                             12, 13, -1, -1, 14, 15, -1, -1};
#endif

static inline __m128i _write_16bit_avx_d1(uint32_t *out, __m128i Vec, __m128i Prev) {
    // vec == [A B C D E F G H] (16-bit values)
    __m128i Add = _mm_slli_si128(Vec, 2);             // [- A B C D E F G]
    Prev = _mm_shuffle_epi32(Prev, BroadcastLastXMM); // [P P P P] (32-bit)
    Vec = _mm_add_epi32(Vec, Add);                    // [A AB BC CD DE EF FG GH]
    Add = _mm_slli_si128(Vec, 4);                     // [- - A AB BC CD DE EF]
    Vec = _mm_add_epi32(Vec, Add);                    // [A AB ABC ABCD BCDE CDEF DEFG EFGH]
    __m128i V1 = _mm_cvtepu16_epi32(Vec);             // [A AB ABC ABCD] (32-bit)
    V1 = _mm_add_epi32(V1, Prev);                     // [PA PAB PABC PABCD] (32-bit)
    __m128i V2 =
        _mm_shuffle_epi8(Vec, High16To32);            // [BCDE CDEF DEFG EFGH] (32-bit)
    V2 = _mm_add_epi32(V1, V2); // [PABCDE PABCDEF PABCDEFG PABCDEFGH] (32-bit)
    _write_avx(out, V1);
    _write_avx(out + 4, V2);
    return V2;
}

static inline uint32_t _decode_data(const uint8_t **dataPtrPtr, uint8_t code) {
    const uint8_t *dataPtr = *dataPtrPtr;
    uint32_t val;

    if (code == 0) { // 1 byte
        val = (uint32_t) *dataPtr;
        dataPtr += 1;
    } else if (code == 1) { // 2 bytes
        val = (uint32_t) *(uint16_t *) dataPtr;
        dataPtr += 2;
    } else if (code == 2) { // 3 bytes
        val = (uint32_t) *(uint16_t *) dataPtr;
        val |= *(dataPtr + 2) << 16;
        dataPtr += 3;
    } else { // code == 3
        val = *(uint32_t *) dataPtr; // 4 bytes
        dataPtr += 4;
    }

    *dataPtrPtr = dataPtr;
    return val;
}

const uint8_t *svb_decode_scalar_d1_init(uint32_t *outPtr, const uint8_t *keyPtr,
        const uint8_t *dataPtr, uint32_t count,
        uint32_t prev) {
    if (count == 0)
        return dataPtr; // no reads or writes if no data

    uint8_t shift = 0;
    uint32_t key = *keyPtr++;

    for (uint32_t c = 0; c < count; c++) {
        if (shift == 8) {
            shift = 0;
            key = *keyPtr++;
        }
        uint32_t val = _decode_data(&dataPtr, (key >> shift) & 0x3);
        val += prev;
        *outPtr++ = val;
        prev = val;
        shift += 2;
    }

    return dataPtr; // pointer to first unused byte after end
}

const uint8_t *svb_decode_avx_d1_init(uint32_t *out, const uint8_t *__restrict__ keyPtr,
        const uint8_t *__restrict__ dataPtr, uint64_t count, uint32_t prev) {
    uint64_t keybytes = count / 4; // number of key bytes
    if (keybytes >= 8) {
        __m128i Prev = _mm_set1_epi32(prev);
        __m128i Data;

        int64_t Offset = -(int64_t) keybytes / 8 + 1;

        const uint64_t *keyPtr64 = (const uint64_t *) keyPtr - Offset;
        uint64_t nextkeys = keyPtr64[Offset];
        for (; Offset != 0; ++Offset) {
            uint64_t keys = nextkeys;
            nextkeys = keyPtr64[Offset + 1];
            // faster 16-bit delta since we only have 8-bit values
            if (!keys) { // 32 1-byte ints in a row

                Data = _mm_cvtepu8_epi16(_mm_lddqu_si128((__m128i *) (dataPtr)));
                Prev = _write_16bit_avx_d1(out, Data, Prev);
                Data = _mm_cvtepu8_epi16(
                    _mm_lddqu_si128((__m128i *) (dataPtr + 8)));
                Prev = _write_16bit_avx_d1(out + 8, Data, Prev);
                Data = _mm_cvtepu8_epi16(
                    _mm_lddqu_si128((__m128i *) (dataPtr + 16)));
                Prev = _write_16bit_avx_d1(out + 16, Data, Prev);
                Data = _mm_cvtepu8_epi16(
                    _mm_lddqu_si128((__m128i *) (dataPtr + 24)));
                Prev = _write_16bit_avx_d1(out + 24, Data, Prev);
                out += 32;
                dataPtr += 32;
                continue;
            }

            Data = _decode_avx(keys & 0x00FF, &dataPtr);
            Prev = _write_avx_d1(out, Data, Prev);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            Prev = _write_avx_d1(out + 4, Data, Prev);

            keys >>= 16;
            Data = _decode_avx((keys & 0x00FF), &dataPtr);
            Prev = _write_avx_d1(out + 8, Data, Prev);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            Prev = _write_avx_d1(out + 12, Data, Prev);

            keys >>= 16;
            Data = _decode_avx((keys & 0x00FF), &dataPtr);
            Prev = _write_avx_d1(out + 16, Data, Prev);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            Prev = _write_avx_d1(out + 20, Data, Prev);

            keys >>= 16;
            Data = _decode_avx((keys & 0x00FF), &dataPtr);
            Prev = _write_avx_d1(out + 24, Data, Prev);
            Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
            Prev = _write_avx_d1(out + 28, Data, Prev);

            out += 32;
        }
        {
            uint64_t keys = nextkeys;
            // faster 16-bit delta since we only have 8-bit values
            if (!keys) { // 32 1-byte ints in a row
                Data = _mm_cvtepu8_epi16(_mm_lddqu_si128((__m128i *) (dataPtr)));
                Prev = _write_16bit_avx_d1(out, Data, Prev);
                Data = _mm_cvtepu8_epi16(
                    _mm_lddqu_si128((__m128i *) (dataPtr + 8)));
                Prev = _write_16bit_avx_d1(out + 8, Data, Prev);
                Data = _mm_cvtepu8_epi16(
                    _mm_lddqu_si128((__m128i *) (dataPtr + 16)));
                Prev = _write_16bit_avx_d1(out + 16, Data, Prev);
                Data = _mm_cvtepu8_epi16(
                    _mm_loadl_epi64((__m128i *) (dataPtr + 24)));
                Prev = _write_16bit_avx_d1(out + 24, Data, Prev);
                out += 32;
                dataPtr += 32;

            } else {

                Data = _decode_avx(keys & 0x00FF, &dataPtr);
                Prev = _write_avx_d1(out, Data, Prev);
                Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
                Prev = _write_avx_d1(out + 4, Data, Prev);

                keys >>= 16;
                Data = _decode_avx((keys & 0x00FF), &dataPtr);
                Prev = _write_avx_d1(out + 8, Data, Prev);
                Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
                Prev = _write_avx_d1(out + 12, Data, Prev);

                keys >>= 16;
                Data = _decode_avx((keys & 0x00FF), &dataPtr);
                Prev = _write_avx_d1(out + 16, Data, Prev);
                Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
                Prev = _write_avx_d1(out + 20, Data, Prev);

                keys >>= 16;
                Data = _decode_avx((keys & 0x00FF), &dataPtr);
                Prev = _write_avx_d1(out + 24, Data, Prev);
                Data = _decode_avx((keys & 0xFF00) >> 8, &dataPtr);
                Prev = _write_avx_d1(out + 28, Data, Prev);

                out += 32;
            }
        }
        prev = out[-1];
    }
    uint64_t consumedkeys = keybytes - (keybytes & 7);
    return svb_decode_scalar_d1_init(out, keyPtr + consumedkeys, dataPtr,
                                     count & 31, prev);
}

size_t streamvbyte_delta_decode(const uint8_t *in, uint32_t *out,
        uint32_t count, uint32_t prev) {
    uint32_t keyLen = ((count + 3) / 4); // 2 bits per key (rounded up)
    const uint8_t *keyPtr = in;
    const uint8_t *dataPtr = keyPtr + keyLen; // data starts at end of keys
    return svb_decode_avx_d1_init(out, keyPtr, dataPtr, count, prev) - in;
}

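A delta round-trip sketch mirroring the plain round trip above (the helper name is illustrative); the seed passed to decode must match the one used to encode:

#include <stdlib.h>
#include <string.h>

int delta_roundtrip_example(const uint32_t *values, uint32_t count, uint32_t seed) {
    size_t maxsize = (count + 3) / 4 + 4 * (size_t) count + 16; // keys + data + SSE slack
    uint8_t *buf = malloc(maxsize);
    uint32_t *back = malloc(count * sizeof(uint32_t));
    if (buf == NULL || back == NULL) {
        free(buf);
        free(back);
        return -1;
    }
    size_t written = streamvbyte_delta_encode(values, count, buf, seed);
    size_t read = streamvbyte_delta_decode(buf, back, count, seed);
    int ok = (written == read)
          && memcmp(values, back, count * sizeof(uint32_t)) == 0;
    free(buf);
    free(back);
    return ok ? 0 : -1;
}
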
@@ -1,73 +0,0 @@

#include <stdio.h>
#include <stdlib.h>

#include "streamvbyte.h"
#include "streamvbytedelta.h"

int main() {
    int N = 4096;
    uint32_t *datain = malloc(N * sizeof(uint32_t));
    uint8_t *compressedbuffer = malloc(2 * N * sizeof(uint32_t));
    uint32_t *recovdata = malloc(N * sizeof(uint32_t));

    for (int length = 0; length <= N;) {
        printf("length = %d \n", length);
        for (uint32_t gap = 1; gap <= 387420489; gap *= 3) {
            for (int k = 0; k < length; ++k)
                datain[k] = gap;
            size_t compsize = streamvbyte_encode(datain, length, compressedbuffer);
            size_t usedbytes = streamvbyte_decode(compressedbuffer, recovdata, length);
            if (compsize != usedbytes) {
                printf("[streamvbyte_decode] code is buggy gap = %d, size mismatch %d %d \n",
                       (int) gap, (int) compsize, (int) usedbytes);
                return -1;
            }
            for (int k = 0; k < length; ++k) {
                if (recovdata[k] != datain[k]) {
                    printf("[streamvbyte_decode] code is buggy gap = %d\n",
                           (int) gap);
                    return -1;
                }
            }
        }

        printf("Delta \n");
        for (size_t gap = 1; gap <= 531441; gap *= 3) {
            for (int k = 0; k < length; ++k)
                datain[k] = gap * k;
            size_t compsize = streamvbyte_delta_encode(datain, length,
                                                       compressedbuffer, 0);
            size_t usedbytes = streamvbyte_delta_decode(compressedbuffer,
                                                        recovdata, length, 0);
            if (compsize != usedbytes) {
                printf("[streamvbyte_delta_decode] code is buggy gap = %d, size mismatch %d %d \n",
                       (int) gap, (int) compsize, (int) usedbytes);
                return -1;
            }
            for (int k = 0; k < length; ++k) {
                if (recovdata[k] != datain[k]) {
                    printf("[streamvbyte_delta_decode] code is buggy gap = %d\n",
                           (int) gap);
                    return -1;
                }
            }
        }

        if (length < 128)
            ++length;
        else {
            length *= 2;
        }
    }
    free(datain);
    free(compressedbuffer);
    free(recovdata);
    printf("Code looks good.\n");
    return 0;
}

@@ -30,10 +30,12 @@

-extern crate rustc_serialize;
-extern crate tantivy;
+extern crate tantivy;
+extern crate tempdir;
+
+#[macro_use]
+extern crate serde_json;

 use std::path::Path;
 use tempdir::TempDir;
 use tantivy::Index;

@@ -108,8 +110,8 @@ be indexed”.

 Our first field is title.
-We want full-text search for it, and we want to be able
-to retrieve the document after the search.
+We want full-text search for it, and we also want
+to be able to retrieve the document after the search.

 TEXT | STORED is some syntactic sugar to describe that.

 `TEXT` means the field should be tokenized and indexed,

@@ -132,9 +134,12 @@ documents that were selected during the search phase.

-Our first field is body.
-We want full-text search for it, and we want to be able
-to retrieve the body after the search.
+Our second field is body.
+We want full-text search for it, but we do not
+need to be able to retrieve it for our application.
+
+We can make our index lighter by omitting the `STORED` flag.

@@ -158,7 +163,7 @@ with our schema in the directory.

-    let index = try!(Index::create(index_path, schema.clone()));
+    let index = Index::create(index_path, schema.clone())?;

@@ -178,7 +183,7 @@ heap for the indexer can increase its throughput.

-    let mut index_writer = try!(index.writer(50_000_000));
+    let mut index_writer = index.writer(50_000_000)?;

@@ -214,9 +219,11 @@ one by one in a Document object.

     let mut old_man_doc = Document::default();
     old_man_doc.add_text(title, "The Old Man and the Sea");
-    old_man_doc.add_text(body,
-        "He was an old man who fished alone in a skiff in the Gulf Stream and \
-         he had gone eighty-four days now without taking a fish.");
+    old_man_doc.add_text(
+        body,
+        "He was an old man who fished alone in a skiff in the Gulf Stream and \
+         he had gone eighty-four days now without taking a fish.",
+    );

@@ -243,16 +250,25 @@ one by one in a Document object.

 Create a document directly from json.

-Alternatively, we can use our schema to parse
-a document object directly from json.
+Alternatively, we can use our schema to parse a
+document object directly from json.
+The document is a string, but we use the `json` macro
+from `serde_json` for the convenience of multi-line support.

-    let mice_and_men_doc = try!(schema.parse_document(r#"{
-       "title": "Of Mice and Men",
-       "body": "few miles south of Soledad, the Salinas River drops in close to the hillside bank and runs deep and green. The water is warm too, for it has slipped twinkling over the yellow sands in the sunlight before reaching the narrow pool. On one side of the river the golden foothill slopes curve up to the strong and rocky Gabilan Mountains, but on the valley side the water is lined with trees—willows fresh and green with every spring, carrying in their lower leaf junctures the debris of the winter’s flooding; and sycamores with mottled, white,recumbent limbs and branches that arch over the pool"
-    }"#));
+    let json = json!({
+        "title": "Of Mice and Men",
+        "body": "A few miles south of Soledad, the Salinas River drops in close to the hillside \
+                 bank and runs deep and green. The water is warm too, for it has slipped twinkling \
+                 over the yellow sands in the sunlight before reaching the narrow pool. On one \
+                 side of the river the golden foothill slopes curve up to the strong and rocky \
+                 Gabilan Mountains, but on the valley side the water is lined with trees—willows \
+                 fresh and green with every spring, carrying in their lower leaf junctures the \
+                 debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
+                 limbs and branches that arch over the pool"
+    });
+    let mice_and_men_doc = schema.parse_document(&json.to_string())?;

     index_writer.add_document(mice_and_men_doc);

@@ -271,10 +287,15 @@ The following document has two titles.

-    let frankenstein_doc = try!(schema.parse_document(r#"{
-       "title": ["Frankenstein", "The Modern Promotheus"],
-       "body": "You will rejoice to hear that no disaster has accompanied the commencement of an enterprise which you have regarded with such evil forebodings. I arrived here yesterday, and my first task is to assure my dear sister of my welfare and increasing confidence in the success of my undertaking."
-    }"#));
+    let json = json!({
+        "title": ["Frankenstein", "The Modern Prometheus"],
+        "body": "You will rejoice to hear that no disaster has accompanied the commencement of an \
+                 enterprise which you have regarded with such evil forebodings. I arrived here \
+                 yesterday, and my first task is to assure my dear sister of my welfare and \
+                 increasing confidence in the success of my undertaking."
+    });
+    let frankenstein_doc = schema.parse_document(&json.to_string())?;

     index_writer.add_document(frankenstein_doc);

@@ -313,7 +334,7 @@ the existence of new documents.

-    try!(index_writer.commit());
+    index_writer.commit()?;

@@ -349,7 +370,7 @@ after every commit().

-    try!(index.load_searchers());
+    index.load_searchers()?;

@@ -384,7 +405,7 @@ in both title and body.

-    let query_parser = QueryParser::new(index.schema(), vec![title, body]);
+    let mut query_parser = QueryParser::for_index(index, vec![title, body]);

@@ -401,7 +422,7 @@ A ticket has been opened regarding this problem.

-    let query = try!(query_parser.parse_query("sea whale"));
+    let query = query_parser.parse_query("sea whale")?;

@@ -451,7 +472,7 @@ is the role of the TopCollector.

-    try!(searcher.search(&*query, &mut top_collector));
+    searcher.search(&*query, &mut top_collector)?;

@@ -488,9 +509,27 @@ a title.

     for doc_address in doc_addresses {
-        let retrieved_doc = try!(searcher.doc(&doc_address));
+        let retrieved_doc = searcher.doc(&doc_address)?;
         println!("{}", schema.to_json(&retrieved_doc));
     }
-}

+Wait for indexing and merging threads to shut down.
+Usually this isn’t needed, but in `main` we try to
+delete the temporary directory and that fails on
+Windows if the files are still open.

+    index_writer.wait_merging_threads()?;
+
+    Ok(())
+}

@@ -20,10 +20,7 @@ fn main() {
     }
 }

 fn run_example(index_path: &Path) -> tantivy::Result<()> {

     // # Defining the schema
     //
     // The Tantivy index requires a very strict schema.
@@ -31,13 +28,12 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
     // and for each field, its type and "the way it should
     // be indexed".

     // first we need to define a schema ...
     let mut schema_builder = SchemaBuilder::default();

     // Our first field is title.
-    // We want full-text search for it, and we want to be able
-    // to retrieve the document after the search.
+    // We want full-text search for it, and we also want
+    // to be able to retrieve the document after the search.
     //
     // TEXT | STORED is some syntactic sugar to describe
     // that.
@@ -51,15 +47,17 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
     // documents that were selected during the search phase.
     schema_builder.add_text_field("title", TEXT | STORED);

-    // Our first field is body.
-    // We want full-text search for it, and we want to be able
-    // to retrieve the body after the search.
+    // Our second field is body.
+    // We want full-text search for it, but we do not
+    // need to be able to retrieve it for our application.
+    //
+    // We can make our index lighter by omitting the `STORED` flag.
     schema_builder.add_text_field("body", TEXT);

     let schema = schema_builder.build();

     // # Indexing documents
     //
     // Let's create a brand new index.
@@ -68,7 +66,6 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
     // with our schema in the directory.
     let index = Index::create(index_path, schema.clone())?;

     // To insert document we need an index writer.
     // There must be only one writer at a time.
     // This single `IndexWriter` is already
@@ -81,7 +78,6 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
     // Let's index our documents!
     // We first need a handle on the title and the body field.

     // ### Create a document "manually".
     //
     // We can create a document manually, by setting the fields
@@ -91,9 +87,11 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {

     let mut old_man_doc = Document::default();
     old_man_doc.add_text(title, "The Old Man and the Sea");
-    old_man_doc.add_text(body,
-        "He was an old man who fished alone in a skiff in the Gulf Stream and \
-         he had gone eighty-four days now without taking a fish.");
+    old_man_doc.add_text(
+        body,
+        "He was an old man who fished alone in a skiff in the Gulf Stream and \
+         he had gone eighty-four days now without taking a fish.",
+    );

     // ... and add it to the `IndexWriter`.
     index_writer.add_document(old_man_doc);
@@ -139,7 +137,6 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
     // Indexing 5 million articles of the English wikipedia takes
     // around 4 minutes on my computer!

     // ### Committing
     //
     // At this point our documents are not searchable.
@@ -161,7 +158,6 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
     // tantivy behaves as if it has rolled back to its last
     // commit.

     // # Searching
     //
     // Let's search our index. Start by reloading
@@ -179,14 +175,13 @@ fn run_example(index_path: &Path) -> tantivy::Result<()> {
     // Here, if the user does not specify which
     // field they want to search, tantivy will search
     // in both title and body.
-    let query_parser = QueryParser::new(index.schema(), vec![title, body]);
+    let query_parser = QueryParser::for_index(&index, vec![title, body]);

     // QueryParser may fail if the query is not in the right
     // format. For user facing applications, this can be a problem.
     // A ticket has been opened regarding this problem.
     let query = query_parser.parse_query("sea whale")?;

     // A query defines a set of documents, as
     // well as the way they should be scored.
     //

rustfmt.toml (new file, 1 line)
@@ -0,0 +1 @@
use_try_shorthand = true

@@ -1,10 +0,0 @@

#!/bin/bash
DEST=target/doc/tantivy/docs/
mkdir -p $DEST

for f in $(ls docs/*.md)
do
rustdoc $f -o $DEST --markdown-css ../../rustdoc.css --markdown-css style.css
done

cp docs/*.css $DEST

@@ -1,5 +0,0 @@

#/bin/bash
valgrind --tool=cachegrind target/release/tantivy-bench -i /data/wiki-index -q ./queries.txt -n 3
valgrind --tool=callgrind target/release/tantivy-bench -i /data/wiki-index -q ./queries.txt -n 3

@@ -1,84 +0,0 @@
extern crate regex;

use std::str::Chars;
use std::ascii::AsciiExt;

pub struct TokenIter<'a> {
chars: Chars<'a>,
term_buffer: String,
}

fn append_char_lowercase(c: char, term_buffer: &mut String) {
term_buffer.push(c.to_ascii_lowercase());
}

pub trait StreamingIterator<'a, T> {
fn next(&'a mut self) -> Option<T>;
}

impl<'a, 'b> TokenIter<'b> {
fn consume_token(&'a mut self) -> Option<&'a str> {
for c in &mut self.chars {
if c.is_alphanumeric() {
append_char_lowercase(c, &mut self.term_buffer);
} else {
break;
}
}
Some(&self.term_buffer)
}
}

impl<'a, 'b> StreamingIterator<'a, &'a str> for TokenIter<'b> {
#[inline]
fn next(&'a mut self) -> Option<&'a str> {
self.term_buffer.clear();
// skipping non-letter characters.
loop {
match self.chars.next() {
Some(c) => {
if c.is_alphanumeric() {
append_char_lowercase(c, &mut self.term_buffer);
return self.consume_token();
}
}
None => {
return None;
}
}
}
}
}

pub struct SimpleTokenizer;

impl SimpleTokenizer {
pub fn tokenize<'a>(&self, text: &'a str) -> TokenIter<'a> {
TokenIter {
term_buffer: String::new(),
chars: text.chars(),
}
}
}

#[test]
fn test_tokenizer() {
let simple_tokenizer = SimpleTokenizer;
let mut term_reader = simple_tokenizer.tokenize("hello, happy tax payer!");
assert_eq!(term_reader.next().unwrap(), "hello");
assert_eq!(term_reader.next().unwrap(), "happy");
assert_eq!(term_reader.next().unwrap(), "tax");
assert_eq!(term_reader.next().unwrap(), "payer");
assert_eq!(term_reader.next(), None);
}

#[test]
fn test_tokenizer_empty() {
let simple_tokenizer = SimpleTokenizer;
let mut term_reader = simple_tokenizer.tokenize("");
assert_eq!(term_reader.next(), None);
}
@@ -5,7 +5,6 @@ use SegmentReader;
use DocId;
use Score;

/// Collector that does nothing.
/// This is used in the chain Collector and will hopefully
/// be optimized away by the compiler.
@@ -17,6 +16,10 @@ impl Collector for DoNothingCollector {
}
#[inline]
fn collect(&mut self, _doc: DocId, _score: Score) {}
#[inline]
fn requires_scoring(&self) -> bool {
false
}
}

/// Zero-cost abstraction used to collect on multiple collectors.
@@ -38,12 +41,13 @@ impl<Left: Collector, Right: Collector> ChainedCollector<Left, Right> {
}

impl<Left: Collector, Right: Collector> Collector for ChainedCollector<Left, Right> {
fn set_segment(&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader)
-> Result<()> {
try!(self.left.set_segment(segment_local_id, segment));
try!(self.right.set_segment(segment_local_id, segment));
fn set_segment(
&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader,
) -> Result<()> {
self.left.set_segment(segment_local_id, segment)?;
self.right.set_segment(segment_local_id, segment)?;
Ok(())
}

@@ -51,6 +55,10 @@ impl<Left: Collector, Right: Collector> Collector for ChainedCollector<Left, Rig
self.left.collect(doc, score);
self.right.collect(doc, score);
}

fn requires_scoring(&self) -> bool {
self.left.requires_scoring() || self.right.requires_scoring()
}
}

/// Creates a `ChainedCollector`
@@ -61,7 +69,6 @@ pub fn chain() -> ChainedCollector<DoNothingCollector, DoNothingCollector> {
}
}

#[cfg(test)]
mod tests {

@@ -7,6 +7,7 @@ use SegmentLocalId;

/// `CountCollector` collector only counts how many
/// documents match the query.
#[derive(Default)]
pub struct CountCollector {
count: usize,
}
@@ -19,12 +20,6 @@ impl CountCollector {
}
}

impl Default for CountCollector {
fn default() -> CountCollector {
CountCollector { count: 0 }
}
}

impl Collector for CountCollector {
fn set_segment(&mut self, _: SegmentLocalId, _: &SegmentReader) -> Result<()> {
Ok(())
@@ -33,23 +28,27 @@ impl Collector for CountCollector {
fn collect(&mut self, _: DocId, _: Score) {
self.count += 1;
}

fn requires_scoring(&self) -> bool {
false
}
}

#[cfg(test)]
mod tests {

use super::*;
use test::Bencher;
use collector::Collector;
use collector::{Collector, CountCollector};

#[bench]
fn build_collector(b: &mut Bencher) {
b.iter(|| {
let mut count_collector = CountCollector::default();
for doc in 0..1_000_000 {
count_collector.collect(doc, 1f32);
}
count_collector.count()
});
#[test]
fn test_count_collector() {
let mut count_collector = CountCollector::default();
assert_eq!(count_collector.count(), 0);
count_collector.collect(0u32, 1f32);
assert_eq!(count_collector.count(), 1);
assert_eq!(count_collector.count(), 1);
count_collector.collect(1u32, 1f32);
assert_eq!(count_collector.count(), 2);
assert!(!count_collector.requires_scoring());
}

}
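
The `requires_scoring` method added throughout these hunks lets the enclosing search loop skip score computation when no collector will read the scores. A minimal, self-contained sketch of the idea (this driver loop is an illustration, not tantivy's actual search code; the `DocId` and `Score` aliases match this crate):

    type DocId = u32;
    type Score = f32;

    trait Collector {
        fn collect(&mut self, doc: DocId, score: Score);
        fn requires_scoring(&self) -> bool;
    }

    fn drive<C: Collector>(collector: &mut C, docs: &[DocId]) {
        // Ask once, before the hot loop.
        let scoring = collector.requires_scoring();
        for &doc in docs {
            // Only pay for a real score when someone downstream reads it.
            let score = if scoring { (doc % 7) as Score } else { 1.0 };
            collector.collect(doc, score);
        }
    }

For `ChainedCollector`, the `||` in `self.left.requires_scoring() || self.right.requires_scoring()` means a single scoring collector anywhere in the chain is enough to turn scoring back on.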

@@ -1,117 +1,637 @@
use std::cmp::Eq;
use std::collections::HashMap;
use std::hash::Hash;

use std::mem;
use collector::Collector;
use fastfield::FastFieldReader;
use fastfield::FacetReader;
use schema::Field;
use std::cell::UnsafeCell;
use schema::Facet;
use std::collections::BTreeMap;
use std::collections::BinaryHeap;
use std::collections::Bound;
use termdict::TermDictionary;
use termdict::TermStreamer;
use termdict::TermStreamerBuilder;
use std::collections::BTreeSet;
use termdict::TermMerger;
use docset::SkipResult;
use std::{usize, u64};
use std::iter::Peekable;

use DocId;
use Result;
use Score;
use SegmentReader;
use SegmentLocalId;
use std::cmp::Ordering;

/// Facet collector for i64/u64 fast field
pub struct FacetCollector<T>
where T: FastFieldReader,
T::ValueType: Eq + Hash
{
counters: HashMap<T::ValueType, u64>,
field: Field,
ff_reader: Option<T>,
struct Hit<'a> {
count: u64,
facet: &'a Facet,
}

impl<'a> Eq for Hit<'a> {}

impl<T> FacetCollector<T>
where T: FastFieldReader,
T::ValueType: Eq + Hash
{
/// Creates a new facet collector for aggregating a given field.
pub fn new(field: Field) -> FacetCollector<T> {
FacetCollector {
counters: HashMap::new(),
field: field,
ff_reader: None,
}
impl<'a> PartialEq<Hit<'a>> for Hit<'a> {
fn eq(&self, other: &Hit) -> bool {
self.count == other.count
}
}

impl<'a> PartialOrd<Hit<'a>> for Hit<'a> {
fn partial_cmp(&self, other: &Hit) -> Option<Ordering> {
Some(self.cmp(other))
}
}

impl<T> Collector for FacetCollector<T>
where T: FastFieldReader,
T::ValueType: Eq + Hash
{
impl<'a> Ord for Hit<'a> {
fn cmp(&self, other: &Self) -> Ordering {
other.count.cmp(&self.count)
}
}

struct SegmentFacetCounter {
pub facet_reader: FacetReader,
pub facet_ords: Vec<u64>,
pub facet_counts: Vec<u64>,
}

fn facet_depth(facet_bytes: &[u8]) -> usize {
if facet_bytes.is_empty() {
0
} else {
facet_bytes.iter().cloned().filter(|b| *b == 0u8).count() + 1
}
}
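
`facet_depth` counts path segments by counting the `0u8` separators in a facet's encoded bytes. A short illustration, assuming the encoding joins segments with a null byte (so `/top/mid/leaf` encodes as `b"top\0mid\0leaf"`):

    assert_eq!(facet_depth(b""), 0);               // the root facet
    assert_eq!(facet_depth(b"top"), 1);
    assert_eq!(facet_depth(b"top\0mid"), 2);
    assert_eq!(facet_depth(b"top\0mid\0leaf"), 3);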

/// Collector for faceting
///
/// The collector collects all facets. You need to configure it
/// beforehand with the facet you want to extract.
///
/// This is done by calling `.add_facet(...)` with the root of the
/// facet you want to extract as argument.
///
/// Facet counts will only be computed for the facets that are direct children
/// of such a root facet.
///
/// For instance, if your index represents books, your hierarchy of facets
/// may contain `category`, `language`.
///
/// The category facet may include `subcategories`. For instance, a book
/// could belong to `/category/fiction/fantasy`.
///
/// If you request the facet counts for `/category`, the result will be
/// the breakdown of counts for the direct children of `/category`
/// (e.g. `/category/fiction`, `/category/biography`, `/category/personal_development`).
///
/// Once collection is finished, you can harvest its results in the form
/// of a `FacetCounts` object, and extract your facet counts from it.
///
/// This implementation assumes you are working with a number of facets that
/// is hundreds of times lower than your number of documents.
///
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Facet, SchemaBuilder, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::FacetCollector;
/// use tantivy::query::AllQuery;
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = SchemaBuilder::new();
///
/// // Facets have their own specific type.
/// // It is not a bad practice to put all of your
/// // facet information in the same field.
/// let facet = schema_builder.add_facet_field("facet");
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// // a document can be associated to any number of facets
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// facet => Facet::from("/lang/en"),
/// facet => Facet::from("/category/fiction/fantasy")
/// ));
/// index_writer.add_document(doc!(
/// title => "Dune",
/// facet => Facet::from("/lang/en"),
/// facet => Facet::from("/category/fiction/sci-fi")
/// ));
/// index_writer.add_document(doc!(
/// title => "La Vénus d'Ille",
/// facet => Facet::from("/lang/fr"),
/// facet => Facet::from("/category/fiction/fantasy"),
/// facet => Facet::from("/category/fiction/horror")
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// facet => Facet::from("/lang/en"),
/// facet => Facet::from("/category/biography")
/// ));
/// index_writer.commit().unwrap();
/// }
///
/// index.load_searchers()?;
/// let searcher = index.searcher();
///
/// {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/lang");
/// facet_collector.add_facet("/category");
/// searcher.search(&AllQuery, &mut facet_collector).unwrap();
///
/// // this object contains count aggregates for all of the facets.
/// let counts = facet_collector.harvest();
///
/// // This lists all of the facet counts
/// let facets: Vec<(&Facet, u64)> = counts
/// .get("/category")
/// .collect();
/// assert_eq!(facets, vec![
/// (&Facet::from("/category/biography"), 1),
/// (&Facet::from("/category/fiction"), 3)
/// ]);
/// }
///
/// {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction");
/// searcher.search(&AllQuery, &mut facet_collector).unwrap();
///
/// // this object contains count aggregates for all of the facets.
/// let counts = facet_collector.harvest();
///
/// // This lists all of the facet counts
/// let facets: Vec<(&Facet, u64)> = counts
/// .get("/category/fiction")
/// .collect();
/// assert_eq!(facets, vec![
/// (&Facet::from("/category/fiction/fantasy"), 2),
/// (&Facet::from("/category/fiction/horror"), 1),
/// (&Facet::from("/category/fiction/sci-fi"), 1)
/// ]);
/// }
///
/// {
/// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction");
/// searcher.search(&AllQuery, &mut facet_collector).unwrap();
///
/// // this object contains count aggregates for all of the facets.
/// let counts = facet_collector.harvest();
///
/// // This lists all of the facet counts
/// let facets: Vec<(&Facet, u64)> = counts.top_k("/category/fiction", 1);
/// assert_eq!(facets, vec![
/// (&Facet::from("/category/fiction/fantasy"), 2)
/// ]);
/// }
///
/// Ok(())
/// }
/// ```
pub struct FacetCollector {
facet_ords: Vec<u64>,
field: Field,
ff_reader: Option<UnsafeCell<FacetReader>>,
segment_counters: Vec<SegmentFacetCounter>,

// facet_ord -> collapse facet_id
current_segment_collapse_mapping: Vec<usize>,
// collapse facet_id -> count
current_segment_counts: Vec<u64>,
// collapse facet_id -> facet_ord
current_collapse_facet_ords: Vec<u64>,

facets: BTreeSet<Facet>,
}

fn skip<'a, I: Iterator<Item = &'a Facet>>(
target: &[u8],
collapse_it: &mut Peekable<I>,
) -> SkipResult {
loop {
match collapse_it.peek() {
Some(facet_bytes) => match facet_bytes.encoded_bytes().cmp(target) {
Ordering::Less => {}
Ordering::Greater => {
return SkipResult::OverStep;
}
Ordering::Equal => {
return SkipResult::Reached;
}
},
None => {
return SkipResult::End;
}
}
collapse_it.next();
}
}

impl FacetCollector {
/// Create a facet collector to collect the facets
/// from a specific facet `Field`.
///
/// This function does not check whether the field
/// is of the proper type.
pub fn for_field(field: Field) -> FacetCollector {
FacetCollector {
facet_ords: Vec::with_capacity(255),
segment_counters: Vec::new(),
field,
ff_reader: None,
facets: BTreeSet::new(),

current_segment_collapse_mapping: Vec::new(),
current_collapse_facet_ords: Vec::new(),
current_segment_counts: Vec::new(),
}
}

/// Adds a facet that we want to record counts for.
///
/// Adding facet `Facet::from("/country")` for instance,
/// will record the counts of all of the direct children of the facet country
/// (e.g. `/country/FR`, `/country/UK`).
///
/// Adding two facets where one is a prefix of the other is forbidden.
/// If you need the correct number of unique documents for two such facets,
/// just add them in separate `FacetCollector`s.
pub fn add_facet<T>(&mut self, facet_from: T)
where
Facet: From<T>,
{
let facet = Facet::from(facet_from);
for old_facet in &self.facets {
assert!(
!old_facet.is_prefix_of(&facet),
"Tried to add a facet which is a descendant of an already added facet."
);
assert!(
!facet.is_prefix_of(old_facet),
"Tried to add a facet which is an ancestor of an already added facet."
);
}
self.facets.insert(facet);
}

fn set_collapse_mapping(&mut self, facet_reader: &FacetReader) {
self.current_segment_collapse_mapping.clear();
self.current_collapse_facet_ords.clear();
self.current_segment_counts.clear();
let mut collapse_facet_it = self.facets.iter().peekable();
self.current_collapse_facet_ords.push(0);
let mut facet_streamer = facet_reader.facet_dict().range().into_stream();
if !facet_streamer.advance() {
return;
}
'outer: loop {
// at the beginning of this loop, facet_streamer
// is positioned on a term that has not been processed yet.
let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
match skip_result {
SkipResult::Reached => {
// we reached a facet we decided to collapse.
let collapse_depth = facet_depth(facet_streamer.key());
let mut collapsed_id = 0;
self.current_segment_collapse_mapping.push(0);
while facet_streamer.advance() {
let depth = facet_depth(facet_streamer.key());
if depth <= collapse_depth {
continue 'outer;
}
if depth == collapse_depth + 1 {
collapsed_id = self.current_collapse_facet_ords.len();
self.current_collapse_facet_ords
.push(facet_streamer.term_ord());
self.current_segment_collapse_mapping.push(collapsed_id);
} else {
self.current_segment_collapse_mapping.push(collapsed_id);
}
}
break;
}
SkipResult::End | SkipResult::OverStep => {
self.current_segment_collapse_mapping.push(0);
if !facet_streamer.advance() {
break;
}
}
}
}
}

fn finalize_segment(&mut self) {
if self.ff_reader.is_some() {
self.segment_counters.push(SegmentFacetCounter {
facet_reader: self.ff_reader.take().unwrap().into_inner(),
facet_ords: mem::replace(&mut self.current_collapse_facet_ords, Vec::new()),
facet_counts: mem::replace(&mut self.current_segment_counts, Vec::new()),
});
}
}

/// Returns the results of the collection.
///
/// This method does not just return the counters,
/// it also translates the facet ordinals of the last segment.
pub fn harvest(mut self) -> FacetCounts {
self.finalize_segment();

let collapsed_facet_ords: Vec<&[u64]> = self.segment_counters
.iter()
.map(|segment_counter| &segment_counter.facet_ords[..])
.collect();
let collapsed_facet_counts: Vec<&[u64]> = self.segment_counters
.iter()
.map(|segment_counter| &segment_counter.facet_counts[..])
.collect();

let facet_streams = self.segment_counters
.iter()
.map(|seg_counts| seg_counts.facet_reader.facet_dict().range().into_stream())
.collect::<Vec<_>>();

let mut facet_merger = TermMerger::new(facet_streams);
let mut facet_counts = BTreeMap::new();

while facet_merger.advance() {
let count = facet_merger
.current_kvs()
.iter()
.map(|it| {
let seg_ord = it.segment_ord;
let term_ord = it.streamer.term_ord();
collapsed_facet_ords[seg_ord]
.binary_search(&term_ord)
.map(|collapsed_term_id| {
if collapsed_term_id == 0 {
0
} else {
collapsed_facet_counts[seg_ord][collapsed_term_id]
}
})
.unwrap_or(0)
})
.sum();
if count > 0u64 {
let bytes = facet_merger.key().to_owned();
facet_counts.insert(Facet::from_encoded(bytes), count);
}
}
FacetCounts { facet_counts }
}
}

impl Collector for FacetCollector {
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
self.finalize_segment();
let facet_reader = reader.facet_reader(self.field)?;
self.set_collapse_mapping(&facet_reader);
self.current_segment_counts
.resize(self.current_collapse_facet_ords.len(), 0);
self.ff_reader = Some(UnsafeCell::new(facet_reader));
Ok(())
}

fn collect(&mut self, doc: DocId, _: Score) {
let val = self.ff_reader
.as_ref()
.expect("collect() was called before set_segment. This should never happen.")
.get(doc);
*(self.counters.entry(val).or_insert(0)) += 1;
let facet_reader: &mut FacetReader = unsafe {
&mut *self.ff_reader
.as_ref()
.expect("collect() was called before set_segment. This should never happen.")
.get()
};
facet_reader.facet_ords(doc, &mut self.facet_ords);
let mut previous_collapsed_ord: usize = usize::MAX;
for &facet_ord in &self.facet_ords {
let collapsed_ord = self.current_segment_collapse_mapping[facet_ord as usize];
self.current_segment_counts[collapsed_ord] += if collapsed_ord == previous_collapsed_ord
{
0
} else {
1
};
previous_collapsed_ord = collapsed_ord;
}
}

fn requires_scoring(&self) -> bool {
false
}
}

/// Intermediary result of the `FacetCollector` that stores
/// the facet counts for all the segments.
pub struct FacetCounts {
facet_counts: BTreeMap<Facet, u64>,
}

impl FacetCounts {
#[allow(needless_lifetimes)] //< compiler fails if we remove the lifetime
pub fn get<'a, T>(&'a self, facet_from: T) -> impl Iterator<Item = (&'a Facet, u64)>
where
Facet: From<T>,
{
let facet = Facet::from(facet_from);
let left_bound = Bound::Excluded(facet.clone());
let right_bound = if facet.is_root() {
Bound::Unbounded
} else {
let mut facet_after_bytes = facet.encoded_bytes().to_owned();
facet_after_bytes.push(1u8);
let facet_after = Facet::from_encoded(facet_after_bytes);
Bound::Excluded(facet_after)
};

self.facet_counts
.range((left_bound, right_bound))
.map(|(facet, count)| (facet, *count))
}

pub fn top_k<T>(&self, facet: T, k: usize) -> Vec<(&Facet, u64)>
where
Facet: From<T>,
{
let mut heap = BinaryHeap::with_capacity(k);
let mut it = self.get(facet);

for (facet, count) in (&mut it).take(k) {
heap.push(Hit { count, facet });
}

let mut lowest_count: u64 = heap.peek().map(|hit| hit.count).unwrap_or(u64::MIN);
for (facet, count) in it {
if count > lowest_count {
lowest_count = count;
if let Some(mut head) = heap.peek_mut() {
*head = Hit { count, facet };
}
}
}
heap.into_sorted_vec()
.into_iter()
.map(|hit| (hit.facet, hit.count))
.collect::<Vec<_>>()
}
}
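
The bound construction in `get` above is a byte-wise prefix-range trick: appending `1u8` to the encoded facet yields an exclusive upper bound that covers exactly the descendants, because the `0u8` path separator sorts below `1u8`. A standalone sketch of the same idea over a plain `BTreeMap` (the keys here are made up for illustration):

    use std::collections::BTreeMap;
    use std::collections::Bound;

    fn main() {
        let mut counts: BTreeMap<Vec<u8>, u64> = BTreeMap::new();
        counts.insert(b"cat\0fiction".to_vec(), 3);   // descendant of "cat"
        counts.insert(b"cat\0biography".to_vec(), 1); // descendant of "cat"
        counts.insert(b"catalog".to_vec(), 7);        // NOT a descendant
        let lower = Bound::Excluded(b"cat".to_vec());
        let upper = Bound::Excluded(b"cat\x01".to_vec());
        // "cat\0..." falls inside the range; "catalog" sorts above "cat\x01".
        let children: Vec<_> = counts.range((lower, upper)).collect();
        assert_eq!(children.len(), 2);
    }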

#[cfg(test)]
mod tests {

use collector::{chain, FacetCollector};
use query::QueryParser;
use fastfield::{I64FastFieldReader, U64FastFieldReader};
use schema::{self, FAST, STRING};
use Index;
use test::Bencher;
use core::Index;
use schema::{Document, Facet, SchemaBuilder};
use query::AllQuery;
use super::{FacetCollector, FacetCounts};
use std::iter;
use schema::Field;
use rand::{thread_rng, Rng};

#[test]
// create 10 documents, set num field value to 0 or 1 for even/odd ones
// make sure we have facet counters correctly filled
fn test_facet_collector_results() {

let mut schema_builder = schema::SchemaBuilder::new();
let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
let text_field = schema_builder.add_text_field("text", STRING);
fn test_facet_collector_drilldown() {
let mut schema_builder = SchemaBuilder::new();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);

let index = Index::create_in_ram(schema.clone());

{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{
for i in 0u64..10u64 {
index_writer.add_document(doc!(
num_field_i64 => ((i as i64) % 3i64) as i64,
num_field_u64 => (i % 2u64) as u64,
text_field => "text"
));
}
}
assert_eq!(index_writer.commit().unwrap(), 10u64);
let mut index_writer = index.writer(3_000_000).unwrap();
let num_facets: usize = 3 * 4 * 5;
let facets: Vec<Facet> = (0..num_facets)
.map(|mut n| {
let top = n % 3;
n /= 3;
let mid = n % 4;
n /= 4;
let leaf = n % 5;
Facet::from(&format!("/top{}/mid{}/leaf{}", top, mid, leaf))
})
.collect();
for i in 0..num_facets * 10 {
let mut doc = Document::new();
doc.add_facet(facet_field, facets[i % num_facets].clone());
index_writer.add_document(doc);
}

index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let mut ffvf_i64: FacetCollector<I64FastFieldReader> = FacetCollector::new(num_field_i64);
let mut ffvf_u64: FacetCollector<U64FastFieldReader> = FacetCollector::new(num_field_u64);

let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet(Facet::from("/top1"));
searcher.search(&AllQuery, &mut facet_collector).unwrap();

let counts: FacetCounts = facet_collector.harvest();
{
// perform the query
let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64);
let query_parser = QueryParser::new(schema, vec![text_field]);
let query = query_parser.parse_query("text:text").unwrap();
query.search(&searcher, &mut facet_collectors).unwrap();
let facets: Vec<(String, u64)> = counts
.get("/top1")
.map(|(facet, count)| (facet.to_string(), count))
.collect();
assert_eq!(
facets,
[
("/top1/mid0", 50),
("/top1/mid1", 50),
("/top1/mid2", 50),
("/top1/mid3", 50),
].iter()
.map(|&(facet_str, count)| (String::from(facet_str), count))
.collect::<Vec<_>>()
);
}
}

assert_eq!(ffvf_u64.counters[&0], 5);
assert_eq!(ffvf_u64.counters[&1], 5);
assert_eq!(ffvf_i64.counters[&0], 4);
assert_eq!(ffvf_i64.counters[&1], 3);
#[test]
#[should_panic(expected = "Tried to add a facet which is a descendant of \
an already added facet.")]
fn test_misused_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field(0));
facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/country/europe"));
}

#[test]
fn test_non_used_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field(0));
facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/countryeurope"));
}

#[test]
fn test_facet_collector_topk() {
let mut schema_builder = SchemaBuilder::new();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);

let mut docs: Vec<Document> = vec![("a", 10), ("b", 100), ("c", 7), ("d", 12), ("e", 21)]
.into_iter()
.flat_map(|(c, count)| {
let facet = Facet::from(&format!("/facet_{}", c));
let doc = doc!(facet_field => facet);
iter::repeat(doc).take(count)
})
.collect();
thread_rng().shuffle(&mut docs[..]);

let mut index_writer = index.writer(3_000_000).unwrap();
for doc in docs {
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
index.load_searchers().unwrap();

let searcher = index.searcher();

let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet("/");
searcher.search(&AllQuery, &mut facet_collector).unwrap();

let counts: FacetCounts = facet_collector.harvest();
{
let facets: Vec<(&Facet, u64)> = counts.top_k("/", 3);
assert_eq!(
facets,
vec![
(&Facet::from("/facet_b"), 100),
(&Facet::from("/facet_e"), 21),
(&Facet::from("/facet_d"), 12),
]
);
}
}

#[bench]
fn bench_facet_collector(b: &mut Bencher) {
let mut schema_builder = SchemaBuilder::new();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);

let mut docs = vec![];
for val in 0..50 {
let facet = Facet::from(&format!("/facet_{}", val));
for _ in 0..val * val {
docs.push(doc!(facet_field=>facet.clone()));
}
}
// 40425 docs
thread_rng().shuffle(&mut docs[..]);

let mut index_writer = index.writer(3_000_000).unwrap();
for doc in docs {
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
index.load_searchers().unwrap();

b.iter(|| {
let searcher = index.searcher();
let mut facet_collector = FacetCollector::for_field(facet_field);
searcher.search(&AllQuery, &mut facet_collector).unwrap();
});
}
}
src/collector/int_facet_collector.rs (new file, 123 lines)
@@ -0,0 +1,123 @@
use std::cmp::Eq;
use std::collections::HashMap;
use std::hash::Hash;

use collector::Collector;
use fastfield::FastFieldReader;
use schema::Field;

use DocId;
use Result;
use Score;
use SegmentReader;
use SegmentLocalId;

/// Facet collector for i64/u64 fast field
pub struct IntFacetCollector<T>
where
T: FastFieldReader,
T::ValueType: Eq + Hash,
{
counters: HashMap<T::ValueType, u64>,
field: Field,
ff_reader: Option<T>,
}

impl<T> IntFacetCollector<T>
where
T: FastFieldReader,
T::ValueType: Eq + Hash,
{
/// Creates a new facet collector for aggregating a given field.
pub fn new(field: Field) -> IntFacetCollector<T> {
IntFacetCollector {
counters: HashMap::new(),
field: field,
ff_reader: None,
}
}
}

impl<T> Collector for IntFacetCollector<T>
where
T: FastFieldReader,
T::ValueType: Eq + Hash,
{
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
Ok(())
}

fn collect(&mut self, doc: DocId, _: Score) {
let val = self.ff_reader
.as_ref()
.expect(
"collect() was called before set_segment. \
This should never happen.",
)
.get(doc);
*(self.counters.entry(val).or_insert(0)) += 1;
}
}

#[cfg(test)]
mod tests {

use collector::{chain, IntFacetCollector};
use query::QueryParser;
use fastfield::{I64FastFieldReader, U64FastFieldReader};
use schema::{self, FAST, STRING};
use Index;

#[test]
// create 10 documents, set num field value to 0 or 1 for even/odd ones
// make sure we have facet counters correctly filled
fn test_facet_collector_results() {

let mut schema_builder = schema::SchemaBuilder::new();
let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
let text_field = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build();

let index = Index::create_in_ram(schema.clone());

{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
{
for i in 0u64..10u64 {
index_writer.add_document(doc!(
num_field_i64 => ((i as i64) % 3i64) as i64,
num_field_u64 => (i % 2u64) as u64,
text_field => "text"
));
}
}
assert_eq!(index_writer.commit().unwrap(), 10u64);
}

index.load_searchers().unwrap();
let searcher = index.searcher();
let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64);
let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64);

{
// perform the query
let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64);
let mut query_parser = QueryParser::for_index(index, vec![text_field]);
let query = query_parser.parse_query("text:text").unwrap();
query.search(&searcher, &mut facet_collectors).unwrap();
}

assert_eq!(ffvf_u64.counters[&0], 5);
assert_eq!(ffvf_u64.counters[&1], 5);
assert_eq!(ffvf_i64.counters[&0], 4);
assert_eq!(ffvf_i64.counters[&1], 3);

}
}
@@ -1,3 +1,7 @@
/*!
Defines how the documents matching a search query should be processed.
*/

use SegmentReader;
use SegmentLocalId;
use DocId;
@@ -51,29 +55,36 @@ pub use self::chained_collector::chain;
pub trait Collector {
/// `set_segment` is called before beginning to enumerate
/// on this segment.
fn set_segment(&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader)
-> Result<()>;
fn set_segment(
&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader,
) -> Result<()>;
/// The query pushes the scored document to the collector via this method.
fn collect(&mut self, doc: DocId, score: Score);

/// Returns true iff the collector requires scores to be computed for documents.
fn requires_scoring(&self) -> bool;
}

impl<'a, C: Collector> Collector for &'a mut C {
fn set_segment(&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader)
-> Result<()> {
fn set_segment(
&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader,
) -> Result<()> {
(*self).set_segment(segment_local_id, segment)
}
/// The query pushes the scored document to the collector via this method.
fn collect(&mut self, doc: DocId, score: Score) {
(*self).collect(doc, score);
C::collect(self, doc, score)
}

fn requires_scoring(&self) -> bool {
C::requires_scoring(self)
}
}

#[cfg(test)]
pub mod tests {

@@ -83,7 +94,6 @@ pub mod tests {
use Score;
use core::SegmentReader;
use SegmentLocalId;
use fastfield::U64FastFieldReader;
use fastfield::FastFieldReader;
use schema::Field;

@@ -95,6 +105,7 @@ pub mod tests {
offset: DocId,
segment_max_doc: DocId,
docs: Vec<DocId>,
scores: Vec<Score>,
}

impl TestCollector {
@@ -102,14 +113,19 @@ pub mod tests {
pub fn docs(self) -> Vec<DocId> {
self.docs
}

pub fn scores(self) -> Vec<Score> {
self.scores
}
}

impl Default for TestCollector {
fn default() -> TestCollector {
TestCollector {
docs: Vec::new(),
offset: 0,
segment_max_doc: 0,
docs: Vec::new(),
scores: Vec::new(),
}
}
}
@@ -121,14 +137,16 @@ pub mod tests {
Ok(())
}

fn collect(&mut self, doc: DocId, _score: Score) {
fn collect(&mut self, doc: DocId, score: Score) {
self.docs.push(doc + self.offset);
self.scores.push(score);
}

fn requires_scoring(&self) -> bool {
true
}
}

/// Collects in order all of the fast fields for all of the
/// doc in the `DocSet`
///
@@ -136,14 +154,14 @@ pub mod tests {
pub struct FastFieldTestCollector {
vals: Vec<u64>,
field: Field,
ff_reader: Option<U64FastFieldReader>,
ff_reader: Option<FastFieldReader<u64>>,
}

impl FastFieldTestCollector {
pub fn for_field(field: Field) -> FastFieldTestCollector {
FastFieldTestCollector {
vals: Vec::new(),
field: field,
field,
ff_reader: None,
}
}
@@ -155,7 +173,7 @@ pub mod tests {

impl Collector for FastFieldTestCollector {
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
self.ff_reader = Some(reader.fast_field_reader(self.field)?);
Ok(())
}

@@ -163,18 +181,20 @@ pub mod tests {
let val = self.ff_reader.as_ref().unwrap().get(doc);
self.vals.push(val);
}
fn requires_scoring(&self) -> bool {
false
}
}

#[bench]
fn build_collector(b: &mut Bencher) {
b.iter(|| {
let mut count_collector = CountCollector::default();
let docs: Vec<u32> = (0..1_000_000).collect();
for doc in docs {
count_collector.collect(doc, 1f32);
}
count_collector.count()
});
let mut count_collector = CountCollector::default();
let docs: Vec<u32> = (0..1_000_000).collect();
for doc in docs {
count_collector.collect(doc, 1f32);
}
count_collector.count()
});
}
}
@@ -5,7 +5,6 @@ use Result;
use SegmentReader;
use SegmentLocalId;

/// Multicollector makes it possible to collect on more than one collector.
/// It should only be used for use cases where the Collector type is unknown
/// at compile time.
@@ -17,18 +16,18 @@ pub struct MultiCollector<'a> {
impl<'a> MultiCollector<'a> {
/// Constructor
pub fn from(collectors: Vec<&'a mut Collector>) -> MultiCollector {
MultiCollector { collectors: collectors }
MultiCollector { collectors }
}
}

impl<'a> Collector for MultiCollector<'a> {
fn set_segment(&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader)
-> Result<()> {
fn set_segment(
&mut self,
segment_local_id: SegmentLocalId,
segment: &SegmentReader,
) -> Result<()> {
for collector in &mut self.collectors {
try!(collector.set_segment(segment_local_id, segment));
collector.set_segment(segment_local_id, segment)?;
}
Ok(())
}
@@ -38,10 +37,13 @@ impl<'a> Collector for MultiCollector<'a> {
collector.collect(doc, score);
}
}
fn requires_scoring(&self) -> bool {
self.collectors
.iter()
.any(|collector| collector.requires_scoring())
}
}

#[cfg(test)]
mod tests {

@@ -53,8 +55,8 @@ mod tests {
let mut top_collector = TopCollector::with_limit(2);
let mut count_collector = CountCollector::default();
{
let mut collectors = MultiCollector::from(vec![&mut top_collector,
&mut count_collector]);
let mut collectors =
MultiCollector::from(vec![&mut top_collector, &mut count_collector]);
collectors.collect(1, 0.2);
collectors.collect(2, 0.1);
collectors.collect(3, 0.5);
@@ -39,7 +39,6 @@ impl PartialEq for GlobalScoredDoc {

impl Eq for GlobalScoredDoc {}

/// The Top Collector keeps track of the K documents
/// with the best scores.
///
@@ -61,7 +60,7 @@ impl TopCollector {
panic!("Limit must be strictly greater than 0.");
}
TopCollector {
limit: limit,
limit,
heap: BinaryHeap::with_capacity(limit),
segment_id: 0,
}
@@ -108,10 +107,9 @@ impl Collector for TopCollector {
fn collect(&mut self, doc: DocId, score: Score) {
if self.at_capacity() {
// It's ok to unwrap as long as a limit of 0 is forbidden.
let limit_doc: GlobalScoredDoc =
*self.heap
.peek()
.expect("Top collector with size 0 is forbidden");
let limit_doc: GlobalScoredDoc = *self.heap
.peek()
.expect("Top collector with size 0 is forbidden");
if limit_doc.score < score {
let mut mut_head = self.heap
.peek_mut()
@@ -121,16 +119,18 @@ impl Collector for TopCollector {
}
} else {
let wrapped_doc = GlobalScoredDoc {
score: score,
score,
doc_address: DocAddress(self.segment_id, doc),
};
self.heap.push(wrapped_doc);
}
}

fn requires_scoring(&self) -> bool {
true
}
}

#[cfg(test)]
mod tests {

@@ -179,8 +179,6 @@ mod tests {
.collect();
assert_eq!(docs, vec![7, 1, 5, 3]);
}

}

#[test]
@@ -3,61 +3,37 @@ use std::io;
use common::serialize::BinarySerializable;
use std::mem;
use std::ops::Deref;
use std::ptr;

/// Computes the number of bits that will be used for bitpacking.
///
/// In general the target is the minimum number of bits
/// required to express the amplitude given in argument.
///
/// e.g. If the amplitude is 10, we can store all ints using only 4 bits.
///
/// The logic is slightly more convoluted here as for optimization
/// reasons, we want to ensure that a value spans at most 8
/// aligned bytes.
///
/// Spanning 9 bytes is possible for instance, if we do
/// bitpacking with an amplitude of 63 bits.
/// In this case, the second int will start on bit
/// 63 (which belongs to byte 7) and ends at byte 15;
/// hence 9 bytes (from byte 7 to byte 15 included).
///
/// To avoid this, we force the number of bits to 64 bits
/// when the result is greater than `64 - 8 = 56` bits.
///
/// Note that this only affects rare use cases spanning
/// a very large range of values. Even in this case, it results
/// in an extra cost of at most 12% compared to the optimal
/// number of bits.
pub fn compute_num_bits(amplitude: u64) -> u8 {
let amplitude = (64u32 - amplitude.leading_zeros()) as u8;
if amplitude <= 64 - 8 { amplitude } else { 64 }
}
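
A quick worked check of the cutoff described above: amplitudes needing up to 56 bits keep their minimal width, and anything wider gets rounded up to 64 so a single value never straddles more than 8 aligned bytes. (A sketch, using the function as defined right above.)

    assert_eq!(compute_num_bits(10), 4);                 // 10 = 0b1010 needs 4 bits
    assert_eq!(compute_num_bits((1u64 << 56) - 1), 56);  // 56 bits: kept minimal
    assert_eq!(compute_num_bits(1u64 << 56), 64);        // 57 bits: forced up to 64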

pub struct BitPacker {
pub(crate) struct BitPacker {
mini_buffer: u64,
mini_buffer_written: usize,
num_bits: usize,
}

impl BitPacker {
pub fn new(num_bits: usize) -> BitPacker {
pub fn new() -> BitPacker {
BitPacker {
mini_buffer: 0u64,
mini_buffer_written: 0,
num_bits: num_bits,
}
}

pub fn write<TWrite: Write>(&mut self, val: u64, output: &mut TWrite) -> io::Result<()> {
pub fn write<TWrite: Write>(
&mut self,
val: u64,
num_bits: u8,
output: &mut TWrite,
) -> io::Result<()> {
let val_u64 = val as u64;
if self.mini_buffer_written + self.num_bits > 64 {
let num_bits = num_bits as usize;
if self.mini_buffer_written + num_bits > 64 {
self.mini_buffer |= val_u64.wrapping_shl(self.mini_buffer_written as u32);
self.mini_buffer.serialize(output)?;
self.mini_buffer = val_u64.wrapping_shr((64 - self.mini_buffer_written) as u32);
self.mini_buffer_written = self.mini_buffer_written + (self.num_bits as usize) - 64;
self.mini_buffer_written = self.mini_buffer_written + num_bits - 64;
} else {
self.mini_buffer |= val_u64 << self.mini_buffer_written;
self.mini_buffer_written += self.num_bits;
self.mini_buffer_written += num_bits;
if self.mini_buffer_written == 64 {
self.mini_buffer.serialize(output)?;
self.mini_buffer_written = 0;
@@ -67,7 +43,7 @@ impl BitPacker {
Ok(())
}

fn flush<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
pub fn flush<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
if self.mini_buffer_written > 0 {
let num_bytes = (self.mini_buffer_written + 7) / 8;
let arr: [u8; 8] = unsafe { mem::transmute::<u64, [u8; 8]>(self.mini_buffer) };
@@ -85,10 +61,10 @@ impl BitPacker {
}
}

#[derive(Clone)]
pub struct BitUnpacker<Data>
where Data: Deref<Target = [u8]>
where
Data: Deref<Target = [u8]>,
{
num_bits: usize,
mask: u64,
@@ -96,24 +72,25 @@ pub struct BitUnpacker<Data>
}

impl<Data> BitUnpacker<Data>
where Data: Deref<Target = [u8]>
where
Data: Deref<Target = [u8]>,
{
pub fn new(data: Data, num_bits: usize) -> BitUnpacker<Data> {
pub fn new(data: Data, num_bits: u8) -> BitUnpacker<Data> {
let mask: u64 = if num_bits == 64 {
!0u64
} else {
(1u64 << num_bits) - 1u64
};
BitUnpacker {
num_bits: num_bits,
mask: mask,
data: data,
num_bits: num_bits as usize,
mask,
data,
}
}

pub fn get(&self, idx: usize) -> u64 {
if self.num_bits == 0 {
return 0;
return 0u64;
}
let data: &[u8] = &*self.data;
let num_bits = self.num_bits;
@@ -121,17 +98,40 @@ impl<Data> BitUnpacker<Data>
let addr_in_bits = idx * num_bits;
let addr = addr_in_bits >> 3;
let bit_shift = addr_in_bits & 7;
debug_assert!(addr + 8 <= data.len(),
"The fast field should have been padded with 7 bytes.");
let val_unshifted_unmasked: u64 = unsafe { *(data[addr..].as_ptr() as *const u64) };
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
(val_shifted & mask)
if cfg!(feature = "simdcompression") {
// for simdcompression,
// the bitpacker is only used for fastfields,
// and we expect them to be always padded.
debug_assert!(
addr + 8 <= data.len(),
"The fast field should have been padded with 7 bytes."
);
let val_unshifted_unmasked: u64 = unsafe { ptr::read_unaligned(data[addr..].as_ptr() as *const u64) };
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
val_shifted & mask
} else {
let val_unshifted_unmasked: u64 = if addr + 8 <= data.len() {
unsafe { ptr::read_unaligned(data[addr..].as_ptr() as *const u64) }
} else {
let mut buffer = [0u8; 8];
for i in addr..data.len() {
buffer[i - addr] += data[i];
}
unsafe { ptr::read_unaligned(buffer[..].as_ptr() as *const u64) }
};
let val_shifted = val_unshifted_unmasked >> (bit_shift as u64);
val_shifted & mask
}
}

/// Reads a range of values from the fast field.
///
/// The range of values read is from
/// `[start..start + output.len()[`
pub fn get_range(&self, start: u32, output: &mut [u64]) {
if self.num_bits == 0 {
for val in output.iter_mut() {
*val = 0;
*val = 0u64;
}
} else {
let data: &[u8] = &*self.data;
@@ -141,52 +141,36 @@ impl<Data> BitUnpacker<Data>
for output_val in output.iter_mut() {
let addr = addr_in_bits >> 3;
let bit_shift = addr_in_bits & 7;
let val_unshifted_unmasked: u64 = unsafe { *(data[addr..].as_ptr() as *const u64) };
let val_unshifted_unmasked: u64 = unsafe { ptr::read_unaligned(data[addr..].as_ptr() as *const u64) };
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
*output_val = val_shifted & mask;
addr_in_bits += num_bits;
}
}

}
}

#[cfg(test)]
mod test {
use super::{BitPacker, BitUnpacker, compute_num_bits};
use super::{BitPacker, BitUnpacker};

#[test]
fn test_compute_num_bits() {
assert_eq!(compute_num_bits(1), 1u8);
assert_eq!(compute_num_bits(0), 0u8);
assert_eq!(compute_num_bits(2), 2u8);
assert_eq!(compute_num_bits(3), 2u8);
assert_eq!(compute_num_bits(4), 3u8);
assert_eq!(compute_num_bits(255), 8u8);
assert_eq!(compute_num_bits(256), 9u8);
assert_eq!(compute_num_bits(5_000_000_000), 33u8);
}

fn create_fastfield_bitpacker(len: usize, num_bits: usize) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
let mut data = Vec::new();
let mut bitpacker = BitPacker::new(num_bits);
let max_val: u64 = (1 << num_bits) - 1;
let mut bitpacker = BitPacker::new();
let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
let vals: Vec<u64> = (0u64..len as u64)
.map(|i| if max_val == 0 { 0 } else { i % max_val })
.collect();
for &val in &vals {
bitpacker.write(val, &mut data).unwrap();
bitpacker.write(val, num_bits, &mut data).unwrap();
}
bitpacker.close(&mut data).unwrap();
assert_eq!(data.len(), (num_bits * len + 7) / 8 + 7);
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
let bitunpacker = BitUnpacker::new(data, num_bits);
(bitunpacker, vals)
}

fn test_bitpacker_util(len: usize, num_bits: usize) {
fn test_bitpacker_util(len: usize, num_bits: u8) {
let (bitunpacker, vals) = create_fastfield_bitpacker(len, num_bits);
for (i, val) in vals.iter().enumerate() {
assert_eq!(bitunpacker.get(i), *val);
src/common/bitset.rs (new file, 389 lines)
@@ -0,0 +1,389 @@
use std::fmt;
use std::u64;

#[derive(Clone, Copy, Eq, PartialEq)]
pub(crate) struct TinySet(u64);

impl fmt::Debug for TinySet {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.into_iter().collect::<Vec<u32>>().fmt(f)
}
}

pub struct TinySetIterator(TinySet);
impl Iterator for TinySetIterator {
type Item = u32;

fn next(&mut self) -> Option<Self::Item> {
self.0.pop_lowest()
}
}

impl IntoIterator for TinySet {
type Item = u32;
type IntoIter = TinySetIterator;
fn into_iter(self) -> Self::IntoIter {
TinySetIterator(self)
}
}

impl TinySet {
/// Returns an empty `TinySet`.
pub fn empty() -> TinySet {
TinySet(0u64)
}

/// Returns the complement of the set in `[0, 64[`.
fn complement(&self) -> TinySet {
TinySet(!self.0)
}

/// Returns true iff the `TinySet` contains the element `el`.
pub fn contains(&self, el: u32) -> bool {
!self.intersect(TinySet::singleton(el)).is_empty()
}

/// Returns the intersection of `self` and `other`
pub fn intersect(&self, other: TinySet) -> TinySet {
TinySet(self.0 & other.0)
}

/// Creates a new `TinySet` containing only one element
/// within `[0; 64[`
#[inline(always)]
pub fn singleton(el: u32) -> TinySet {
TinySet(1u64 << u64::from(el))
}

/// Insert a new element within [0..64[
#[inline(always)]
pub fn insert(self, el: u32) -> TinySet {
self.union(TinySet::singleton(el))
}

/// Insert a new element within [0..64[
#[inline(always)]
pub fn insert_mut(&mut self, el: u32) -> bool {
let old = *self;
*self = old.insert(el);
old != *self
}

/// Returns the union of two tinysets
#[inline(always)]
pub fn union(self, other: TinySet) -> TinySet {
TinySet(self.0 | other.0)
}

/// Returns true iff the `TinySet` is empty.
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.0 == 0u64
}

/// Returns the lowest element in the `TinySet`
/// and removes it.
#[inline(always)]
pub fn pop_lowest(&mut self) -> Option<u32> {
if self.is_empty() {
None
} else {
let lowest = self.0.trailing_zeros() as u32;
self.0 ^= TinySet::singleton(lowest).0;
Some(lowest)
}
}

/// Returns a `TinySet` that contains all values up
/// to the given limit, excluded.
///
/// The limit is assumed to be strictly lower than 64.
pub fn range_lower(upper_bound: u32) -> TinySet {
TinySet((1u64 << u64::from(upper_bound % 64u32)) - 1u64)
}

/// Returns a `TinySet` that contains all values greater
/// than or equal to the given limit, included. (and up to 63)
///
/// The limit is assumed to be strictly lower than 64.
pub fn range_greater_or_equal(from_included: u32) -> TinySet {
TinySet::range_lower(from_included).complement()
}

pub fn clear(&mut self) {
self.0 = 0u64;
}

pub fn len(&self) -> u32 {
self.0.count_ones()
}
}
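
A short demonstration of the `TinySet` bit tricks above: `singleton(el)` sets bit `el`, a duplicate `insert` is a no-op, and `pop_lowest` peels elements off in ascending order via `trailing_zeros`. (A sketch against the API as defined above.)

    let mut set = TinySet::empty().insert(3).insert(17).insert(3);
    assert_eq!(set.len(), 2);               // the duplicate insert(3) did nothing
    assert_eq!(set.pop_lowest(), Some(3));  // lowest bit first
    assert_eq!(set.pop_lowest(), Some(17));
    assert_eq!(set.pop_lowest(), None);     // empty again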

#[derive(Clone)]
pub struct BitSet {
    tinysets: Box<[TinySet]>,
    len: usize, //< Technically it should be u32, but we
    // count multiple inserts.
    // `usize` guards us from overflow.
    max_value: u32,
}

fn num_buckets(max_val: u32) -> u32 {
    (max_val + 63u32) / 64u32
}

impl BitSet {
    /// Create a new `BitSet` that may contain elements
    /// within `[0, max_val[`.
    pub fn with_max_value(max_value: u32) -> BitSet {
        let num_buckets = num_buckets(max_value);
        let tinysets = vec![TinySet::empty(); num_buckets as usize].into_boxed_slice();
        BitSet {
            tinysets,
            len: 0,
            max_value,
        }
    }

    /// Removes all elements from the `BitSet`.
    pub fn clear(&mut self) {
        for tinyset in self.tinysets.iter_mut() {
            *tinyset = TinySet::empty();
        }
    }

    /// Returns the number of elements in the `BitSet`.
    pub fn len(&self) -> usize {
        self.len
    }

    /// Inserts an element in the `BitSet`.
    pub fn insert(&mut self, el: u32) {
        // we do not check that `el` is within `[0, max_value[`.
        let higher = el / 64u32;
        let lower = el % 64u32;
        self.len += if self.tinysets[higher as usize].insert_mut(lower) {
            1
        } else {
            0
        };
    }

    /// Returns true iff the element is in the `BitSet`.
    pub fn contains(&self, el: u32) -> bool {
        self.tinyset(el / 64u32).contains(el % 64)
    }

    /// Returns the first non-empty `TinySet` associated with a bucket equal
    /// to or greater than `bucket`.
    ///
    /// Reminder: the tiny set with the bucket `bucket` represents the
    /// elements from `bucket * 64` to `(bucket + 1) * 64` (excluded).
    pub(crate) fn first_non_empty_bucket(&self, bucket: u32) -> Option<u32> {
        self.tinysets[bucket as usize..]
            .iter()
            .cloned()
            .position(|tinyset| !tinyset.is_empty())
            .map(|delta_bucket| bucket + delta_bucket as u32)
    }

    pub fn max_value(&self) -> u32 {
        self.max_value
    }

    /// Returns the tiny bitset representing the
    /// set restricted to the number range from
    /// `bucket * 64` to `(bucket + 1) * 64`.
    pub(crate) fn tinyset(&self, bucket: u32) -> TinySet {
        self.tinysets[bucket as usize]
    }
}
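
The bucket arithmetic is the heart of `BitSet`: an element `el` lives in the tinyset at index `el / 64`, at bit `el % 64`. A short sketch of that mapping (illustrative, not part of the diff):

    // num_buckets(200) == (200 + 63) / 64 == 4 buckets.
    let mut bits = BitSet::with_max_value(200);
    bits.insert(130); // bucket 130 / 64 == 2, bit 130 % 64 == 2
    assert!(bits.contains(130));
    assert_eq!(bits.len(), 1);
    // Buckets 0 and 1 are empty; the first non-empty bucket is 2.
    assert_eq!(bits.first_non_empty_bucket(0), Some(2));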

#[cfg(test)]
mod tests {

    extern crate test;
    use tests;
    use std::collections::HashSet;
    use super::BitSet;
    use super::TinySet;
    use tests::generate_nonunique_unsorted;
    use std::collections::BTreeSet;
    use query::BitSetDocSet;
    use docset::DocSet;

    #[test]
    fn test_tiny_set() {
        assert!(TinySet::empty().is_empty());
        {
            let mut u = TinySet::empty().insert(1u32);
            assert_eq!(u.pop_lowest(), Some(1u32));
            assert!(u.pop_lowest().is_none())
        }
        {
            let mut u = TinySet::empty().insert(1u32).insert(1u32);
            assert_eq!(u.pop_lowest(), Some(1u32));
            assert!(u.pop_lowest().is_none())
        }
        {
            let mut u = TinySet::empty().insert(2u32);
            assert_eq!(u.pop_lowest(), Some(2u32));
            u.insert_mut(1u32);
            assert_eq!(u.pop_lowest(), Some(1u32));
            assert!(u.pop_lowest().is_none());
        }
        {
            let mut u = TinySet::empty().insert(63u32);
            assert_eq!(u.pop_lowest(), Some(63u32));
            assert!(u.pop_lowest().is_none());
        }
    }

    #[test]
    fn test_bitset() {
        let test_against_hashset = |els: &[u32], max_value: u32| {
            let mut hashset: HashSet<u32> = HashSet::new();
            let mut bitset = BitSet::with_max_value(max_value);
            for &el in els {
                assert!(el < max_value);
                hashset.insert(el);
                bitset.insert(el);
            }
            for el in 0..max_value {
                assert_eq!(hashset.contains(&el), bitset.contains(el));
            }
            assert_eq!(bitset.max_value(), max_value);
        };

        test_against_hashset(&[], 0);
        test_against_hashset(&[], 1);
        test_against_hashset(&[0u32], 1);
        test_against_hashset(&[0u32], 100);
        test_against_hashset(&[1u32, 2u32], 4);
        test_against_hashset(&[99u32], 100);
        test_against_hashset(&[63u32], 64);
        test_against_hashset(&[62u32, 63u32], 64);
    }

    #[test]
    fn test_bitset_large() {
        let arr = generate_nonunique_unsorted(1_000_000, 50_000);
        let mut btreeset: BTreeSet<u32> = BTreeSet::new();
        let mut bitset = BitSet::with_max_value(1_000_000);
        for el in arr {
            btreeset.insert(el);
            bitset.insert(el);
        }
        for i in 0..1_000_000 {
            assert_eq!(btreeset.contains(&i), bitset.contains(i));
        }
        assert_eq!(btreeset.len(), bitset.len());
        let mut bitset_docset = BitSetDocSet::from(bitset);
        for el in btreeset.into_iter() {
            bitset_docset.advance();
            assert_eq!(bitset_docset.doc(), el);
        }
        assert!(!bitset_docset.advance());
    }

    #[test]
    fn test_bitset_num_buckets() {
        use super::num_buckets;
        assert_eq!(num_buckets(0u32), 0);
        assert_eq!(num_buckets(1u32), 1);
        assert_eq!(num_buckets(64u32), 1);
        assert_eq!(num_buckets(65u32), 2);
        assert_eq!(num_buckets(128u32), 2);
        assert_eq!(num_buckets(129u32), 3);
    }

    #[test]
    fn test_tinyset_range() {
        assert_eq!(
            TinySet::range_lower(3).into_iter().collect::<Vec<u32>>(),
            [0, 1, 2]
        );
        assert!(TinySet::range_lower(0).is_empty());
        assert_eq!(
            TinySet::range_lower(63).into_iter().collect::<Vec<u32>>(),
            (0u32..63u32).collect::<Vec<_>>()
        );
        assert_eq!(
            TinySet::range_lower(1).into_iter().collect::<Vec<u32>>(),
            [0]
        );
        assert_eq!(
            TinySet::range_lower(2).into_iter().collect::<Vec<u32>>(),
            [0, 1]
        );
        assert_eq!(
            TinySet::range_greater_or_equal(3)
                .into_iter()
                .collect::<Vec<u32>>(),
            (3u32..64u32).collect::<Vec<_>>()
        );
    }

    #[test]
    fn test_bitset_len() {
        let mut bitset = BitSet::with_max_value(1_000);
        assert_eq!(bitset.len(), 0);
        bitset.insert(3u32);
        assert_eq!(bitset.len(), 1);
        bitset.insert(103u32);
        assert_eq!(bitset.len(), 2);
        bitset.insert(3u32);
        assert_eq!(bitset.len(), 2);
        bitset.insert(103u32);
        assert_eq!(bitset.len(), 2);
        bitset.insert(104u32);
        assert_eq!(bitset.len(), 3);
    }

    #[test]
    fn test_bitset_clear() {
        let mut bitset = BitSet::with_max_value(1_000);
        let els = tests::sample(1_000, 0.01f32);
        for &el in &els {
            bitset.insert(el);
        }
        assert!(els.iter().all(|el| bitset.contains(*el)));
        bitset.clear();
        for el in 0u32..1000u32 {
            assert!(!bitset.contains(el));
        }
    }

    #[bench]
    fn bench_tinyset_pop(b: &mut test::Bencher) {
        b.iter(|| {
            let mut tinyset = TinySet::singleton(test::black_box(31u32));
            tinyset.pop_lowest();
            tinyset.pop_lowest();
            tinyset.pop_lowest();
            tinyset.pop_lowest();
            tinyset.pop_lowest();
            tinyset.pop_lowest();
        });
    }

    #[bench]
    fn bench_tinyset_sum(b: &mut test::Bencher) {
        let tiny_set = TinySet::empty().insert(10u32).insert(14u32).insert(21u32);
        b.iter(|| {
            assert_eq!(test::black_box(tiny_set).into_iter().sum::<u32>(), 45u32);
        });
    }

    #[bench]
    fn bench_tinyarr_sum(b: &mut test::Bencher) {
        let v = [10u32, 14u32, 21u32];
        b.iter(|| test::black_box(v).iter().cloned().sum::<u32>());
    }

    #[bench]
    fn bench_bitset_initialize(b: &mut test::Bencher) {
        b.iter(|| BitSet::with_max_value(1_000_000));
    }

}

src/common/composite_file.rs (Normal file, 225 lines)
@@ -0,0 +1,225 @@

use std::io::Write;
use common::CountingWriter;
use std::collections::HashMap;
use schema::Field;
use common::VInt;
use directory::WritePtr;
use std::io::{self, Read};
use directory::ReadOnlySource;
use common::BinarySerializable;

#[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
pub struct FileAddr {
    field: Field,
    idx: usize,
}

impl FileAddr {
    fn new(field: Field, idx: usize) -> FileAddr {
        FileAddr { field, idx }
    }
}

impl BinarySerializable for FileAddr {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        self.field.serialize(writer)?;
        VInt(self.idx as u64).serialize(writer)?;
        Ok(())
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
        let field = Field::deserialize(reader)?;
        let idx = VInt::deserialize(reader)?.0 as usize;
        Ok(FileAddr { field, idx })
    }
}

/// A `CompositeWrite` is used to write a `CompositeFile`.
pub struct CompositeWrite<W = WritePtr> {
    write: CountingWriter<W>,
    offsets: HashMap<FileAddr, usize>,
}

impl<W: Write> CompositeWrite<W> {
    /// Create a new writer that writes a composite file
    /// into a given write.
    pub fn wrap(w: W) -> CompositeWrite<W> {
        CompositeWrite {
            write: CountingWriter::wrap(w),
            offsets: HashMap::new(),
        }
    }

    /// Start writing a new field.
    pub fn for_field(&mut self, field: Field) -> &mut CountingWriter<W> {
        self.for_field_with_idx(field, 0)
    }

    /// Start writing a new field.
    pub fn for_field_with_idx(&mut self, field: Field, idx: usize) -> &mut CountingWriter<W> {
        let offset = self.write.written_bytes();
        let file_addr = FileAddr::new(field, idx);
        assert!(!self.offsets.contains_key(&file_addr));
        self.offsets.insert(file_addr, offset);
        &mut self.write
    }

    /// Close the composite file.
    ///
    /// An index of the different field offsets
    /// will be written as a footer.
    pub fn close(mut self) -> io::Result<()> {
        let footer_offset = self.write.written_bytes();
        VInt(self.offsets.len() as u64).serialize(&mut self.write)?;

        let mut offset_fields: Vec<_> = self.offsets
            .iter()
            .map(|(file_addr, offset)| (*offset, *file_addr))
            .collect();

        offset_fields.sort();

        let mut prev_offset = 0;
        for (offset, file_addr) in offset_fields {
            VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
            file_addr.serialize(&mut self.write)?;
            prev_offset = offset;
        }

        let footer_len = (self.write.written_bytes() - footer_offset) as u32;
        footer_len.serialize(&mut self.write)?;
        self.write.flush()?;
        Ok(())
    }
}
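
Spelled out, the footer that `close()` emits looks like this (a sketch with invented offsets, assuming two fields were written, the first one 5 bytes long):

    [payload of Field(0)][payload of Field(4)]      <- field data, starting at offsets 0 and 5
    VInt(2)                                         <- number of (offset, FileAddr) entries
    VInt(0)  FileAddr { field: Field(0), idx: 0 }   <- start offsets are delta-encoded (0 - 0)
    VInt(5)  FileAddr { field: Field(4), idx: 0 }   <- (5 - 0)
    footer_len as u32                               <- fixed 4 bytes, read back first on open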

/// A composite file is an abstraction to store a
/// file partitioned by field.
///
/// The file needs to be written field by field.
/// A footer describes the start and stop offsets
/// for each field.
#[derive(Clone)]
pub struct CompositeFile {
    data: ReadOnlySource,
    offsets_index: HashMap<FileAddr, (usize, usize)>,
}

impl CompositeFile {
    /// Opens a composite file stored in a given
    /// `ReadOnlySource`.
    pub fn open(data: &ReadOnlySource) -> io::Result<CompositeFile> {
        let end = data.len();
        let footer_len_data = data.slice_from(end - 4);
        let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize;

        let footer_start = end - 4 - footer_len;
        let footer_data = data.slice(footer_start, footer_start + footer_len);
        let mut footer_buffer = footer_data.as_slice();
        let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize;

        let mut file_addrs = vec![];
        let mut offsets = vec![];

        let mut field_index = HashMap::new();

        let mut offset = 0;
        for _ in 0..num_fields {
            offset += VInt::deserialize(&mut footer_buffer)?.0 as usize;
            let file_addr = FileAddr::deserialize(&mut footer_buffer)?;
            offsets.push(offset);
            file_addrs.push(file_addr);
        }
        offsets.push(footer_start);
        for i in 0..num_fields {
            let file_addr = file_addrs[i];
            let start_offset = offsets[i];
            let end_offset = offsets[i + 1];
            field_index.insert(file_addr, (start_offset, end_offset));
        }

        Ok(CompositeFile {
            data: data.slice_to(footer_start),
            offsets_index: field_index,
        })
    }

    /// Returns a composite file that stores
    /// no fields.
    pub fn empty() -> CompositeFile {
        CompositeFile {
            offsets_index: HashMap::new(),
            data: ReadOnlySource::empty(),
        }
    }

    /// Returns the `ReadOnlySource` associated
    /// to a given `Field` and stored in a `CompositeFile`.
    pub fn open_read(&self, field: Field) -> Option<ReadOnlySource> {
        self.open_read_with_idx(field, 0)
    }

    /// Returns the `ReadOnlySource` associated
    /// to a given `Field` and stored in a `CompositeFile`.
    pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<ReadOnlySource> {
        self.offsets_index
            .get(&FileAddr { field, idx })
            .map(|&(from, to)| self.data.slice(from, to))
    }
}

#[cfg(test)]
mod test {

    use std::io::Write;
    use super::{CompositeFile, CompositeWrite};
    use directory::{Directory, RAMDirectory};
    use schema::Field;
    use common::VInt;
    use common::BinarySerializable;
    use std::path::Path;

    #[test]
    fn test_composite_file() {
        let path = Path::new("test_path");
        let mut directory = RAMDirectory::create();
        {
            let w = directory.open_write(path).unwrap();
            let mut composite_write = CompositeWrite::wrap(w);
            {
                let mut write_0 = composite_write.for_field(Field(0u32));
                VInt(32431123u64).serialize(&mut write_0).unwrap();
                write_0.flush().unwrap();
            }

            {
                let mut write_4 = composite_write.for_field(Field(4u32));
                VInt(2).serialize(&mut write_4).unwrap();
                write_4.flush().unwrap();
            }
            composite_write.close().unwrap();
        }
        {
            let r = directory.open_read(path).unwrap();
            let composite_file = CompositeFile::open(&r).unwrap();
            {
                let file0 = composite_file.open_read(Field(0u32)).unwrap();
                let mut file0_buf = file0.as_slice();
                let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
                assert_eq!(file0_buf.len(), 0);
                assert_eq!(payload_0, 32431123u64);
            }
            {
                let file4 = composite_file.open_read(Field(4u32)).unwrap();
                let mut file4_buf = file4.as_slice();
                let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
                assert_eq!(file4_buf.len(), 0);
                assert_eq!(payload_4, 2u64);
            }
        }
    }

}

@@ -1,8 +1,7 @@
use std::io::Write;
use std::io;

pub struct CountingWriter<W: Write> {
pub struct CountingWriter<W> {
    underlying: W,
    written_bytes: usize,
}
@@ -10,7 +9,7 @@ pub struct CountingWriter<W: Write> {
impl<W: Write> CountingWriter<W> {
    pub fn wrap(underlying: W) -> CountingWriter<W> {
        CountingWriter {
            underlying: underlying,
            underlying,
            written_bytes: 0,
        }
    }
@@ -37,8 +36,6 @@ impl<W: Write> Write for CountingWriter<W> {
    }
}

#[cfg(test)]
mod test {

@@ -1,23 +1,61 @@
mod serialize;
mod timer;
mod serialize;
mod vint;
mod counting_writer;
mod composite_file;
pub mod bitpacker;
mod bitset;

pub use self::serialize::BinarySerializable;
pub use self::timer::Timing;
pub use self::timer::TimerTree;
pub use self::timer::OpenTimer;
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
pub use self::serialize::{BinarySerializable, FixedSize};
pub use self::vint::VInt;
pub use self::counting_writer::CountingWriter;
pub use self::bitset::BitSet;
pub(crate) use self::bitset::TinySet;
pub use byteorder::LittleEndian as Endianness;

use std::io;

/// Create a default io error given a string.
pub fn make_io_err(msg: String) -> io::Error {
    io::Error::new(io::ErrorKind::Other, msg)
/// Computes the number of bits that will be used for bitpacking.
///
/// In general the target is the minimum number of bits
/// required to express the amplitude given in argument.
///
/// e.g. If the amplitude is 10, we can store all ints on simply 4 bits.
///
/// The logic is slightly more convoluted here as, for optimization
/// reasons, we want to ensure that a value spans over at most 8
/// aligned bytes.
///
/// Spanning over 9 bytes is possible, for instance, if we do
/// bitpacking with an amplitude of 63 bits.
/// In this case, the second int will start on bit
/// 63 (which belongs to byte 7) and end at byte 15;
/// hence 9 bytes (from byte 7 to byte 15 included).
///
/// To avoid this, we force the number of bits to 64
/// when the result is greater than `64 - 8 = 56` bits.
///
/// Note that this only affects rare use cases spanning over
/// a very large range of values. Even in this case, it results
/// in an extra cost of at most 12% compared to the optimal
/// number of bits.
pub(crate) fn compute_num_bits(n: u64) -> u8 {
    let amplitude = (64u32 - n.leading_zeros()) as u8;
    if amplitude <= 64 - 8 {
        amplitude
    } else {
        64
    }
}
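
A quick check of the saturation rule described above, with values on each side of the 56-bit threshold (illustrative asserts, not part of the diff):

    assert_eq!(compute_num_bits((1u64 << 56) - 1), 56); // amplitude 56 is kept as is
    assert_eq!(compute_num_bits(1u64 << 56), 64);       // amplitude 57 > 56 is forced to 64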

pub(crate) fn is_power_of_2(n: usize) -> bool {
    (n > 0) && (n & (n - 1) == 0)
}

/// Create a default io error given a string.
pub(crate) fn make_io_err(msg: String) -> io::Error {
    io::Error::new(io::ErrorKind::Other, msg)
}

/// Has length trait
pub trait HasLen {
@@ -32,7 +70,6 @@ pub trait HasLen {

const HIGHEST_BIT: u64 = 1 << 63;

/// Maps an `i64` to a `u64`.
///
/// For simplicity, tantivy internally handles `i64` as `u64`.
@@ -64,11 +101,11 @@ pub fn u64_to_i64(val: u64) -> i64 {
    (val ^ HIGHEST_BIT) as i64
}
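
Flipping the highest bit is what makes the mapping order-preserving: all negative `i64` values land below all non-negative ones in unsigned order. For illustration (a sketch assuming the `i64_to_u64` counterpart of the function above):

    assert!(i64_to_u64(-1i64) < i64_to_u64(0i64)); // 0x7FFF_FFFF_FFFF_FFFF < 0x8000_0000_0000_0000
    assert_eq!(u64_to_i64(i64_to_u64(-42i64)), -42i64); // the mapping round-trips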

#[cfg(test)]
mod test {
pub(crate) mod test {

    use super::{i64_to_u64, u64_to_i64};
    use super::{compute_num_bits, i64_to_u64, u64_to_i64};
    pub use super::serialize::test::fixed_size_test;

    fn test_i64_converter_helper(val: i64) {
        assert_eq!(u64_to_i64(i64_to_u64(val)), val);
@@ -85,4 +122,16 @@ mod test {
            test_i64_converter_helper(i);
        }
    }

    #[test]
    fn test_compute_num_bits() {
        assert_eq!(compute_num_bits(1), 1u8);
        assert_eq!(compute_num_bits(0), 0u8);
        assert_eq!(compute_num_bits(2), 2u8);
        assert_eq!(compute_num_bits(3), 2u8);
        assert_eq!(compute_num_bits(4), 3u8);
        assert_eq!(compute_num_bits(255), 8u8);
        assert_eq!(compute_num_bits(256), 9u8);
        assert_eq!(compute_num_bits(5_000_000_000), 33u8);
    }
}

@@ -1,18 +1,25 @@
use byteorder::{ReadBytesExt, WriteBytesExt};
use byteorder::LittleEndian as Endianness;
use common::Endianness;
use std::fmt;
use std::io::Write;
use std::io::Read;
use std::io;
use common::VInt;

/// Trait for a simple binary serialization.
pub trait BinarySerializable: fmt::Debug + Sized {
    /// Serialize
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>;
    /// Deserialize
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self>;
}

/// `FixedSize` marks a `BinarySerializable` as
/// always serializing to the same size.
pub trait FixedSize: BinarySerializable {
    const SIZE_IN_BYTES: usize;
}

impl BinarySerializable for () {
    fn serialize<W: Write>(&self, _: &mut W) -> io::Result<()> {
        Ok(())
@@ -22,6 +29,10 @@ impl BinarySerializable for () {
    }
}

impl FixedSize for () {
    const SIZE_IN_BYTES: usize = 0;
}

impl<T: BinarySerializable> BinarySerializable for Vec<T> {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        VInt(self.len() as u64).serialize(writer)?;
@@ -41,7 +52,6 @@ impl<T: BinarySerializable> BinarySerializable for Vec<T> {
    }
}

impl<Left: BinarySerializable, Right: BinarySerializable> BinarySerializable for (Left, Right) {
    fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
        self.0.serialize(write)?;
@@ -62,6 +72,9 @@ impl BinarySerializable for u32 {
    }
}

impl FixedSize for u32 {
    const SIZE_IN_BYTES: usize = 4;
}

impl BinarySerializable for u64 {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
@@ -72,6 +85,10 @@ impl BinarySerializable for u64 {
    }
}

impl FixedSize for u64 {
    const SIZE_IN_BYTES: usize = 8;
}

impl BinarySerializable for i64 {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_i64::<Endianness>(*self)
@@ -81,6 +98,9 @@ impl BinarySerializable for i64 {
    }
}

impl FixedSize for i64 {
    const SIZE_IN_BYTES: usize = 8;
}

impl BinarySerializable for u8 {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
@@ -91,6 +111,10 @@ impl BinarySerializable for u8 {
    }
}

impl FixedSize for u8 {
    const SIZE_IN_BYTES: usize = 1;
}

impl BinarySerializable for String {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        let data: &[u8] = self.as_bytes();
@@ -108,65 +132,79 @@ impl BinarySerializable for String {
    }
}

#[cfg(test)]
mod test {
pub mod test {

    use common::VInt;
    use super::*;

    fn serialize_test<T: BinarySerializable + Eq>(v: T, num_bytes: usize) {
    pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
        let mut buffer = Vec::new();
        O::default().serialize(&mut buffer).unwrap();
        assert_eq!(buffer.len(), O::SIZE_IN_BYTES);
    }

    fn serialize_test<T: BinarySerializable + Eq>(v: T) -> usize {
        let mut buffer: Vec<u8> = Vec::new();
        if num_bytes != 0 {
            v.serialize(&mut buffer).unwrap();
            assert_eq!(buffer.len(), num_bytes);
        } else {
            v.serialize(&mut buffer).unwrap();
        }
        v.serialize(&mut buffer).unwrap();
        let num_bytes = buffer.len();
        let mut cursor = &buffer[..];
        let deser = T::deserialize(&mut cursor).unwrap();
        assert_eq!(deser, v);
        num_bytes
    }

    #[test]
    fn test_serialize_u8() {
        serialize_test(3u8, 1);
        serialize_test(5u8, 1);
        fixed_size_test::<u8>();
    }

    #[test]
    fn test_serialize_u32() {
        serialize_test(3u32, 4);
        serialize_test(5u32, 4);
        serialize_test(u32::max_value(), 4);
        fixed_size_test::<u32>();
        assert_eq!(4, serialize_test(3u32));
        assert_eq!(4, serialize_test(5u32));
        assert_eq!(4, serialize_test(u32::max_value()));
    }

    #[test]
    fn test_serialize_i64() {
        fixed_size_test::<i64>();
    }

    #[test]
    fn test_serialize_u64() {
        fixed_size_test::<u64>();
    }

    #[test]
    fn test_serialize_string() {
        serialize_test(String::from(""), 1);
        serialize_test(String::from("ぽよぽよ"), 1 + 3 * 4);
        serialize_test(String::from("富士さん見える。"), 1 + 3 * 8);
        assert_eq!(serialize_test(String::from("")), 1);
        assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4);
        assert_eq!(
            serialize_test(String::from("富士さん見える。")),
            1 + 3 * 8
        );
    }

    #[test]
    fn test_serialize_vec() {
        let v: Vec<u8> = Vec::new();
        serialize_test(v, 1);
        serialize_test(vec![1u32, 3u32], 1 + 4 * 2);
        assert_eq!(serialize_test(Vec::<u8>::new()), 1);
        assert_eq!(serialize_test(vec![1u32, 3u32]), 1 + 4 * 2);
    }

    #[test]
    fn test_serialize_vint() {
        for i in 0..10_000 {
            serialize_test(VInt(i as u64), 0);
            serialize_test(VInt(i as u64));
        }
        serialize_test(VInt(7u64), 1);
        serialize_test(VInt(127u64), 1);
        serialize_test(VInt(128u64), 2);
        serialize_test(VInt(129u64), 2);
        serialize_test(VInt(1234u64), 2);
        serialize_test(VInt(16_383), 2);
        serialize_test(VInt(16_384), 3);
        serialize_test(VInt(u64::max_value()), 10);
        assert_eq!(serialize_test(VInt(7u64)), 1);
        assert_eq!(serialize_test(VInt(127u64)), 1);
        assert_eq!(serialize_test(VInt(128u64)), 2);
        assert_eq!(serialize_test(VInt(129u64)), 2);
        assert_eq!(serialize_test(VInt(1234u64)), 2);
        assert_eq!(serialize_test(VInt(16_383u64)), 2);
        assert_eq!(serialize_test(VInt(16_384u64)), 3);
        assert_eq!(serialize_test(VInt(u64::max_value())), 10);
    }
}

@@ -1,100 +0,0 @@
use time::PreciseTime;

pub struct OpenTimer<'a> {
    name: &'static str,
    timer_tree: &'a mut TimerTree,
    start: PreciseTime,
    depth: u32,
}

impl<'a> OpenTimer<'a> {
    /// Starts timing a new named subtask
    ///
    /// The timer is stopped automatically
    /// when the `OpenTimer` is dropped.
    pub fn open(&mut self, name: &'static str) -> OpenTimer {
        OpenTimer {
            name: name,
            timer_tree: self.timer_tree,
            start: PreciseTime::now(),
            depth: self.depth + 1,
        }
    }
}

impl<'a> Drop for OpenTimer<'a> {
    fn drop(&mut self) {
        self.timer_tree
            .timings
            .push(Timing {
                name: self.name,
                duration: self.start
                    .to(PreciseTime::now())
                    .num_microseconds()
                    .unwrap(),
                depth: self.depth,
            });
    }
}

/// Timing recording
#[derive(Debug, Serialize)]
pub struct Timing {
    name: &'static str,
    duration: i64,
    depth: u32,
}

/// Timer tree
#[derive(Debug, Serialize)]
pub struct TimerTree {
    timings: Vec<Timing>,
}

impl TimerTree {
    /// Returns the total time elapsed in microseconds
    pub fn total_time(&self) -> i64 {
        self.timings.last().unwrap().duration
    }

    /// Open a new named subtask
    pub fn open(&mut self, name: &'static str) -> OpenTimer {
        OpenTimer {
            name: name,
            timer_tree: self,
            start: PreciseTime::now(),
            depth: 0,
        }
    }
}

impl Default for TimerTree {
    fn default() -> TimerTree {
        TimerTree { timings: Vec::new() }
    }
}

#[cfg(test)]
mod tests {

    use super::*;

    #[test]
    fn test_timer() {
        let mut timer_tree = TimerTree::default();
        {
            let mut a = timer_tree.open("a");
            {
                let mut ab = a.open("b");
                {
                    let _abc = ab.open("c");
                }
                {
                    let _abd = ab.open("d");
                }
            }
        }
        assert_eq!(timer_tree.timings.len(), 4);
    }
}
@@ -3,8 +3,6 @@ use std::io;
use std::io::Write;
use std::io::Read;

/// Wrapper over a `u64` that serializes as a variable int.
#[derive(Debug, Eq, PartialEq)]
pub struct VInt(pub u64);
@@ -13,6 +11,10 @@ impl VInt {
    pub fn val(&self) -> u64 {
        self.0
    }

    pub fn deserialize_u64<R: Read>(reader: &mut R) -> io::Result<u64> {
        VInt::deserialize(reader).map(|vint| vint.0)
    }
}

impl BinarySerializable for VInt {
@@ -31,7 +33,6 @@ impl BinarySerializable for VInt {
        }
            i += 1;
        }

    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
@@ -41,13 +42,18 @@ impl BinarySerializable for VInt {
        loop {
            match bytes.next() {
                Some(Ok(b)) => {
                    result += ((b % 128u8) as u64) << shift;
                    result += u64::from(b % 128u8) << shift;
                    if b & 128u8 != 0u8 {
                        break;
                    }
                    shift += 7;
                }
                _ => return Err(io::Error::new(io::ErrorKind::InvalidData, "Reach end of buffer")),
                _ => {
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "Reach end of buffer",
                    ))
                }
            }
        }
        Ok(VInt(result))

@@ -1,170 +0,0 @@
use super::{BlockEncoder, BlockDecoder};
use super::NUM_DOCS_PER_BLOCK;
use compression::{VIntEncoder, VIntDecoder};

pub struct CompositeEncoder {
    block_encoder: BlockEncoder,
    output: Vec<u8>,
}

impl CompositeEncoder {
    pub fn new() -> CompositeEncoder {
        CompositeEncoder {
            block_encoder: BlockEncoder::new(),
            output: Vec::with_capacity(500_000),
        }
    }

    pub fn compress_sorted(&mut self, vals: &[u32]) -> &[u8] {
        self.output.clear();
        let num_blocks = vals.len() / NUM_DOCS_PER_BLOCK;
        let mut offset = 0u32;
        for i in 0..num_blocks {
            let vals_slice = &vals[i * NUM_DOCS_PER_BLOCK..(i + 1) * NUM_DOCS_PER_BLOCK];
            let block_compressed = self.block_encoder.compress_block_sorted(vals_slice, offset);
            offset = vals_slice[NUM_DOCS_PER_BLOCK - 1];
            self.output.extend_from_slice(block_compressed);
        }
        let vint_compressed =
            self.block_encoder
                .compress_vint_sorted(&vals[num_blocks * NUM_DOCS_PER_BLOCK..], offset);
        self.output.extend_from_slice(vint_compressed);
        &self.output
    }

    pub fn compress_unsorted(&mut self, vals: &[u32]) -> &[u8] {
        self.output.clear();
        let num_blocks = vals.len() / NUM_DOCS_PER_BLOCK;
        for i in 0..num_blocks {
            let vals_slice = &vals[i * NUM_DOCS_PER_BLOCK..(i + 1) * NUM_DOCS_PER_BLOCK];
            let block_compressed = self.block_encoder.compress_block_unsorted(vals_slice);
            self.output.extend_from_slice(block_compressed);
        }
        let vint_compressed = self.block_encoder
            .compress_vint_unsorted(&vals[num_blocks * NUM_DOCS_PER_BLOCK..]);
        self.output.extend_from_slice(vint_compressed);
        &self.output
    }
}

pub struct CompositeDecoder {
    block_decoder: BlockDecoder,
    vals: Vec<u32>,
}

impl CompositeDecoder {
    pub fn new() -> CompositeDecoder {
        CompositeDecoder {
            block_decoder: BlockDecoder::new(),
            vals: Vec::with_capacity(500_000),
        }
    }

    pub fn uncompress_sorted(&mut self,
                             mut compressed_data: &[u8],
                             uncompressed_len: usize)
                             -> &[u32] {
        if uncompressed_len > self.vals.capacity() {
            let extra_capacity = uncompressed_len - self.vals.capacity();
            self.vals.reserve(extra_capacity);
        }
        let mut offset = 0u32;
        self.vals.clear();
        let num_blocks = uncompressed_len / NUM_DOCS_PER_BLOCK;
        for _ in 0..num_blocks {
            compressed_data = self.block_decoder
                .uncompress_block_sorted(compressed_data, offset);
            offset = self.block_decoder.output(NUM_DOCS_PER_BLOCK - 1);
            self.vals
                .extend_from_slice(self.block_decoder.output_array());
        }
        self.block_decoder
            .uncompress_vint_sorted(compressed_data,
                                    offset,
                                    uncompressed_len % NUM_DOCS_PER_BLOCK);
        self.vals
            .extend_from_slice(self.block_decoder.output_array());
        &self.vals
    }

    pub fn uncompress_unsorted(&mut self,
                               mut compressed_data: &[u8],
                               uncompressed_len: usize)
                               -> &[u32] {
        self.vals.clear();
        let num_blocks = uncompressed_len / NUM_DOCS_PER_BLOCK;
        for _ in 0..num_blocks {
            compressed_data = self.block_decoder
                .uncompress_block_unsorted(compressed_data);
            self.vals
                .extend_from_slice(self.block_decoder.output_array());
        }
        self.block_decoder
            .uncompress_vint_unsorted(compressed_data, uncompressed_len % NUM_DOCS_PER_BLOCK);
        self.vals
            .extend_from_slice(self.block_decoder.output_array());
        &self.vals
    }
}

impl Into<Vec<u32>> for CompositeDecoder {
    fn into(self) -> Vec<u32> {
        self.vals
    }
}

#[cfg(test)]
pub mod tests {

    use test::Bencher;
    use super::*;
    use tests;

    #[test]
    fn test_composite_unsorted() {
        let data = tests::generate_array(10_000, 0.1);
        let mut encoder = CompositeEncoder::new();
        let compressed = encoder.compress_unsorted(&data);
        assert!(compressed.len() <= 19_794);
        let mut decoder = CompositeDecoder::new();
        let result = decoder.uncompress_unsorted(&compressed, data.len());
        for i in 0..data.len() {
            assert_eq!(data[i], result[i]);
        }
    }

    #[test]
    fn test_composite_sorted() {
        let data = tests::generate_array(10_000, 0.1);
        let mut encoder = CompositeEncoder::new();
        let compressed = encoder.compress_sorted(&data);
        assert!(compressed.len() <= 7_826);
        let mut decoder = CompositeDecoder::new();
        let result = decoder.uncompress_sorted(&compressed, data.len());
        for i in 0..data.len() {
            assert_eq!(data[i], result[i]);
        }
    }

    const BENCH_NUM_INTS: usize = 99_968;

    #[bench]
    fn bench_compress(b: &mut Bencher) {
        let mut encoder = CompositeEncoder::new();
        let data = tests::generate_array(BENCH_NUM_INTS, 0.1);
        b.iter(|| { encoder.compress_sorted(&data); });
    }

    #[bench]
    fn bench_uncompress(b: &mut Bencher) {
        let mut encoder = CompositeEncoder::new();
        let data = tests::generate_array(BENCH_NUM_INTS, 0.1);
        let compressed = encoder.compress_sorted(&data);
        let mut decoder = CompositeDecoder::new();
        b.iter(|| { decoder.uncompress_sorted(compressed, BENCH_NUM_INTS); });
    }
}
@@ -1,52 +1,146 @@
#![allow(dead_code)]

mod composite;
pub use self::composite::{CompositeEncoder, CompositeDecoder};
mod stream;

pub const COMPRESSION_BLOCK_SIZE: usize = 128;
const COMPRESSED_BLOCK_MAX_SIZE: usize = COMPRESSION_BLOCK_SIZE * 4 + 1;

pub use self::stream::CompressedIntStream;

#[cfg(not(feature="simdcompression"))]
mod pack {
    mod compression_pack_nosimd;
    pub use self::compression_pack_nosimd::*;
use bitpacking::{BitPacker, BitPacker4x};

/// Returns the size in bytes of a compressed block, given `num_bits`.
pub fn compressed_block_size(num_bits: u8) -> usize {
    1 + (num_bits as usize) * COMPRESSION_BLOCK_SIZE / 8
}

#[cfg(feature="simdcompression")]
mod pack {
    mod compression_pack_simd;
    pub use self::compression_pack_simd::*;
pub struct BlockEncoder {
    bitpacker: BitPacker4x,
    pub output: [u8; COMPRESSED_BLOCK_MAX_SIZE],
    pub output_len: usize,
}

pub use self::pack::{BlockEncoder, BlockDecoder};
impl BlockEncoder {
    pub fn new() -> BlockEncoder {
        BlockEncoder {
            bitpacker: BitPacker4x::new(),
            output: [0u8; COMPRESSED_BLOCK_MAX_SIZE],
            output_len: 0,
        }
    }

#[cfg( any(not(feature="simdcompression"), target_env="msvc") )]
mod vint {
    mod compression_vint_nosimd;
    pub use self::compression_vint_nosimd::*;
    pub fn compress_block_sorted(&mut self, block: &[u32], offset: u32) -> &[u8] {
        let num_bits = self.bitpacker.num_bits_sorted(offset, block);
        self.output[0] = num_bits;
        let written_size = 1 + self.bitpacker.compress_sorted(offset, block, &mut self.output[1..], num_bits);
        &self.output[..written_size]
    }

    pub fn compress_block_unsorted(&mut self, block: &[u32]) -> &[u8] {
        let num_bits = self.bitpacker.num_bits(block);
        self.output[0] = num_bits;
        let written_size = 1 + self.bitpacker.compress(block, &mut self.output[1..], num_bits);
        &self.output[..written_size]
    }
}

#[cfg( all(feature="simdcompression", not(target_env="msvc")) )]
mod vint {
    mod compression_vint_simd;
    pub use self::compression_vint_simd::*;

pub struct BlockDecoder {
    bitpacker: BitPacker4x,
    pub output: [u32; COMPRESSION_BLOCK_SIZE + 1],
    pub output_len: usize,
}

impl BlockDecoder {
    pub fn new() -> BlockDecoder {
        BlockDecoder::with_val(0u32)
    }

    pub fn with_val(val: u32) -> BlockDecoder {
        let mut output = [val; COMPRESSION_BLOCK_SIZE + 1];
        output[COMPRESSION_BLOCK_SIZE] = 0u32;
        BlockDecoder {
            bitpacker: BitPacker4x::new(),
            output,
            output_len: 0,
        }
    }

    pub fn uncompress_block_sorted(&mut self, compressed_data: &[u8], offset: u32) -> usize {
        let num_bits = compressed_data[0];
        self.output_len = COMPRESSION_BLOCK_SIZE;
        1 + self.bitpacker.decompress_sorted(offset, &compressed_data[1..], &mut self.output, num_bits)
    }

    pub fn uncompress_block_unsorted<'a>(&mut self, compressed_data: &'a [u8]) -> usize {
        let num_bits = compressed_data[0];
        self.output_len = COMPRESSION_BLOCK_SIZE;
        1 + self.bitpacker.decompress(&compressed_data[1..], &mut self.output, num_bits)
    }

    #[inline]
    pub fn output_array(&self) -> &[u32] {
        &self.output[..self.output_len]
    }

    #[inline]
    pub fn output(&self, idx: usize) -> u32 {
        self.output[idx]
    }
}

mod vint;

pub trait VIntEncoder {
    /// Compresses an array of `u32` integers,
    /// using [delta-encoding](https://en.wikipedia.org/wiki/Delta_encoding)
    /// and variable bytes encoding.
    ///
    /// The method takes an array of ints to compress, and returns
    /// a `&[u8]` representing the compressed data.
    ///
    /// The method also takes an offset to give the value of the
    /// hypothetical previous element in the delta-encoding.
    fn compress_vint_sorted(&mut self, input: &[u32], offset: u32) -> &[u8];

    /// Compresses an array of `u32` integers,
    /// using variable bytes encoding.
    ///
    /// The method takes an array of ints to compress, and returns
    /// a `&[u8]` representing the compressed data.
    fn compress_vint_unsorted(&mut self, input: &[u32]) -> &[u8];
}

pub trait VIntDecoder {
    fn uncompress_vint_sorted<'a>(&mut self,
                                  compressed_data: &'a [u8],
                                  offset: u32,
                                  num_els: usize)
                                  -> &'a [u8];
    fn uncompress_vint_unsorted<'a>(&mut self,
                                    compressed_data: &'a [u8],
                                    num_els: usize)
                                    -> &'a [u8];
    /// Uncompress an array of `u32` integers
    /// that were compressed using [delta-encoding](https://en.wikipedia.org/wiki/Delta_encoding)
    /// and variable bytes encoding.
    ///
    /// The method takes a number of ints to decompress, and returns
    /// the number of bytes that were read to decompress them.
    ///
    /// The method also takes an offset to give the value of the
    /// hypothetical previous element in the delta-encoding.
    ///
    /// For instance, if the delta-encoded values are `1, 3, 9`, and the
    /// `offset` is 5, then the output will be:
    /// `5 + 1 = 6, 6 + 3 = 9, 9 + 9 = 18`
    fn uncompress_vint_sorted<'a>(
        &mut self,
        compressed_data: &'a [u8],
        offset: u32,
        num_els: usize,
    ) -> usize;

    /// Uncompress an array of `u32`s, compressed using variable
    /// byte encoding.
    ///
    /// The method takes a number of ints to decompress, and returns
    /// the number of bytes that were read to decompress them.
    fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize;
}

impl VIntEncoder for BlockEncoder {
@@ -60,27 +154,22 @@ impl VIntEncoder for BlockEncoder {
}

impl VIntDecoder for BlockDecoder {
    fn uncompress_vint_sorted<'a>(&mut self,
                                  compressed_data: &'a [u8],
                                  offset: u32,
                                  num_els: usize)
                                  -> &'a [u8] {
    fn uncompress_vint_sorted<'a>(
        &mut self,
        compressed_data: &'a [u8],
        offset: u32,
        num_els: usize,
    ) -> usize {
        self.output_len = num_els;
        vint::uncompress_sorted(compressed_data, &mut self.output[..num_els], offset)
    }

    fn uncompress_vint_unsorted<'a>(&mut self,
                                    compressed_data: &'a [u8],
                                    num_els: usize)
                                    -> &'a [u8] {
    fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize {
        self.output_len = num_els;
        vint::uncompress_unsorted(compressed_data, &mut self.output[..num_els])
    }
}
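
Tying the two traits together, a vint round trip matches the worked example in the doc comment above (an illustrative sketch, not part of the diff):

    let mut encoder = BlockEncoder::new();
    // With an offset of 5, the values 6, 9, 18 are stored as the deltas 1, 3, 9.
    let vals: Vec<u32> = vec![6, 9, 18];
    let compressed: Vec<u8> = encoder.compress_vint_sorted(&vals, 5u32).to_owned();
    let mut decoder = BlockDecoder::new();
    let consumed_num_bytes = decoder.uncompress_vint_sorted(&compressed, 5u32, vals.len());
    assert_eq!(consumed_num_bytes, compressed.len());
    assert_eq!(decoder.output_array(), &vals[..]);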
|
||||
|
||||
|
||||
pub const NUM_DOCS_PER_BLOCK: usize = 128; //< should be a power of 2 to let the compiler optimize.
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
@@ -95,8 +184,8 @@ pub mod tests {
|
||||
let compressed_data = encoder.compress_block_sorted(&vals, 0);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
{
|
||||
let remaining_data = decoder.uncompress_block_sorted(compressed_data, 0);
|
||||
assert_eq!(remaining_data.len(), 0);
|
||||
let consumed_num_bytes = decoder.uncompress_block_sorted(compressed_data, 0);
|
||||
assert_eq!(consumed_num_bytes, compressed_data.len());
|
||||
}
|
||||
for i in 0..128 {
|
||||
assert_eq!(vals[i], decoder.output(i));
|
||||
@@ -110,8 +199,8 @@ pub mod tests {
|
||||
let compressed_data = encoder.compress_block_sorted(&vals, 10);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
{
|
||||
let remaining_data = decoder.uncompress_block_sorted(compressed_data, 10);
|
||||
assert_eq!(remaining_data.len(), 0);
|
||||
let consumed_num_bytes = decoder.uncompress_block_sorted(compressed_data, 10);
|
||||
assert_eq!(consumed_num_bytes, compressed_data.len());
|
||||
}
|
||||
for i in 0..128 {
|
||||
assert_eq!(vals[i], decoder.output(i));
|
||||
@@ -129,9 +218,9 @@ pub mod tests {
|
||||
compressed.push(173u8);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
{
|
||||
let remaining_data = decoder.uncompress_block_sorted(&compressed, 10);
|
||||
assert_eq!(remaining_data.len(), 1);
|
||||
assert_eq!(remaining_data[0], 173u8);
|
||||
let consumed_num_bytes = decoder.uncompress_block_sorted(&compressed, 10);
|
||||
assert_eq!(consumed_num_bytes, compressed.len() - 1);
|
||||
assert_eq!(compressed[consumed_num_bytes], 173u8);
|
||||
}
|
||||
for i in 0..n {
|
||||
assert_eq!(vals[i], decoder.output(i));
|
||||
@@ -149,16 +238,15 @@ pub mod tests {
|
||||
compressed.push(173u8);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
{
|
||||
let remaining_data = decoder.uncompress_block_unsorted(&compressed);
|
||||
assert_eq!(remaining_data.len(), 1);
|
||||
assert_eq!(remaining_data[0], 173u8);
|
||||
let consumed_num_bytes = decoder.uncompress_block_unsorted(&compressed);
|
||||
assert_eq!(consumed_num_bytes + 1, compressed.len());
|
||||
assert_eq!(compressed[consumed_num_bytes], 173u8);
|
||||
}
|
||||
for i in 0..n {
|
||||
assert_eq!(vals[i], decoder.output(i));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_encode_vint() {
|
||||
{
|
||||
@@ -169,31 +257,47 @@ pub mod tests {
|
||||
let encoded_data = encoder.compress_vint_sorted(&input, *offset);
|
||||
assert!(encoded_data.len() <= expected_length);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
let remaining_data =
|
||||
let consumed_num_bytes =
|
||||
decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len());
|
||||
assert_eq!(0, remaining_data.len());
|
||||
assert_eq!(consumed_num_bytes, encoded_data.len());
|
||||
assert_eq!(input, decoder.output_array());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[bench]
|
||||
fn bench_compress(b: &mut Bencher) {
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let data = tests::generate_array(NUM_DOCS_PER_BLOCK, 0.1);
|
||||
b.iter(|| { encoder.compress_block_sorted(&data, 0u32); });
|
||||
let data = tests::generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
|
||||
b.iter(|| {
|
||||
encoder.compress_block_sorted(&data, 0u32);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_uncompress(b: &mut Bencher) {
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let data = tests::generate_array(NUM_DOCS_PER_BLOCK, 0.1);
|
||||
let data = tests::generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
|
||||
let compressed = encoder.compress_block_sorted(&data, 0u32);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
b.iter(|| { decoder.uncompress_block_sorted(compressed, 0u32); });
|
||||
b.iter(|| {
|
||||
decoder.uncompress_block_sorted(compressed, 0u32);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_all_docs_compression_numbits() {
|
||||
for num_bits in 0..33 {
|
||||
let mut data = [0u32; 128];
|
||||
if num_bits > 0 {
|
||||
data[0] = 1 << (num_bits - 1);
|
||||
}
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let compressed = encoder.compress_block_unsorted(&data);
|
||||
assert_eq!(compressed[0] as usize, num_bits);
|
||||
assert_eq!(compressed.len(), compressed_block_size(compressed[0]));
|
||||
}
|
||||
}
|
||||
|
||||
const NUM_INTS_BENCH_VINT: usize = 10;
|
||||
|
||||
@@ -201,7 +305,9 @@ pub mod tests {
|
||||
fn bench_compress_vint(b: &mut Bencher) {
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let data = tests::generate_array(NUM_INTS_BENCH_VINT, 0.001);
|
||||
b.iter(|| { encoder.compress_vint_sorted(&data, 0u32); });
|
||||
b.iter(|| {
|
||||
encoder.compress_vint_sorted(&data, 0u32);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
@@ -210,7 +316,9 @@ pub mod tests {
|
||||
let data = tests::generate_array(NUM_INTS_BENCH_VINT, 0.001);
|
||||
let compressed = encoder.compress_vint_sorted(&data, 0u32);
|
||||
let mut decoder = BlockDecoder::new();
|
||||
b.iter(|| { decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT); });
|
||||
b.iter(|| {
|
||||
decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT);
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,135 +0,0 @@
|
||||
use common::bitpacker::compute_num_bits;
|
||||
use common::bitpacker::{BitPacker, BitUnpacker};
|
||||
use std::cmp;
|
||||
use std::io::Write;
|
||||
use super::super::NUM_DOCS_PER_BLOCK;
|
||||
|
||||
const COMPRESSED_BLOCK_MAX_SIZE: usize = NUM_DOCS_PER_BLOCK * 4 + 1;
|
||||
|
||||
pub fn compress_sorted(vals: &mut [u32], mut output: &mut [u8], offset: u32) -> usize {
|
||||
let mut max_delta = 0;
|
||||
{
|
||||
let mut local_offset = offset;
|
||||
for i in 0..NUM_DOCS_PER_BLOCK {
|
||||
let val = vals[i];
|
||||
let delta = val - local_offset;
|
||||
max_delta = cmp::max(max_delta, delta);
|
||||
vals[i] = delta;
|
||||
local_offset = val;
|
||||
}
|
||||
}
|
||||
let num_bits = compute_num_bits(max_delta);
|
||||
output.write_all(&[num_bits]).unwrap();
|
||||
let mut bit_packer = BitPacker::new(num_bits as usize);
|
||||
for val in vals {
|
||||
bit_packer.write(*val, &mut output).unwrap();
|
||||
}
|
||||
1 +
|
||||
bit_packer
|
||||
.close(&mut output)
|
||||
.expect("packing in memory should never fail")
|
||||
}
|
||||
|
||||
|
||||
|
||||
pub struct BlockEncoder {
|
||||
pub output: [u8; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
pub output_len: usize,
|
||||
input_buffer: [u32; NUM_DOCS_PER_BLOCK],
|
||||
}
|
||||
|
||||
impl BlockEncoder {
|
||||
pub fn new() -> BlockEncoder {
|
||||
BlockEncoder {
|
||||
output: [0u8; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
output_len: 0,
|
||||
input_buffer: [0u32; NUM_DOCS_PER_BLOCK],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn compress_block_sorted(&mut self, vals: &[u32], offset: u32) -> &[u8] {
|
||||
self.input_buffer.clone_from_slice(vals);
|
||||
let compressed_size = compress_sorted(&mut self.input_buffer, &mut self.output, offset);
|
||||
&self.output[..compressed_size]
|
||||
}
|
||||
|
||||
pub fn compress_block_unsorted(&mut self, vals: &[u32]) -> &[u8] {
|
||||
let compressed_size: usize = {
|
||||
let mut output: &mut [u8] = &mut self.output;
|
||||
let max = vals.iter()
|
||||
.cloned()
|
||||
.max()
|
||||
.expect("compress unsorted called with an empty array");
|
||||
let num_bits = compute_num_bits(max);
|
||||
output.write_all(&[num_bits]).unwrap();
|
||||
let mut bit_packer = BitPacker::new(num_bits as usize);
|
||||
for val in vals {
|
||||
bit_packer.write(*val, &mut output).unwrap();
|
||||
}
|
||||
1 +
|
||||
bit_packer
|
||||
.close(&mut output)
|
||||
.expect("packing in memory should never fail")
|
||||
};
|
||||
&self.output[..compressed_size]
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BlockDecoder {
|
||||
pub output: [u32; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
pub output_len: usize,
|
||||
}
|
||||
|
||||
|
||||
impl BlockDecoder {
|
||||
pub fn new() -> BlockDecoder {
|
||||
BlockDecoder::with_val(0u32)
|
||||
}
|
||||
|
||||
pub fn with_val(val: u32) -> BlockDecoder {
|
||||
BlockDecoder {
|
||||
output: [val; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
output_len: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn uncompress_block_sorted<'a>(&mut self,
|
||||
compressed_data: &'a [u8],
|
||||
mut offset: u32)
|
||||
-> &'a [u8] {
|
||||
let consumed_size = {
|
||||
let num_bits = compressed_data[0];
|
||||
let bit_unpacker = BitUnpacker::new(&compressed_data[1..], num_bits as usize);
|
||||
for i in 0..NUM_DOCS_PER_BLOCK {
|
||||
let delta = bit_unpacker.get(i);
|
||||
let val = offset + delta;
|
||||
self.output[i] = val;
|
||||
offset = val;
|
||||
}
|
||||
1 + (num_bits as usize * NUM_DOCS_PER_BLOCK + 7) / 8
|
||||
};
|
||||
self.output_len = NUM_DOCS_PER_BLOCK;
|
||||
&compressed_data[consumed_size..]
|
||||
}
|
||||
|
||||
pub fn uncompress_block_unsorted<'a>(&mut self, compressed_data: &'a [u8]) -> &'a [u8] {
|
||||
let num_bits = compressed_data[0];
|
||||
let bit_unpacker = BitUnpacker::new(&compressed_data[1..], num_bits as usize);
|
||||
for i in 0..NUM_DOCS_PER_BLOCK {
|
||||
self.output[i] = bit_unpacker.get(i);
|
||||
}
|
||||
let consumed_size = 1 + (num_bits as usize * NUM_DOCS_PER_BLOCK + 7) / 8;
|
||||
self.output_len = NUM_DOCS_PER_BLOCK;
|
||||
&compressed_data[consumed_size..]
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn output_array(&self) -> &[u32] {
|
||||
&self.output[..self.output_len]
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn output(&self, idx: usize) -> u32 {
|
||||
self.output[idx]
|
||||
}
|
||||
}
|
||||
@@ -1,120 +0,0 @@
|
||||
use super::super::NUM_DOCS_PER_BLOCK;
|
||||
|
||||
const COMPRESSED_BLOCK_MAX_SIZE: usize = NUM_DOCS_PER_BLOCK * 4 + 1;
|
||||
|
||||
mod simdcomp {
|
||||
use libc::size_t;
|
||||
|
||||
extern "C" {
|
||||
pub fn compress_sorted(data: *const u32, output: *mut u8, offset: u32) -> size_t;
|
||||
|
||||
pub fn uncompress_sorted(compressed_data: *const u8,
|
||||
output: *mut u32,
|
||||
offset: u32)
|
||||
-> size_t;
|
||||
|
||||
pub fn compress_unsorted(data: *const u32, output: *mut u8) -> size_t;
|
||||
|
||||
pub fn uncompress_unsorted(compressed_data: *const u8, output: *mut u32) -> size_t;
|
||||
}
|
||||
}
|
||||
|
||||
fn compress_sorted(vals: &[u32], output: &mut [u8], offset: u32) -> usize {
|
||||
unsafe { simdcomp::compress_sorted(vals.as_ptr(), output.as_mut_ptr(), offset) }
|
||||
}
|
||||
|
||||
fn uncompress_sorted(compressed_data: &[u8], output: &mut [u32], offset: u32) -> usize {
|
||||
unsafe { simdcomp::uncompress_sorted(compressed_data.as_ptr(), output.as_mut_ptr(), offset) }
|
||||
}
|
||||
|
||||
fn compress_unsorted(vals: &[u32], output: &mut [u8]) -> usize {
|
||||
unsafe { simdcomp::compress_unsorted(vals.as_ptr(), output.as_mut_ptr()) }
|
||||
}
|
||||
|
||||
fn uncompress_unsorted(compressed_data: &[u8], output: &mut [u32]) -> usize {
|
||||
unsafe { simdcomp::uncompress_unsorted(compressed_data.as_ptr(), output.as_mut_ptr()) }
|
||||
}
|
||||
|
||||
|
||||
pub struct BlockEncoder {
|
||||
pub output: [u8; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
pub output_len: usize,
|
||||
}
|
||||
|
||||
impl BlockEncoder {
|
||||
pub fn new() -> BlockEncoder {
|
||||
BlockEncoder {
|
||||
output: [0u8; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
output_len: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn compress_block_sorted(&mut self, vals: &[u32], offset: u32) -> &[u8] {
|
||||
let compressed_size = compress_sorted(vals, &mut self.output, offset);
|
||||
&self.output[..compressed_size]
|
||||
}
|
||||
|
||||
pub fn compress_block_unsorted(&mut self, vals: &[u32]) -> &[u8] {
|
||||
let compressed_size = compress_unsorted(vals, &mut self.output);
|
||||
&self.output[..compressed_size]
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BlockDecoder {
|
||||
pub output: [u32; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
pub output_len: usize,
|
||||
}
|
||||
|
||||
|
||||
impl BlockDecoder {
|
||||
pub fn new() -> BlockDecoder {
|
||||
BlockDecoder::with_val(0u32)
|
||||
}
|
||||
|
||||
pub fn with_val(val: u32) -> BlockDecoder {
|
||||
BlockDecoder {
|
||||
output: [val; COMPRESSED_BLOCK_MAX_SIZE],
|
||||
output_len: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn uncompress_block_sorted<'a>(&mut self,
|
||||
compressed_data: &'a [u8],
|
||||
offset: u32)
|
||||
-> &'a [u8] {
|
||||
let consumed_size = uncompress_sorted(compressed_data, &mut self.output, offset);
|
||||
self.output_len = NUM_DOCS_PER_BLOCK;
|
||||
&compressed_data[consumed_size..]
|
||||
}
|
||||
|
||||
pub fn uncompress_block_unsorted<'a>(&mut self, compressed_data: &'a [u8]) -> &'a [u8] {
|
||||
let consumed_size = uncompress_unsorted(compressed_data, &mut self.output);
|
||||
self.output_len = NUM_DOCS_PER_BLOCK;
|
||||
&compressed_data[consumed_size..]
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn output_array(&self) -> &[u32] {
|
||||
&self.output[..self.output_len]
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn output(&self, idx: usize) -> u32 {
|
||||
self.output[idx]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::BlockEncoder;
|
||||
|
||||
#[test]
|
||||
fn test_all_docs_compression_len() {
|
||||
let data: Vec<u32> = (0u32..128u32).collect();
|
||||
let mut encoder = BlockEncoder::new();
|
||||
let compressed = encoder.compress_block_sorted(&data, 0u32);
|
||||
assert_eq!(compressed.len(), 17);
|
||||
}
|
||||
}
|
||||
158
src/compression/stream.rs
Normal file
158
src/compression/stream.rs
Normal file
@@ -0,0 +1,158 @@
use compression::BlockDecoder;
use compression::COMPRESSION_BLOCK_SIZE;
use compression::compressed_block_size;
use directory::{ReadOnlySource, SourceRead};

/// Reads a stream of compressed ints.
///
/// Tantivy uses `CompressedIntStream` to read
/// the position file.
/// The `.skip(...)` makes it possible to avoid
/// decompressing blocks that are not required.
pub struct CompressedIntStream {
    buffer: SourceRead,

    block_decoder: BlockDecoder,
    cached_addr: usize,      // address of the currently decoded block
    cached_next_addr: usize, // address following the currently decoded block

    addr: usize, // address of the block associated to the current position
    inner_offset: usize,
}

impl CompressedIntStream {
    /// Opens a compressed int stream.
    pub(crate) fn wrap(source: ReadOnlySource) -> CompressedIntStream {
        CompressedIntStream {
            buffer: SourceRead::from(source),
            block_decoder: BlockDecoder::new(),
            cached_addr: usize::max_value(),
            cached_next_addr: usize::max_value(),

            addr: 0,
            inner_offset: 0,
        }
    }

    /// Loads the block at the given address and returns the address of the
    /// following block.
    pub fn read_block(&mut self, addr: usize) -> usize {
        if self.cached_addr == addr {
            // we are already on this block.
            // no need to read.
            self.cached_next_addr
        } else {
            let next_addr = addr
                + self.block_decoder
                    .uncompress_block_unsorted(self.buffer.slice_from(addr));
            self.cached_addr = addr;
            self.cached_next_addr = next_addr;
            next_addr
        }
    }

    /// Fills a buffer with the next `output.len()` integers.
    /// This does not consume / advance the stream.
    pub fn read(&mut self, output: &mut [u32]) {
        let mut cursor = self.addr;
        let mut inner_offset = self.inner_offset;
        let mut num_els: usize = output.len();
        let mut start = 0;
        loop {
            cursor = self.read_block(cursor);
            let block = &self.block_decoder.output_array()[inner_offset..];
            let block_len = block.len();
            if num_els >= block_len {
                output[start..start + block_len].clone_from_slice(&block);
                start += block_len;
                num_els -= block_len;
                inner_offset = 0;
            } else {
                output[start..].clone_from_slice(&block[..num_els]);
                break;
            }
        }
    }

    /// Skips the next `skip_len` integers.
    ///
    /// If a full block is skipped, calling
    /// `.skip(...)` will avoid decompressing it.
    ///
    /// May panic if the end of the stream is reached.
    pub fn skip(&mut self, mut skip_len: usize) {
        loop {
            let available = COMPRESSION_BLOCK_SIZE - self.inner_offset;
            if available >= skip_len {
                self.inner_offset += skip_len;
                break;
            } else {
                skip_len -= available;
                // entirely skip decompressing some blocks.
                let num_bits: u8 = self.buffer.get(self.addr);
                let block_len = compressed_block_size(num_bits);
                self.addr += block_len;
                self.inner_offset = 0;
            }
        }
    }
}

#[cfg(test)]
pub mod tests {

    use super::CompressedIntStream;
    use compression::compressed_block_size;
    use compression::COMPRESSION_BLOCK_SIZE;
    use compression::BlockEncoder;
    use directory::ReadOnlySource;

    fn create_stream_buffer() -> ReadOnlySource {
        let mut buffer: Vec<u8> = vec![];
        let mut encoder = BlockEncoder::new();
        let vals: Vec<u32> = (0u32..1152u32).collect();
        for chunk in vals.chunks(COMPRESSION_BLOCK_SIZE) {
            let compressed_block = encoder.compress_block_unsorted(chunk);
            let num_bits = compressed_block[0];
            assert_eq!(compressed_block_size(num_bits), compressed_block.len());
            buffer.extend_from_slice(compressed_block);
        }
        if cfg!(simd) {
            buffer.extend_from_slice(&[0u8; 7]);
        }
        ReadOnlySource::from(buffer)
    }

    #[test]
    fn test_compressed_int_stream() {
        let buffer = create_stream_buffer();
        let mut stream = CompressedIntStream::wrap(buffer);
        let mut block: [u32; COMPRESSION_BLOCK_SIZE] = [0u32; COMPRESSION_BLOCK_SIZE];

        stream.read(&mut block[0..2]);
        assert_eq!(block[0], 0);
        assert_eq!(block[1], 1);

        // reading does not consume the stream
        stream.read(&mut block[0..2]);
        assert_eq!(block[0], 0);
        assert_eq!(block[1], 1);
        stream.skip(2);

        stream.skip(5);
        stream.read(&mut block[0..3]);
        stream.skip(3);

        assert_eq!(block[0], 7);
        assert_eq!(block[1], 8);
        assert_eq!(block[2], 9);
        stream.skip(500);
        stream.read(&mut block[0..3]);
        stream.skip(3);

        assert_eq!(block[0], 510);
        assert_eq!(block[1], 511);
        assert_eq!(block[2], 512);
        stream.skip(511);
        stream.read(&mut block[..1]);
        assert_eq!(block[0], 1024);
    }
}
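The reason `skip` is cheap is visible in `create_stream_buffer`: each bit-packed block records its `num_bits` in its first byte, so a block's byte length can be computed without decoding it. A hedged sketch of that arithmetic follows; `compressed_block_size` itself is defined elsewhere in the `compression` module, and the formula below is one plausible shape of it for 128-value blocks, not the authoritative definition.

// Illustrative only: 1 header byte + 128 values packed at num_bits bits each.
// 128 * num_bits is always a multiple of 8, i.e. 16 bytes per bit of width.
fn compressed_block_size(num_bits: u8) -> usize {
    1 + 16 * num_bits as usize
}
// Skipping, say, 3 full blocks therefore touches exactly 3 header bytes
// and performs no decompression at all.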
@@ -1,6 +1,9 @@

#[inline(always)]
pub fn compress_sorted<'a>(input: &[u32], output: &'a mut [u8], mut offset: u32) -> &'a [u8] {
pub(crate) fn compress_sorted<'a>(
    input: &[u32],
    output: &'a mut [u8],
    mut offset: u32,
) -> &'a [u8] {
    let mut byte_written = 0;
    for &v in input {
        let mut to_encode: u32 = v - offset;
@@ -22,7 +25,7 @@ pub fn compress_sorted<'a>(input: &[u32], output: &'a mut [u8], mut offset: u32)
}

#[inline(always)]
pub fn compress_unsorted<'a>(input: &[u32], output: &'a mut [u8]) -> &'a [u8] {
pub(crate) fn compress_unsorted<'a>(input: &[u32], output: &'a mut [u8]) -> &'a [u8] {
    let mut byte_written = 0;
    for &v in input {
        let mut to_encode: u32 = v;
@@ -43,10 +46,11 @@ pub fn compress_unsorted<'a>(input: &[u32], output: &'a mut [u8]) -> &'a [u8] {
}

#[inline(always)]
pub fn uncompress_sorted<'a>(compressed_data: &'a [u8],
                             output: &mut [u32],
                             offset: u32)
                             -> &'a [u8] {
pub(crate) fn uncompress_sorted<'a>(
    compressed_data: &'a [u8],
    output: &mut [u32],
    offset: u32,
) -> usize {
    let mut read_byte = 0;
    let mut result = offset;
    let num_els = output.len();
@@ -63,11 +67,11 @@ pub fn uncompress_sorted<'a>(compressed_data: &'a [u8],
        }
        output[i] = result;
    }
    &compressed_data[read_byte..]
    read_byte
}

#[inline(always)]
pub fn uncompress_unsorted<'a>(compressed_data: &'a [u8], output: &mut [u32]) -> &'a [u8] {
pub(crate) fn uncompress_unsorted<'a>(compressed_data: &'a [u8], output: &mut [u32]) -> usize {
    let mut read_byte = 0;
    let num_els = output.len();
    for i in 0..num_els {
@@ -84,5 +88,5 @@ pub fn uncompress_unsorted<'a>(compressed_data: &'a [u8], output: &mut [u32]) ->
        }
        output[i] = result;
    }
    &compressed_data[read_byte..]
    read_byte
}
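To make the sorted/unsorted split concrete: the `sorted` variants encode `v - offset` deltas, so that sorted doc ids become small integers that pack into few bytes. A hedged standalone sketch of just the delta step; the byte-level encoding loop is elided in the hunks above, and the assumption that `offset` advances to the last value seen is inferred from the `mut offset` parameter.

// Illustrative: sorted [3, 7, 7, 10] with offset 0 yields deltas [3, 4, 0, 3],
// each of which then fits in a single encoded byte.
fn deltas(input: &[u32], mut offset: u32) -> Vec<u32> {
    input
        .iter()
        .map(|&v| {
            let delta = v - offset; // assumes input is sorted and >= offset
            offset = v;
            delta
        })
        .collect()
}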
@@ -1,68 +0,0 @@

mod streamvbyte {

    use libc::size_t;

    extern "C" {
        pub fn streamvbyte_delta_encode(data: *const u32,
                                        num_els: u32,
                                        output: *mut u8,
                                        offset: u32)
                                        -> size_t;

        pub fn streamvbyte_delta_decode(compressed_data: *const u8,
                                        output: *mut u32,
                                        num_els: u32,
                                        offset: u32)
                                        -> size_t;

        pub fn streamvbyte_encode(data: *const u32, num_els: u32, output: *mut u8) -> size_t;

        pub fn streamvbyte_decode(compressed_data: *const u8,
                                  output: *mut u32,
                                  num_els: usize)
                                  -> size_t;
    }
}

#[inline(always)]
pub fn compress_sorted<'a>(input: &[u32], output: &'a mut [u8], offset: u32) -> &'a [u8] {
    let compress_length = unsafe {
        streamvbyte::streamvbyte_delta_encode(input.as_ptr(),
                                              input.len() as u32,
                                              output.as_mut_ptr(),
                                              offset)
    };
    &output[..compress_length]
}

#[inline(always)]
pub fn compress_unsorted<'a>(input: &[u32], output: &'a mut [u8]) -> &'a [u8] {
    let compress_length = unsafe {
        streamvbyte::streamvbyte_encode(input.as_ptr(), input.len() as u32, output.as_mut_ptr())
    };
    &output[..compress_length]
}

#[inline(always)]
pub fn uncompress_sorted<'a>(compressed_data: &'a [u8],
                             output: &mut [u32],
                             offset: u32)
                             -> &'a [u8] {
    let consumed_bytes = unsafe {
        streamvbyte::streamvbyte_delta_decode(compressed_data.as_ptr(),
                                              output.as_mut_ptr(),
                                              output.len() as u32,
                                              offset)
    };
    &compressed_data[consumed_bytes..]
}

#[inline(always)]
pub fn uncompress_unsorted<'a>(compressed_data: &'a [u8], output: &mut [u32]) -> &'a [u8] {
    let consumed_bytes = unsafe {
        streamvbyte::streamvbyte_decode(compressed_data.as_ptr(), output.as_mut_ptr(), output.len())
    };
    &compressed_data[consumed_bytes..]
}
@@ -6,7 +6,11 @@ use std::sync::Arc;
use std::borrow::BorrowMut;
use std::fmt;
use core::SegmentId;
use directory::{Directory, MmapDirectory, RAMDirectory};

#[cfg(feature="mmap")]
use directory::MmapDirectory;
use directory::{Directory, RAMDirectory};
use indexer::index_writer::open_index_writer;
use core::searcher::Searcher;
use std::convert::From;
@@ -18,11 +22,13 @@ use core::SegmentMeta;
use super::pool::LeasedItem;
use std::path::Path;
use core::IndexMeta;
use indexer::DirectoryLock;
use IndexWriter;
use directory::ManagedDirectory;
use core::META_FILEPATH;
use super::segment::create_segment;
use indexer::segment_updater::save_new_metas;
use tokenizer::TokenizerManager;

const NUM_SEARCHERS: usize = 12;

@@ -32,14 +38,14 @@ fn load_metas(directory: &Directory) -> Result<IndexMeta> {
    serde_json::from_str(&meta_string).chain_err(|| ErrorKind::CorruptedFile(META_FILEPATH.clone()))
}

/// Tantivy's Search Index
/// Search Index
pub struct Index {
    directory: ManagedDirectory,
    schema: Schema,
    searcher_pool: Arc<Pool<Searcher>>,
    tokenizers: TokenizerManager,
}

impl Index {
    /// Creates a new index using the `RAMDirectory`.
    ///
@@ -48,9 +54,10 @@ impl Index {
    pub fn create_in_ram(schema: Schema) -> Index {
        let ram_directory = RAMDirectory::create();
        // unwrap is ok here
        let directory = ManagedDirectory::new(ram_directory)
            .expect("Creating a managed directory from a brand new RAM directory \
                     should never fail.");
        let directory = ManagedDirectory::new(ram_directory).expect(
            "Creating a managed directory from a brand new RAM directory \
             should never fail.",
        );
        Index::from_directory(directory, schema).expect("Creating a RAMDirectory should never fail")
    }

@@ -58,12 +65,18 @@ impl Index {
    /// The index will use the `MMapDirectory`.
    ///
    /// If a previous index was in this directory, then its meta file will be destroyed.
    pub fn create(directory_path: &Path, schema: Schema) -> Result<Index> {
    #[cfg(feature="mmap")]
    pub fn create<P: AsRef<Path>>(directory_path: P, schema: Schema) -> Result<Index> {
        let mmap_directory = MmapDirectory::open(directory_path)?;
        let directory = ManagedDirectory::new(mmap_directory)?;
        Index::from_directory(directory, schema)
    }

    /// Accessor for the tokenizer manager.
    pub fn tokenizers(&self) -> &TokenizerManager {
        &self.tokenizers
    }

    /// Creates a new index in a temp directory.
    ///
    /// The index will use the `MMapDirectory` in a newly created directory.
@@ -72,6 +85,8 @@ impl Index {
    ///
    /// The temp directory is only used for testing the `MmapDirectory`.
    /// For other unit tests, prefer the `RAMDirectory`, see: `create_in_ram`.
    #[cfg(feature="mmap")]
    #[cfg(test)]
    pub fn create_from_tempdir(schema: Schema) -> Result<Index> {
        let mmap_directory = MmapDirectory::create_from_tempdir()?;
        let directory = ManagedDirectory::new(mmap_directory)?;
@@ -79,37 +94,44 @@ impl Index {
    }

    /// Creates a new index given a directory and an `IndexMeta`.
    fn create_from_metas(directory: ManagedDirectory, metas: IndexMeta) -> Result<Index> {
    fn create_from_metas(directory: ManagedDirectory, metas: &IndexMeta) -> Result<Index> {
        let schema = metas.schema.clone();
        let index = Index {
            directory: directory,
            schema: schema,
            directory,
            schema,
            searcher_pool: Arc::new(Pool::new()),
            tokenizers: TokenizerManager::default(),
        };
        try!(index.load_searchers());
        index.load_searchers()?;
        Ok(index)
    }

    /// Creates a new index from a directory.
    pub fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> Result<Index> {
        save_new_metas(schema.clone(), 0, directory.borrow_mut())?;
        Index::create_from_metas(directory, IndexMeta::with_schema(schema))
        let metas = IndexMeta::with_schema(schema);
        Index::create_from_metas(directory, &metas)
    }

    /// Opens a new directory from an index path.
    pub fn open(directory_path: &Path) -> Result<Index> {
    #[cfg(feature="mmap")]
    pub fn open<P: AsRef<Path>>(directory_path: P) -> Result<Index> {
        let mmap_directory = MmapDirectory::open(directory_path)?;
        let directory = ManagedDirectory::new(mmap_directory)?;
        let metas = try!(load_metas(&directory));
        Index::create_from_metas(directory, metas)
        let metas = load_metas(&directory)?;
        Index::create_from_metas(directory, &metas)
    }

    /// Returns the index opstamp.
    ///
    /// The opstamp is the number of documents that have been added
    /// from the beginning of time, up until the moment of the last commit.
    pub fn opstamp(&self) -> u64 {
        load_metas(self.directory()).unwrap().opstamp
    pub fn open_directory<TDirectory: Directory>(directory: TDirectory) -> Result<Index> {
        let directory = ManagedDirectory::new(directory)?;
        let metas = load_metas(&directory)?;
        Index::create_from_metas(directory, &metas)
    }

    /// Reads the index meta file from the directory.
    pub fn load_metas(&self) -> Result<IndexMeta> {
        load_metas(self.directory())
    }

    /// Open a new index writer. Attempts to acquire a lockfile.
@@ -127,14 +149,15 @@ impl Index {
    /// If the lockfile already exists, returns `Error::FileAlreadyExists`.
    /// # Panics
    /// If the heap size per thread is too small, panics.
    pub fn writer_with_num_threads(&self,
                                   num_threads: usize,
                                   heap_size_in_bytes: usize)
                                   -> Result<IndexWriter> {
        open_index_writer(self, num_threads, heap_size_in_bytes)
    pub fn writer_with_num_threads(
        &self,
        num_threads: usize,
        heap_size_in_bytes: usize,
    ) -> Result<IndexWriter> {
        let directory_lock = DirectoryLock::lock(self.directory().box_clone())?;
        open_index_writer(self, num_threads, heap_size_in_bytes, directory_lock)
    }

    /// Creates a multithreaded writer.
    /// It just calls `writer_with_num_threads` with the number of cores as `num_threads`.
    ///
@@ -156,9 +179,9 @@ impl Index {
    /// Returns the list of segments that are searchable
    pub fn searchable_segments(&self) -> Result<Vec<Segment>> {
        Ok(self.searchable_segment_metas()?
               .into_iter()
               .map(|segment_meta| self.segment(segment_meta))
               .collect())
            .into_iter()
            .map(|segment_meta| self.segment(segment_meta))
            .collect())
    }

    #[doc(hidden)]
@@ -185,28 +208,29 @@ impl Index {
    /// Reads the meta.json and returns the list of
    /// `SegmentMeta` from the last commit.
    pub fn searchable_segment_metas(&self) -> Result<Vec<SegmentMeta>> {
        Ok(load_metas(self.directory())?.segments)
        Ok(self.load_metas()?.segments)
    }

    /// Returns the list of segment ids that are searchable.
    pub fn searchable_segment_ids(&self) -> Result<Vec<SegmentId>> {
        Ok(self.searchable_segment_metas()?
               .iter()
               .map(|segment_meta| segment_meta.id())
               .collect())
            .iter()
            .map(|segment_meta| segment_meta.id())
            .collect())
    }

    /// Creates a new generation of searchers after
    /// a change of the set of searchable indexes.
    ///
    /// This needs to be called when a new segment has been
    /// published or after a merge.
    pub fn load_searchers(&self) -> Result<()> {
        let searchable_segments = self.searchable_segments()?;
        let segment_readers: Vec<SegmentReader> = try!(searchable_segments
            .into_iter()
            .map(SegmentReader::open)
            .collect());
        let segment_readers: Vec<SegmentReader> = searchable_segments
            .iter()
            .map(SegmentReader::open)
            .collect::<Result<_>>()?;
        let searchers = (0..NUM_SEARCHERS)
            .map(|_| Searcher::from(segment_readers.clone()))
            .collect();
@@ -229,7 +253,6 @@ impl Index {
    }
}

impl fmt::Debug for Index {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Index({:?})", self.directory)
@@ -241,7 +264,8 @@ impl Clone for Index {
        Index {
            directory: self.directory.clone(),
            schema: self.schema.clone(),
            searcher_pool: self.searcher_pool.clone(),
            searcher_pool: Arc::clone(&self.searcher_pool),
            tokenizers: self.tokenizers.clone(),
        }
    }
}
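Taken together, `create`/`open` are now gated on the `mmap` feature while `open_directory` accepts any `Directory`. A hedged sketch of how the constructors relate after this change; the path, field name, and error handling below are illustrative, not taken from the diff.

// Sketch only: relating the Index constructors after this change.
use tantivy::schema::{SchemaBuilder, TEXT};
use tantivy::Index;

fn build() -> tantivy::Result<()> {
    let mut schema_builder = SchemaBuilder::new();
    schema_builder.add_text_field("title", TEXT);
    let schema = schema_builder.build();

    // Always available, no filesystem involved.
    let _ram = Index::create_in_ram(schema.clone());

    // Only compiled with the `mmap` feature; the directory must already exist.
    #[cfg(feature = "mmap")]
    let _disk = Index::create("/tmp/example_index", schema)?;

    Ok(())
}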
@@ -1,27 +1,68 @@
use schema::Schema;
use core::SegmentMeta;
use std::fmt;
use serde_json;

/// Meta information about the `Index`.
///
/// This object is serialized on disk in the `meta.json` file.
/// It keeps information about
/// * the searchable segments,
/// * the index docstamp
/// * the index `docstamp`
/// * the schema
///
#[derive(Clone,Debug,Serialize, Deserialize)]
#[derive(Clone, Serialize, Deserialize)]
pub struct IndexMeta {
    pub segments: Vec<SegmentMeta>,
    pub schema: Schema,
    pub opstamp: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub payload: Option<String>,
}

impl IndexMeta {
    pub fn with_schema(schema: Schema) -> IndexMeta {
        IndexMeta {
            segments: vec![],
            schema: schema,
            schema,
            opstamp: 0u64,
            payload: None,
        }
    }
}

impl fmt::Debug for IndexMeta {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "{}",
            serde_json::ser::to_string(self)
                .expect("JSON serialization for IndexMeta should never fail.")
        )
    }
}

#[cfg(test)]
mod tests {

    use serde_json;
    use super::IndexMeta;
    use schema::{SchemaBuilder, TEXT};

    #[test]
    fn test_serialize_metas() {
        let schema = {
            let mut schema_builder = SchemaBuilder::new();
            schema_builder.add_text_field("text", TEXT);
            schema_builder.build()
        };
        let index_metas = IndexMeta {
            segments: Vec::new(),
            schema: schema,
            opstamp: 0u64,
            payload: None,
        };
        let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
        assert_eq!(json, r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#);
    }
}

184
src/core/inverted_index_reader.rs
Normal file
@@ -0,0 +1,184 @@
use directory::{ReadOnlySource, SourceRead};
use termdict::{TermDictionary, TermDictionaryImpl};
use postings::{BlockSegmentPostings, SegmentPostings};
use postings::TermInfo;
use schema::IndexRecordOption;
use schema::Term;
use compression::CompressedIntStream;
use postings::FreqReadingOption;
use common::BinarySerializable;
use schema::FieldType;

/// The inverted index reader is in charge of accessing
/// the inverted index associated to a specific field.
///
/// # Note
///
/// It is safe to delete the segment associated to
/// an `InvertedIndexReader`. As long as it is open,
/// the `ReadOnlySource` it is relying on should
/// stay available.
///
/// `InvertedIndexReader`s are created by calling
/// the `SegmentReader`'s [`.inverted_index(...)`] method.
pub struct InvertedIndexReader {
    termdict: TermDictionaryImpl,
    postings_source: ReadOnlySource,
    positions_source: ReadOnlySource,
    record_option: IndexRecordOption,
    total_num_tokens: u64,
}

impl InvertedIndexReader {
    pub(crate) fn new(
        termdict: TermDictionaryImpl,
        postings_source: ReadOnlySource,
        positions_source: ReadOnlySource,
        record_option: IndexRecordOption,
    ) -> InvertedIndexReader {
        let total_num_tokens_data = postings_source.slice(0, 8);
        let mut total_num_tokens_cursor = total_num_tokens_data.as_slice();
        let total_num_tokens = u64::deserialize(&mut total_num_tokens_cursor).unwrap_or(0u64);
        InvertedIndexReader {
            termdict,
            postings_source: postings_source.slice_from(8),
            positions_source,
            record_option,
            total_num_tokens,
        }
    }

    /// Creates an empty `InvertedIndexReader` object, which
    /// contains no terms at all.
    pub fn empty(field_type: FieldType) -> InvertedIndexReader {
        let record_option = field_type
            .get_index_record_option()
            .unwrap_or(IndexRecordOption::Basic);
        InvertedIndexReader {
            termdict: TermDictionaryImpl::empty(field_type),
            postings_source: ReadOnlySource::empty(),
            positions_source: ReadOnlySource::empty(),
            record_option,
            total_num_tokens: 0u64,
        }
    }

    /// Returns the term info associated with the term.
    pub fn get_term_info(&self, term: &Term) -> Option<TermInfo> {
        self.termdict.get(term.value_bytes())
    }

    /// Returns the term dictionary datastructure.
    pub fn terms(&self) -> &TermDictionaryImpl {
        &self.termdict
    }

    /// Resets the block segment to another position of the postings
    /// file.
    ///
    /// This is useful for enumerating through a list of terms,
    /// and consuming the associated posting lists while avoiding
    /// reallocating a `BlockSegmentPostings`.
    ///
    /// # Warning
    ///
    /// This does not reset the positions list.
    pub fn reset_block_postings_from_terminfo(
        &self,
        term_info: &TermInfo,
        block_postings: &mut BlockSegmentPostings,
    ) {
        let offset = term_info.postings_offset as usize;
        let end_source = self.postings_source.len();
        let postings_slice = self.postings_source.slice(offset, end_source);
        let postings_reader = SourceRead::from(postings_slice);
        block_postings.reset(term_info.doc_freq as usize, postings_reader);
    }

    /// Returns a block postings given a `term_info`.
    /// This method is for advanced usage only.
    ///
    /// Most users should prefer using `read_postings` instead.
    pub fn read_block_postings_from_terminfo(
        &self,
        term_info: &TermInfo,
        requested_option: IndexRecordOption,
    ) -> BlockSegmentPostings {
        let offset = term_info.postings_offset as usize;
        let postings_data = self.postings_source.slice_from(offset);
        let freq_reading_option = match (self.record_option, requested_option) {
            (IndexRecordOption::Basic, _) => FreqReadingOption::NoFreq,
            (_, IndexRecordOption::Basic) => FreqReadingOption::SkipFreq,
            (_, _) => FreqReadingOption::ReadFreq,
        };
        BlockSegmentPostings::from_data(
            term_info.doc_freq as usize,
            SourceRead::from(postings_data),
            freq_reading_option,
        )
    }

    /// Returns a posting object given a `term_info`.
    /// This method is for advanced usage only.
    ///
    /// Most users should prefer using `read_postings` instead.
    pub fn read_postings_from_terminfo(
        &self,
        term_info: &TermInfo,
        option: IndexRecordOption,
    ) -> SegmentPostings {
        let block_postings = self.read_block_postings_from_terminfo(term_info, option);
        let position_stream = {
            if option.has_positions() {
                let position_offset = term_info.positions_offset;
                let positions_source = self.positions_source.slice_from(position_offset as usize);
                let mut stream = CompressedIntStream::wrap(positions_source);
                stream.skip(term_info.positions_inner_offset as usize);
                Some(stream)
            } else {
                None
            }
        };
        SegmentPostings::from_block_postings(block_postings, position_stream)
    }

    /// Returns the total number of tokens recorded for all documents
    /// (including deleted documents).
    pub fn total_num_tokens(&self) -> u64 {
        self.total_num_tokens
    }

    /// Returns the segment postings associated with the term, and with the given option,
    /// or `None` if the term has never been encountered and indexed.
    ///
    /// If the field was not indexed with the indexing options that cover
    /// the requested options, the method does not fail
    /// and returns a `SegmentPostings` with as much information as possible.
    ///
    /// For instance, requesting `IndexRecordOption::Freq` for a
    /// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
    /// with `DocId`s and frequencies.
    pub fn read_postings(&self, term: &Term, option: IndexRecordOption) -> Option<SegmentPostings> {
        let term_info = get!(self.get_term_info(term));
        Some(self.read_postings_from_terminfo(&term_info, option))
    }

    pub(crate) fn read_postings_no_deletes(&self, term: &Term, option: IndexRecordOption) -> Option<SegmentPostings> {
        let term_info = get!(self.get_term_info(term));
        Some(self.read_postings_from_terminfo(&term_info, option))
    }

    /// Returns the number of documents containing the term.
    pub fn doc_freq(&self, term: &Term) -> u32 {
        self.get_term_info(term)
            .map(|term_info| term_info.doc_freq)
            .unwrap_or(0u32)
    }
}
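A hedged usage sketch tying the pieces above together: resolve a `TermInfo` from the term dictionary, then materialize postings at the requested precision. Only APIs shown in this file are used; the function name and flow are illustrative.

// Sketch: reading postings for one term through an InvertedIndexReader.
fn term_doc_freq(inv_index: &InvertedIndexReader, term: &Term) -> u32 {
    match inv_index.get_term_info(term) {
        Some(term_info) => {
            // Basic decodes doc ids only; the richer IndexRecordOption
            // variants also decode term frequencies and positions.
            let _postings =
                inv_index.read_postings_from_terminfo(&term_info, IndexRecordOption::Basic);
            // ... iterate `_postings` here; doc_freq is already known:
            term_info.doc_freq
        }
        None => 0u32,
    }
}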
@@ -7,7 +7,9 @@ mod segment;
mod index_meta;
mod pool;
mod segment_meta;
mod inverted_index_reader;

pub use self::inverted_index_reader::InvertedIndexReader;
pub use self::searcher::Searcher;
pub use self::segment_component::SegmentComponent;
pub use self::segment_id::SegmentId;
@@ -18,7 +20,6 @@ pub use self::index::Index;
pub use self::segment_meta::SegmentMeta;
pub use self::index_meta::IndexMeta;

use std::path::PathBuf;

lazy_static! {
@@ -10,44 +10,17 @@ pub struct GenerationItem<T> {
    item: T,
}

// See https://github.com/crossbeam-rs/crossbeam/issues/91
struct NonLeakingMsQueue<T> {
    underlying_queue: MsQueue<T>,
}

impl<T> Default for NonLeakingMsQueue<T> {
    fn default() -> NonLeakingMsQueue<T> {
        NonLeakingMsQueue { underlying_queue: MsQueue::new() }
    }
}

impl<T> NonLeakingMsQueue<T> {
    fn pop(&self) -> T {
        self.underlying_queue.pop()
    }

    fn push(&self, el: T) {
        self.underlying_queue.push(el);
    }
}

impl<T> Drop for NonLeakingMsQueue<T> {
    fn drop(&mut self) {
        while let Some(_popped_item_to_be_dropped) = self.underlying_queue.try_pop() {}
    }
}

pub struct Pool<T> {
    queue: Arc<NonLeakingMsQueue<GenerationItem<T>>>,
    queue: Arc<MsQueue<GenerationItem<T>>>,
    freshest_generation: AtomicUsize,
    next_generation: AtomicUsize,
}

impl<T> Pool<T> {
    pub fn new() -> Pool<T> {
        let queue = Arc::new(MsQueue::new());
        Pool {
            queue: Arc::default(),
            queue,
            freshest_generation: AtomicUsize::default(),
            next_generation: AtomicUsize::default(),
        }
@@ -57,7 +30,7 @@ impl<T> Pool<T> {
        let next_generation = self.next_generation.fetch_add(1, Ordering::SeqCst) + 1;
        for item in items {
            let gen_item = GenerationItem {
                item: item,
                item,
                generation: next_generation,
            };
            self.queue.push(gen_item);
@@ -68,7 +41,7 @@ impl<T> Pool<T> {
    /// At the exit of this method,
    /// - freshest_generation has a value greater than or equal to generation
    /// - freshest_generation has a value that has been advertised
    fn advertise_generation(&self, generation: usize) {
        // not optimal at all but the easiest to read proof.
        loop {
@@ -76,8 +49,11 @@ impl<T> Pool<T> {
            if former_generation >= generation {
                break;
            }
            self.freshest_generation
                .compare_and_swap(former_generation, generation, Ordering::SeqCst);
            self.freshest_generation.compare_and_swap(
                former_generation,
                generation,
                Ordering::SeqCst,
            );
        }
    }

@@ -91,21 +67,20 @@ impl<T> Pool<T> {
            let gen_item = self.queue.pop();
            if gen_item.generation >= generation {
                return LeasedItem {
                    gen_item: Some(gen_item),
                    recycle_queue: self.queue.clone(),
                };
                    gen_item: Some(gen_item),
                    recycle_queue: Arc::clone(&self.queue),
                };
            } else {
                // this searcher is obsolete,
                // removing it from the pool.
            }
        }
    }
}

pub struct LeasedItem<T> {
    gen_item: Option<GenerationItem<T>>,
    recycle_queue: Arc<NonLeakingMsQueue<GenerationItem<T>>>,
    recycle_queue: Arc<MsQueue<GenerationItem<T>>>,
}

impl<T> Deref for LeasedItem<T> {
@@ -113,18 +88,18 @@ impl<T> Deref for LeasedItem<T> {

    fn deref(&self) -> &T {
        &self.gen_item
             .as_ref()
             .expect("Unwrapping a leased item should never fail")
             .item // unwrap is safe here
            .as_ref()
            .expect("Unwrapping a leased item should never fail")
            .item // unwrap is safe here
    }
}

impl<T> DerefMut for LeasedItem<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.gen_item
             .as_mut()
             .expect("Unwrapping a mut leased item should never fail")
             .item // unwrap is safe here
            .as_mut()
            .expect("Unwrapping a mut leased item should never fail")
            .item // unwrap is safe here
    }
}

@@ -136,8 +111,6 @@ impl<T> Drop for LeasedItem<T> {
    }
}
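The generation machinery above exists so that a leased item is returned to the queue on drop, but only re-enters circulation if it still belongs to the freshest generation. A hedged sketch of the lease lifecycle; the method name `acquire` is an assumption (the hunk elides the method header, though the `LeasedItem` return strongly suggests it), and the `Pool<String>` payload is illustrative.

// Sketch: acquire/deref/drop cycle for the Pool above.
fn lease_cycle(pool: &Pool<String>) {
    let leased = pool.acquire();   // assumed name: pops a fresh-generation item
    println!("{}", leased.len());  // Deref yields &String
}                                  // Drop pushes the item back for reuse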

#[cfg(test)]
mod tests {
@@ -2,15 +2,13 @@ use Result;
use core::SegmentReader;
use schema::Document;
use collector::Collector;
use common::TimerTree;
use query::Query;
use DocId;
use DocAddress;
use schema::Term;
use termdict::TermMerger;
use schema::{Field, Term};
use termdict::{TermDictionary, TermMerger};
use std::sync::Arc;
use std::fmt;
use postings::TermInfo;

use core::InvertedIndexReader;

/// Holds a list of `SegmentReader`s ready for search.
///
@@ -21,7 +19,6 @@ pub struct Searcher {
    segment_readers: Vec<SegmentReader>,
}

impl Searcher {
    /// Fetches a document from tantivy's store given a `DocAddress`.
    ///
@@ -34,20 +31,20 @@ impl Searcher {
    }

    /// Returns the overall number of documents in the index.
    pub fn num_docs(&self) -> DocId {
    pub fn num_docs(&self) -> u64 {
        self.segment_readers
            .iter()
            .map(|segment_reader| segment_reader.num_docs())
            .fold(0u32, |acc, val| acc + val)
            .map(|segment_reader| segment_reader.num_docs() as u64)
            .sum::<u64>()
    }

    /// Returns the overall number of documents containing
    /// the given term.
    pub fn doc_freq(&self, term: &Term) -> u32 {
    pub fn doc_freq(&self, term: &Term) -> u64 {
        self.segment_readers
            .iter()
            .map(|segment_reader| segment_reader.doc_freq(term))
            .fold(0u32, |acc, val| acc + val)
            .map(|segment_reader| segment_reader.inverted_index(term.field()).doc_freq(term) as u64)
            .sum::<u64>()
    }

    /// Returns the list of segment readers
@@ -61,27 +58,43 @@ impl Searcher {
    }

    /// Runs a query on the segment readers wrapped by the searcher
    pub fn search<C: Collector>(&self, query: &Query, collector: &mut C) -> Result<TimerTree> {
    pub fn search<C: Collector>(&self, query: &Query, collector: &mut C) -> Result<()> {
        query.search(self, collector)
    }

    /// Returns a Stream over all of the sorted unique terms of
    /// the searcher.
    ///
    /// This includes all of the fields from all of the segment_readers.
    /// See [`TermIterator`](struct.TermIterator.html).
    ///
    /// # Warning
    /// This API is very likely to change in the future.
    pub fn terms(&self) -> TermMerger<TermInfo> {
        TermMerger::from(self.segment_readers())
    /// Returns the field searcher associated to a `Field`.
    pub fn field(&self, field: Field) -> FieldSearcher {
        let inv_index_readers = self.segment_readers
            .iter()
            .map(|segment_reader| segment_reader.inverted_index(field))
            .collect::<Vec<_>>();
        FieldSearcher::new(inv_index_readers)
    }
}

pub struct FieldSearcher {
    inv_index_readers: Vec<Arc<InvertedIndexReader>>,
}

impl FieldSearcher {
    fn new(inv_index_readers: Vec<Arc<InvertedIndexReader>>) -> FieldSearcher {
        FieldSearcher { inv_index_readers }
    }

    /// Returns a Stream over all of the sorted unique terms
    /// for the given field.
    pub fn terms(&self) -> TermMerger {
        let term_streamers: Vec<_> = self.inv_index_readers
            .iter()
            .map(|inverted_index| inverted_index.terms().stream())
            .collect();
        TermMerger::new(term_streamers)
    }
}

impl From<Vec<SegmentReader>> for Searcher {
    fn from(segment_readers: Vec<SegmentReader>) -> Searcher {
        Searcher { segment_readers: segment_readers }
        Searcher { segment_readers }
    }
}
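End to end, the new `u64` aggregates and the `FieldSearcher` are meant to be used like this; a hedged sketch in which the field and term values are illustrative.

// Sketch: aggregate stats and per-field term iteration on a Searcher.
fn inspect(searcher: &Searcher, field: Field, term: &Term) {
    // Both counts are now accumulated as u64 across all segment readers.
    let total_docs: u64 = searcher.num_docs();
    let df: u64 = searcher.doc_freq(term);
    println!("{} docs, doc_freq {}", total_docs, df);

    // Merged, sorted term stream for one field across all segments.
    let field_searcher = searcher.field(field);
    let _terms = field_searcher.terms();
    // ... advance the TermMerger stream here.
}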
@@ -3,7 +3,7 @@ use std::path::PathBuf;
use schema::Schema;
use std::fmt;
use core::SegmentId;
use directory::{ReadOnlySource, WritePtr, FileProtection};
use directory::{FileProtection, ReadOnlySource, WritePtr};
use indexer::segment_serializer::SegmentSerializer;
use super::SegmentComponent;
use core::Index;
@@ -29,13 +29,15 @@ impl fmt::Debug for Segment {
///
/// The function is here to make it private outside `tantivy`.
pub fn create_segment(index: Index, meta: SegmentMeta) -> Segment {
    Segment {
        index: index,
        meta: meta,
    }
    Segment { index, meta }
}

impl Segment {
    /// Returns the index the segment belongs to.
    pub fn index(&self) -> &Index {
        &self.index
    }

    /// Returns our index's schema.
    pub fn schema(&self) -> Schema {
        self.index.schema()
@@ -64,7 +66,6 @@ impl Segment {
        self.meta.relative_path(component)
    }

    /// Protects a specific component file from being deleted.
    ///
    /// Returns a FileProtection object. The file is guaranteed
@@ -76,20 +77,22 @@ impl Segment {
    }

    /// Opens one of the component files for a *regular* read.
    pub fn open_read(&self,
                     component: SegmentComponent)
                     -> result::Result<ReadOnlySource, OpenReadError> {
    pub fn open_read(
        &self,
        component: SegmentComponent,
    ) -> result::Result<ReadOnlySource, OpenReadError> {
        let path = self.relative_path(component);
        let source = try!(self.index.directory().open_read(&path));
        let source = self.index.directory().open_read(&path)?;
        Ok(source)
    }

    /// Opens one of the component files for a *regular* write.
    pub fn open_write(&mut self,
                      component: SegmentComponent)
                      -> result::Result<WritePtr, OpenWriteError> {
    pub fn open_write(
        &mut self,
        component: SegmentComponent,
    ) -> result::Result<WritePtr, OpenWriteError> {
        let path = self.relative_path(component);
        let write = try!(self.index.directory_mut().open_write(&path));
        let write = self.index.directory_mut().open_write(&path)?;
        Ok(write)
    }
}
@@ -125,11 +128,11 @@ mod tests {
    {
        let _file_protection = segment.protect_from_delete(SegmentComponent::POSTINGS);
        assert!(directory.exists(&*path));
        directory.garbage_collect(living_files.clone());
        directory.garbage_collect(|| living_files.clone());
        assert!(directory.exists(&*path));
    }

    directory.garbage_collect(living_files);
    directory.garbage_collect(|| living_files);
    assert!(!directory.exists(&*path));
}
@@ -1,4 +1,3 @@

/// Enum describing each component of a tantivy segment.
/// Each component is stored in its own file,
/// using the pattern `segment_uuid`.`component_extension`,
@@ -28,13 +27,15 @@ pub enum SegmentComponent {
impl SegmentComponent {
    /// Iterates through the components.
    pub fn iterator() -> impl Iterator<Item = &'static SegmentComponent> {
        static SEGMENT_COMPONENTS: [SegmentComponent; 7] = [SegmentComponent::POSTINGS,
                                                            SegmentComponent::POSITIONS,
                                                            SegmentComponent::FASTFIELDS,
                                                            SegmentComponent::FIELDNORMS,
                                                            SegmentComponent::TERMS,
                                                            SegmentComponent::STORE,
                                                            SegmentComponent::DELETE];
        static SEGMENT_COMPONENTS: [SegmentComponent; 7] = [
            SegmentComponent::POSTINGS,
            SegmentComponent::POSITIONS,
            SegmentComponent::FASTFIELDS,
            SegmentComponent::FIELDNORMS,
            SegmentComponent::TERMS,
            SegmentComponent::STORE,
            SegmentComponent::DELETE,
        ];
        SEGMENT_COMPONENTS.into_iter()
    }
}
@@ -1,11 +1,11 @@
use uuid::Uuid;
use std::fmt;
use std::cmp::{Ordering, Ord};
use std::cmp::{Ord, Ordering};

#[cfg(test)]
use std::sync::atomic;

/// Tantivy `SegmentId`.
/// Uuid identifying a segment.
///
/// Tantivy's segments are identified
/// by a UUID which is used to prefix the filenames
@@ -16,14 +16,12 @@ use std::sync::atomic;
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct SegmentId(Uuid);

#[cfg(test)]
lazy_static! {
    static ref AUTO_INC_COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::default();
    static ref EMPTY_ARR: [u8; 8] = [0u8; 8];
}

// During tests, we generate the segment id in an autoincrement manner
// for consistency of segment ids between runs.
//
@@ -46,7 +44,6 @@ impl SegmentId {
        SegmentId(create_uuid())
    }

    /// Returns a shorter identifier of the segment.
    ///
    /// We are using UUID4, so only 6 bits are fixed,
@@ -70,7 +67,6 @@ impl fmt::Debug for SegmentId {
    }
}

impl PartialOrd for SegmentId {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
@@ -25,7 +25,7 @@ impl SegmentMeta {
    /// a segment with no deletes and no documents.
    pub fn new(segment_id: SegmentId) -> SegmentMeta {
        SegmentMeta {
            segment_id: segment_id,
            segment_id,
            max_doc: 0,
            deletes: None,
        }
@@ -54,7 +54,6 @@ impl SegmentMeta {
        SegmentComponent::iterator()
            .map(|component| self.relative_path(*component))
            .collect::<HashSet<PathBuf>>()

    }

    /// Returns the relative path of a component of our segment.
@@ -64,16 +63,14 @@ impl SegmentMeta {
    pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
        let mut path = self.id().uuid_string();
        path.push_str(&*match component {
                           SegmentComponent::POSITIONS => ".pos".to_string(),
                           SegmentComponent::POSTINGS => ".idx".to_string(),
                           SegmentComponent::TERMS => ".term".to_string(),
                           SegmentComponent::STORE => ".store".to_string(),
                           SegmentComponent::FASTFIELDS => ".fast".to_string(),
                           SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(),
                           SegmentComponent::DELETE => {
                               format!(".{}.del", self.delete_opstamp().unwrap_or(0))
                           }
                       });
            SegmentComponent::POSITIONS => ".pos".to_string(),
            SegmentComponent::POSTINGS => ".idx".to_string(),
            SegmentComponent::TERMS => ".term".to_string(),
            SegmentComponent::STORE => ".store".to_string(),
            SegmentComponent::FASTFIELDS => ".fast".to_string(),
            SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(),
            SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)),
        });
        PathBuf::from(path)
    }
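For a concrete picture of the naming rule the match above encodes, here is a standalone hedged sketch; the extensions are copied from the match arms, while the uuid string is a made-up placeholder for `SegmentId::uuid_string()`.

// Sketch of the file naming encoded by relative_path.
fn segment_file_name(uuid: &str, extension: &str) -> String {
    format!("{}{}", uuid, extension)
}

fn demo() {
    let uuid = "000102030405060708090a0b0c0d0e0f"; // placeholder uuid
    assert_eq!(segment_file_name(uuid, ".idx"),
               "000102030405060708090a0b0c0d0e0f.idx");
    // Delete files also embed the opstamp: ".3.del" for delete_opstamp == 3.
    assert_eq!(segment_file_name(uuid, ".3.del"),
               "000102030405060708090a0b0c0d0e0f.3.del");
}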
@@ -111,8 +108,8 @@ impl SegmentMeta {
    #[doc(hidden)]
    pub fn set_delete_meta(&mut self, num_deleted_docs: u32, opstamp: u64) {
        self.deletes = Some(DeleteMeta {
            num_deleted_docs: num_deleted_docs,
            opstamp: opstamp,
        });
            num_deleted_docs,
            opstamp,
        });
    }
}
@@ -2,30 +2,30 @@ use Result;
use core::Segment;
use core::SegmentId;
use core::SegmentComponent;
use schema::Term;
use std::sync::RwLock;
use common::HasLen;
use core::SegmentMeta;
use fastfield::{self, FastFieldNotAvailableError};
use fastfield::DeleteBitSet;
use store::StoreReader;
use schema::Document;
use directory::ReadOnlySource;
use DocId;
use std::str;
use termdict::TermDictionary;
use std::cmp;
use postings::TermInfo;
use termdict::TermDictionaryImpl;
use std::sync::Arc;
use std::collections::HashMap;
use common::CompositeFile;
use std::fmt;
use core::InvertedIndexReader;
use schema::Field;
use postings::SegmentPostingsOption;
use postings::{SegmentPostings, BlockSegmentPostings};
use fastfield::{FastFieldsReader, FastFieldReader, U64FastFieldReader};
use schema::FieldType;
use error::ErrorKind;
use termdict::TermDictionaryImpl;
use fastfield::FacetReader;
use fastfield::FastFieldReader;
use schema::Schema;
use postings::FreqHandler;

use termdict::TermDictionary;
use fastfield::{FastValue, MultiValueIntFastFieldReader};
use schema::Cardinality;
use fieldnorm::FieldNormReader;

/// Entry point to access all of the datastructures of the `Segment`
///
@@ -38,17 +38,23 @@ use postings::FreqHandler;
/// The segment reader has a very low memory footprint,
/// as close to all of the memory data is mmapped.
///
/// TODO fix not decoding docfreq
#[derive(Clone)]
pub struct SegmentReader {
    inv_idx_reader_cache: Arc<RwLock<HashMap<Field, Arc<InvertedIndexReader>>>>,

    segment_id: SegmentId,
    segment_meta: SegmentMeta,
    terms: Arc<TermDictionaryImpl>,
    postings_data: ReadOnlySource,

    termdict_composite: CompositeFile,
    postings_composite: CompositeFile,
    positions_composite: CompositeFile,
    fast_fields_composite: CompositeFile,
    fieldnorms_composite: CompositeFile,

    store_reader: StoreReader,
    fast_fields_reader: Arc<FastFieldsReader>,
    fieldnorms_reader: Arc<FastFieldsReader>,
    delete_bitset: DeleteBitSet,
    positions_data: ReadOnlySource,
    delete_bitset_opt: Option<DeleteBitSet>,
    schema: Schema,
}

@@ -73,12 +79,14 @@ impl SegmentReader {
    /// Returns the number of documents that have been
    /// deleted in the segment.
    pub fn num_deleted_docs(&self) -> DocId {
        self.delete_bitset.len() as DocId
        self.delete_bitset()
            .map(|delete_set| delete_set.len() as DocId)
            .unwrap_or(0u32)
    }

    #[doc(hidden)]
    pub fn fast_fields_reader(&self) -> &FastFieldsReader {
        &*self.fast_fields_reader
    /// Returns true iff some of the documents of the segment have been deleted.
    pub fn has_deletes(&self) -> bool {
        self.delete_bitset().is_some()
    }

    /// Accessor to a segment's fast field reader given a field.
@@ -91,20 +99,69 @@ impl SegmentReader {
    ///
    /// # Panics
    /// May panic if the index is corrupted.
    pub fn get_fast_field_reader<TFastFieldReader: FastFieldReader>
        (&self,
         field: Field)
         -> fastfield::Result<TFastFieldReader> {
    pub fn fast_field_reader<Item: FastValue>(
        &self,
        field: Field,
    ) -> fastfield::Result<FastFieldReader<Item>> {
        let field_entry = self.schema.get_field_entry(field);
        if !TFastFieldReader::is_enabled(field_entry.field_type()) {
        if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::SingleValue)
        {
            self.fast_fields_composite
                .open_read(field)
                .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
                .map(FastFieldReader::open)
        } else {
            Err(FastFieldNotAvailableError::new(field_entry))
        } else {
            Ok(self.fast_fields_reader
                .open_reader(field)
                .expect("Fast field file corrupted."))
        }
    }

    /// Accessor to the `MultiValueIntFastFieldReader` associated to a given `Field`.
    /// May panic if the field is not a multivalued fastfield of the type `Item`.
    pub fn multi_fast_field_reader<Item: FastValue>(
        &self,
        field: Field,
    ) -> fastfield::Result<MultiValueIntFastFieldReader<Item>> {
        let field_entry = self.schema.get_field_entry(field);
        if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::MultiValues)
        {
            let idx_reader = self.fast_fields_composite
                .open_read_with_idx(field, 0)
                .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
                .map(FastFieldReader::open)?;
            let vals_reader = self.fast_fields_composite
                .open_read_with_idx(field, 1)
                .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
                .map(FastFieldReader::open)?;
            Ok(MultiValueIntFastFieldReader::open(idx_reader, vals_reader))
        } else {
            Err(FastFieldNotAvailableError::new(field_entry))
        }
    }

    /// Accessor to the `FacetReader` associated to a given `Field`.
    pub fn facet_reader(&self, field: Field) -> Result<FacetReader> {
        let field_entry = self.schema.get_field_entry(field);
        if field_entry.field_type() != &FieldType::HierarchicalFacet {
            return Err(ErrorKind::InvalidArgument(format!(
                "The field {:?} is not a \
                 hierarchical facet.",
                field_entry
            )).into());
        }
        let term_ords_reader = self.multi_fast_field_reader(field)?;
        let termdict_source = self.termdict_composite.open_read(field).ok_or_else(|| {
            ErrorKind::InvalidArgument(format!(
                "The field \"{}\" is a hierarchical facet, \
                 but this segment does not seem to have the field term \
                 dictionary.",
                field_entry.name()
            ))
        })?;
        let termdict = TermDictionaryImpl::from_source(termdict_source);
        let facet_reader = FacetReader::new(term_ords_reader, termdict);
        Ok(facet_reader)
    }

    /// Accessor to the segment's `Field norms`'s reader.
    ///
    /// Field norms are the length (in tokens) of the fields.
@@ -113,15 +170,14 @@ impl SegmentReader {
    ///
    /// They are simply stored as a fast field, serialized in
    /// the `.fieldnorm` file of the segment.
    pub fn get_fieldnorms_reader(&self, field: Field) -> Option<U64FastFieldReader> {
        self.fieldnorms_reader.open_reader(field)
    }

    /// Returns the number of documents containing the term.
    pub fn doc_freq(&self, term: &Term) -> u32 {
        match self.get_term_info(term) {
            Some(term_info) => term_info.doc_freq,
            None => 0,
    pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader {
        if let Some(fieldnorm_source) = self.fieldnorms_composite.open_read(field) {
            FieldNormReader::open(fieldnorm_source)
        } else {
            let field_name = self.schema.get_field_name(field);
            let err_msg = format!(
                "Field norm not found for field {:?}. Was it marked as indexed during indexing?",
                field_name
            );
            panic!(err_msg);
        }
    }

@@ -131,51 +187,114 @@ impl SegmentReader {
    }

    /// Open a new segment for reading.
    pub fn open(segment: Segment) -> Result<SegmentReader> {

        let source = segment.open_read(SegmentComponent::TERMS)?;
        let terms = TermDictionaryImpl::from_source(source)?;
    pub fn open(segment: &Segment) -> Result<SegmentReader> {
        let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
        let termdict_composite = CompositeFile::open(&termdict_source)?;

        let store_source = segment.open_read(SegmentComponent::STORE)?;
        let store_reader = StoreReader::from_source(store_source);

        let postings_shared_mmap = segment.open_read(SegmentComponent::POSTINGS)?;
        let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
        let postings_composite = CompositeFile::open(&postings_source)?;

        let fast_field_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
        let fast_fields_reader = FastFieldsReader::from_source(fast_field_data)?;
        let positions_composite = {
            if let Ok(source) = segment.open_read(SegmentComponent::POSITIONS) {
                CompositeFile::open(&source)?
            } else {
                CompositeFile::empty()
            }
        };

        let fast_fields_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
        let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;

        let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
        let fieldnorms_reader = FastFieldsReader::from_source(fieldnorms_data)?;
        let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;

        let positions_data = segment
            .open_read(SegmentComponent::POSITIONS)
            .unwrap_or_else(|_| ReadOnlySource::empty());

        let delete_bitset = if segment.meta().has_deletes() {
            let delete_data = segment.open_read(SegmentComponent::DELETE)?;
            DeleteBitSet::open(delete_data)
        } else {
            DeleteBitSet::empty()
        };
        let delete_bitset_opt = if segment.meta().has_deletes() {
            let delete_data = segment.open_read(SegmentComponent::DELETE)?;
            Some(DeleteBitSet::open(delete_data))
        } else {
            None
        };

        let schema = segment.schema();
        Ok(SegmentReader {
            segment_meta: segment.meta().clone(),
            postings_data: postings_shared_mmap,
            terms: Arc::new(terms),
            segment_id: segment.id(),
            store_reader: store_reader,
            fast_fields_reader: Arc::new(fast_fields_reader),
            fieldnorms_reader: Arc::new(fieldnorms_reader),
            delete_bitset: delete_bitset,
            positions_data: positions_data,
            schema: schema,
        })
        Ok(SegmentReader {
            inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
            segment_meta: segment.meta().clone(),
            termdict_composite,
            postings_composite,
            fast_fields_composite,
            fieldnorms_composite,
            segment_id: segment.id(),
            store_reader,
            delete_bitset_opt,
            positions_composite,
            schema,
        })
    }

    /// Returns the term dictionary datastructure.
    pub fn terms(&self) -> &TermDictionaryImpl {
        &self.terms
    /// Returns a field reader associated to the field given in argument.
    /// If the field was not present in the index during indexing time,
    /// the InvertedIndexReader is empty.
    ///
    /// The field reader is in charge of iterating through the
    /// term dictionary associated to a specific field,
    /// and opening the posting lists associated to any term.
    pub fn inverted_index(&self, field: Field) -> Arc<InvertedIndexReader> {
        if let Some(inv_idx_reader) = self.inv_idx_reader_cache
            .read()
            .expect("Lock poisoned. This should never happen")
            .get(&field)
        {
            return Arc::clone(inv_idx_reader);
        }
        let field_entry = self.schema.get_field_entry(field);
        let field_type = field_entry.field_type();
        let record_option_opt = field_type.get_index_record_option();

        if record_option_opt.is_none() {
            panic!("Field {:?} does not seem indexed.", field_entry.name());
        }

        let record_option = record_option_opt.unwrap();

        let postings_source_opt = self.postings_composite.open_read(field);

        if postings_source_opt.is_none() {
            // no documents in the segment contained this field.
            // As a result, no data is associated to the inverted index.
            //
            // Returns an empty inverted index.
            return Arc::new(InvertedIndexReader::empty(field_type.clone()));
        }

        let postings_source = postings_source_opt.unwrap();

        let termdict_source = self.termdict_composite
            .open_read(field)
            .expect("Failed to open field term dictionary in composite file. Is the field indexed?");

        let positions_source = self.positions_composite
            .open_read(field)
            .expect("Index corrupted. Failed to open field positions in composite file.");

        let inv_idx_reader = Arc::new(InvertedIndexReader::new(
            TermDictionaryImpl::from_source(termdict_source),
            postings_source,
            positions_source,
            record_option,
        ));

        // by releasing the lock in between, we may end up opening the inverted index
        // twice, but this is fine.
        self.inv_idx_reader_cache
            .write()
            .expect("Field reader cache lock poisoned. This should never happen.")
            .insert(field, Arc::clone(&inv_idx_reader));

        inv_idx_reader
    }
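The double-checked cache above makes repeated calls for the same field cheap: only the first call opens the composite sources. A hedged sketch of the intended call pattern, in which the field and term values are illustrative.

// Sketch: per-field access through the SegmentReader cache.
fn field_doc_freq(reader: &SegmentReader, field: Field, term: &Term) -> u32 {
    // First call opens and caches the InvertedIndexReader;
    // subsequent calls clone the Arc out of the RwLock-guarded map.
    let inv_index: Arc<InvertedIndexReader> = reader.inverted_index(field);
    inv_index.doc_freq(term)
}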
|
||||
|
||||
/// Returns the document (or, to be accurate, its stored fields).
@@ -186,90 +305,6 @@ impl SegmentReader {
        self.store_reader.get(doc_id)
    }

    /// Returns the segment postings associated with the term, and with the given option,
    /// or `None` if the term has never been encountered and indexed.
    ///
    /// If the field was not indexed with indexing options that cover
    /// the requested options, the method does not fail: it returns a
    /// `SegmentPostings` with as much information as possible.
    ///
    /// For instance, requesting `SegmentPostingsOption::FreqAndPositions` for a
    /// `TextIndexingOptions` that does not index positions will return a `SegmentPostings`
    /// with `DocId`s and term frequencies.
    pub fn read_postings(&self,
                         term: &Term,
                         option: SegmentPostingsOption)
                         -> Option<SegmentPostings> {
        let field = term.field();
        let field_entry = self.schema.get_field_entry(field);
        let term_info = get!(self.get_term_info(term));
        let maximum_option = get!(field_entry.field_type().get_segment_postings_option());
        let best_effort_option = cmp::min(maximum_option, option);
        Some(self.read_postings_from_terminfo(&term_info, best_effort_option))
    }

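The `cmp::min` clamp works because the postings-option enum derives `Ord`, with variants declared from least to most informative, so the requested option is silently capped at what the field actually indexed. A self-contained sketch of that idiom (the enum below is a stand-in, not tantivy's actual definition):

    use std::cmp;

    // Variants must be declared from least to most informative:
    // the derived `Ord` follows declaration order.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    enum PostingsOption {
        NoFreq,
        Freq,
        FreqAndPositions,
    }

    fn main() {
        let indexed = PostingsOption::Freq;                // what the field stores
        let requested = PostingsOption::FreqAndPositions;  // what the caller asks for
        let best_effort = cmp::min(indexed, requested);
        assert_eq!(best_effort, PostingsOption::Freq);     // silently degraded
    }
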

    /// Returns a postings object given a `term_info`.
    /// This method is for advanced usage only.
    ///
    /// Most users should prefer using `read_postings` instead.
    pub fn read_postings_from_terminfo(&self,
                                       term_info: &TermInfo,
                                       option: SegmentPostingsOption)
                                       -> SegmentPostings {
        let block_postings = self.read_block_postings_from_terminfo(term_info, option);
        let delete_bitset = self.delete_bitset.clone();
        SegmentPostings::from_block_postings(block_postings, delete_bitset)
    }

    /// Returns a block postings object given a `term_info`.
    /// This method is for advanced usage only.
    ///
    /// Most users should prefer using `read_postings` instead.
    pub fn read_block_postings_from_terminfo(&self,
                                             term_info: &TermInfo,
                                             option: SegmentPostingsOption)
                                             -> BlockSegmentPostings {
        let offset = term_info.postings_offset as usize;
        let postings_data = &self.postings_data[offset..];
        let freq_handler = match option {
            SegmentPostingsOption::NoFreq => FreqHandler::new_without_freq(),
            SegmentPostingsOption::Freq => FreqHandler::new_with_freq(),
            SegmentPostingsOption::FreqAndPositions => {
                let offset = term_info.positions_offset as usize;
                let offseted_position_data = &self.positions_data[offset..];
                FreqHandler::new_with_freq_and_position(offseted_position_data)
            }
        };
        BlockSegmentPostings::from_data(term_info.doc_freq as usize, postings_data, freq_handler)
    }

    /// Resets the block segment postings to another position of the postings
    /// file.
    ///
    /// This is useful for enumerating through a list of terms,
    /// and consuming the associated posting lists while avoiding
    /// reallocating a `BlockSegmentPostings`.
    ///
    /// # Warning
    ///
    /// This does not reset the positions list.
    pub fn reset_block_postings_from_terminfo<'a>(&'a self,
                                                  term_info: &TermInfo,
                                                  block_postings: &mut BlockSegmentPostings<'a>) {
        let offset = term_info.postings_offset as usize;
        let postings_data: &'a [u8] = &self.postings_data[offset..];
        block_postings.reset(term_info.doc_freq as usize, postings_data);
    }

    /// Returns the term info associated with the term.
    pub fn get_term_info(&self, term: &Term) -> Option<TermInfo> {
        self.terms.get(term.as_slice())
    }

    /// Returns the segment id.
    pub fn segment_id(&self) -> SegmentId {
        self.segment_id
@@ -277,19 +312,19 @@ impl SegmentReader {

    /// Returns the bitset representing
    /// the documents that have been deleted.
-   pub fn delete_bitset(&self) -> &DeleteBitSet {
-       &self.delete_bitset
+   pub fn delete_bitset(&self) -> Option<&DeleteBitSet> {
+       self.delete_bitset_opt.as_ref()
    }

    /// Returns true iff the `doc` is marked
    /// as deleted.
    pub fn is_deleted(&self, doc: DocId) -> bool {
-       self.delete_bitset.is_deleted(doc)
+       self.delete_bitset()
+           .map(|delete_set| delete_set.is_deleted(doc))
+           .unwrap_or(false)
    }
}

impl fmt::Debug for SegmentReader {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "SegmentReader({:?})", self.segment_id)

@@ -1,4 +1,4 @@
mod skip;
pub mod stacker;

-pub use self::skip::{SkipListBuilder, SkipList};
+pub use self::skip::{SkipList, SkipListBuilder};

@@ -6,17 +6,15 @@ mod skiplist;
pub use self::skiplist_builder::SkipListBuilder;
pub use self::skiplist::SkipList;

#[cfg(test)]
mod tests {

-   use super::*;
+   use super::{SkipList, SkipListBuilder};

    #[test]
    fn test_skiplist() {
        let mut output: Vec<u8> = Vec::new();
-       let mut skip_list_builder: SkipListBuilder<u32> = SkipListBuilder::new(10);
+       let mut skip_list_builder: SkipListBuilder<u32> = SkipListBuilder::new(8);
        skip_list_builder.insert(2, &3).unwrap();
        skip_list_builder.write::<Vec<u8>>(&mut output).unwrap();
        let mut skip_list: SkipList<u32> = SkipList::from(output.as_slice());

@@ -26,7 +24,7 @@ mod tests {
    #[test]
    fn test_skiplist2() {
        let mut output: Vec<u8> = Vec::new();
-       let skip_list_builder: SkipListBuilder<u32> = SkipListBuilder::new(10);
+       let skip_list_builder: SkipListBuilder<u32> = SkipListBuilder::new(8);
        skip_list_builder.write::<Vec<u8>>(&mut output).unwrap();
        let mut skip_list: SkipList<u32> = SkipList::from(output.as_slice());
        assert_eq!(skip_list.next(), None);

@@ -73,7 +71,7 @@ mod tests {
    #[test]
    fn test_skiplist5() {
        let mut output: Vec<u8> = Vec::new();
-       let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(3);
+       let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(4);
        skip_list_builder.insert(2, &()).unwrap();
        skip_list_builder.insert(3, &()).unwrap();
        skip_list_builder.insert(5, &()).unwrap();

@@ -105,7 +103,7 @@ mod tests {
    #[test]
    fn test_skiplist7() {
        let mut output: Vec<u8> = Vec::new();
-       let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(3);
+       let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(4);
        for i in 0..1000 {
            skip_list_builder.insert(i, &()).unwrap();
        }

@@ -123,35 +121,48 @@ mod tests {
    #[test]
    fn test_skiplist8() {
        let mut output: Vec<u8> = Vec::new();
-       let mut skip_list_builder: SkipListBuilder<u32> = SkipListBuilder::new(10);
+       let mut skip_list_builder: SkipListBuilder<u64> = SkipListBuilder::new(8);
        skip_list_builder.insert(2, &3).unwrap();
        skip_list_builder.write::<Vec<u8>>(&mut output).unwrap();
-       assert_eq!(output.len(), 13);
+       assert_eq!(output.len(), 11);
        assert_eq!(output[0], 1u8 + 128u8);
    }

    #[test]
    fn test_skiplist9() {
        let mut output: Vec<u8> = Vec::new();
-       let mut skip_list_builder: SkipListBuilder<u32> = SkipListBuilder::new(3);
-       for i in 0..9 {
+       let mut skip_list_builder: SkipListBuilder<u64> = SkipListBuilder::new(4);
+       for i in 0..4 * 4 * 4 {
            skip_list_builder.insert(i, &i).unwrap();
        }
        skip_list_builder.write::<Vec<u8>>(&mut output).unwrap();
-       assert_eq!(output.len(), 117);
-       assert_eq!(output[0], 3u8 + 128u8);
+       assert_eq!(output.len(), 774);
+       assert_eq!(output[0], 4u8 + 128u8);
    }

    #[test]
    fn test_skiplist10() {
        // checking that void gets serialized to nothing.
        let mut output: Vec<u8> = Vec::new();
-       let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(3);
-       for i in 0..9 {
+       let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(4);
+       for i in 0..((4 * 4 * 4) - 1) {
            skip_list_builder.insert(i, &()).unwrap();
        }
        skip_list_builder.write::<Vec<u8>>(&mut output).unwrap();
-       assert_eq!(output.len(), 81);
+       assert_eq!(output.len(), 230);
        assert_eq!(output[0], 128u8 + 3u8);
    }

    #[test]
    fn test_skiplist11() {
        // checking that void gets serialized to nothing.
        let mut output: Vec<u8> = Vec::new();
        let mut skip_list_builder: SkipListBuilder<()> = SkipListBuilder::new(4);
        for i in 0..(4 * 4) {
            skip_list_builder.insert(i, &()).unwrap();
        }
        skip_list_builder.write::<Vec<u8>>(&mut output).unwrap();
        assert_eq!(output.len(), 65);
        assert_eq!(output[0], 128u8 + 3u8);
    }

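Taken together, these tests exercise the full builder/reader round trip: keys (now `u64`) go in through `SkipListBuilder::insert`, the structure is serialized with `write`, and `SkipList::from` deserializes it for iteration and `seek`. A hedged sketch of that round trip, assuming the same API shown in the tests above:

    #[test]
    fn test_skiplist_round_trip_sketch() {
        // Sketch based on the tests above; assumes the u64-keyed API.
        let mut output: Vec<u8> = Vec::new();
        let mut builder: SkipListBuilder<u32> = SkipListBuilder::new(8); // period: power of 2
        for key in 0..100u64 {
            builder.insert(key, &(key as u32)).unwrap();
        }
        builder.write::<Vec<u8>>(&mut output).unwrap();

        let mut skip_list: SkipList<u32> = SkipList::from(output.as_slice());
        // `seek` fast-forwards through the skip layers; per the doc comment
        // below, the sought key itself is returned by the next call to `next`.
        skip_list.seek(42);
        assert_eq!(skip_list.next().map(|(k, _)| k), Some(42));
    }
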
@@ -1,6 +1,5 @@
-use common::BinarySerializable;
+use common::{BinarySerializable, VInt};
use std::marker::PhantomData;
use DocId;
-use std::cmp::max;

static EMPTY: [u8; 0] = [];

@@ -8,21 +7,20 @@ static EMPTY: [u8; 0] = [];
|
||||
struct Layer<'a, T> {
|
||||
data: &'a [u8],
|
||||
cursor: &'a [u8],
|
||||
next_id: DocId,
|
||||
next_id: Option<u64>,
|
||||
_phantom_: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<'a, T: BinarySerializable> Iterator for Layer<'a, T> {
|
||||
type Item = (DocId, T);
|
||||
type Item = (u64, T);
|
||||
|
||||
fn next(&mut self) -> Option<(DocId, T)> {
|
||||
if self.next_id == u32::max_value() {
|
||||
None
|
||||
} else {
|
||||
fn next(&mut self) -> Option<(u64, T)> {
|
||||
if let Some(cur_id) = self.next_id {
|
||||
let cur_val = T::deserialize(&mut self.cursor).unwrap();
|
||||
let cur_id = self.next_id;
|
||||
self.next_id = u32::deserialize(&mut self.cursor).unwrap_or(u32::max_value());
|
||||
self.next_id = VInt::deserialize_u64(&mut self.cursor).ok();
|
||||
Some((cur_id, cur_val))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -30,11 +28,11 @@ impl<'a, T: BinarySerializable> Iterator for Layer<'a, T> {
|
||||
impl<'a, T: BinarySerializable> From<&'a [u8]> for Layer<'a, T> {
|
||||
fn from(data: &'a [u8]) -> Layer<'a, T> {
|
||||
let mut cursor = data;
|
||||
let next_id = u32::deserialize(&mut cursor).unwrap_or(u32::max_value());
|
||||
let next_id = VInt::deserialize_u64(&mut cursor).ok();
|
||||
Layer {
|
||||
data: data,
|
||||
cursor: cursor,
|
||||
next_id: next_id,
|
||||
data,
|
||||
cursor,
|
||||
next_id,
|
||||
_phantom_: PhantomData,
|
||||
}
|
||||
}
|
||||
@@ -45,14 +43,14 @@ impl<'a, T: BinarySerializable> Layer<'a, T> {
|
||||
Layer {
|
||||
data: &EMPTY,
|
||||
cursor: &EMPTY,
|
||||
next_id: DocId::max_value(),
|
||||
next_id: None,
|
||||
_phantom_: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn seek_offset(&mut self, offset: usize) {
|
||||
self.cursor = &self.data[offset..];
|
||||
self.next_id = u32::deserialize(&mut self.cursor).unwrap_or(u32::max_value());
|
||||
self.next_id = VInt::deserialize_u64(&mut self.cursor).ok();
|
||||
}
|
||||
|
||||
// Returns the last element (key, val)
|
||||
@@ -60,56 +58,61 @@ impl<'a, T: BinarySerializable> Layer<'a, T> {
|
||||
//
|
||||
// If there is no such element anymore,
|
||||
// returns None.
|
||||
fn seek(&mut self, doc_id: DocId) -> Option<(DocId, T)> {
|
||||
let mut val = None;
|
||||
while self.next_id < doc_id {
|
||||
match self.next() {
|
||||
None => {
|
||||
break;
|
||||
}
|
||||
v => {
|
||||
val = v;
|
||||
//
|
||||
// If the element exists, it will be returned
|
||||
// at the next call to `.next()`.
|
||||
fn seek(&mut self, key: u64) -> Option<(u64, T)> {
|
||||
let mut result: Option<(u64, T)> = None;
|
||||
loop {
|
||||
if let Some(next_id) = self.next_id {
|
||||
if next_id < key {
|
||||
if let Some(v) = self.next() {
|
||||
result = Some(v);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
val
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub struct SkipList<'a, T: BinarySerializable> {
    data_layer: Layer<'a, T>,
-   skip_layers: Vec<Layer<'a, u32>>,
+   skip_layers: Vec<Layer<'a, u64>>,
}

impl<'a, T: BinarySerializable> Iterator for SkipList<'a, T> {
-   type Item = (DocId, T);
+   type Item = (u64, T);

-   fn next(&mut self) -> Option<(DocId, T)> {
+   fn next(&mut self) -> Option<(u64, T)> {
        self.data_layer.next()
    }
}

impl<'a, T: BinarySerializable> SkipList<'a, T> {
-   pub fn seek(&mut self, doc_id: DocId) -> Option<(DocId, T)> {
-       let mut next_layer_skip: Option<(DocId, u32)> = None;
+   pub fn seek(&mut self, key: u64) -> Option<(u64, T)> {
+       let mut next_layer_skip: Option<(u64, u64)> = None;
        for skip_layer in &mut self.skip_layers {
            if let Some((_, offset)) = next_layer_skip {
                skip_layer.seek_offset(offset as usize);
            }
-           next_layer_skip = skip_layer.seek(doc_id);
+           next_layer_skip = skip_layer.seek(key);
        }
        if let Some((_, offset)) = next_layer_skip {
            self.data_layer.seek_offset(offset as usize);
        }
-       self.data_layer.seek(doc_id)
+       self.data_layer.seek(key)
    }
}

impl<'a, T: BinarySerializable> From<&'a [u8]> for SkipList<'a, T> {
    fn from(mut data: &'a [u8]) -> SkipList<'a, T> {
-       let offsets: Vec<u32> = Vec::deserialize(&mut data).unwrap();
+       let offsets: Vec<u64> = Vec::<VInt>::deserialize(&mut data)
+           .unwrap()
+           .into_iter()
+           .map(|el| el.0)
+           .collect();
        let num_layers = offsets.len();
        let layers_data: &[u8] = data;
        let data_layer: Layer<'a, T> = if num_layers == 0 {

@@ -123,8 +126,8 @@ impl<'a, T: BinarySerializable> From<&'a [u8]> for SkipList<'a, T> {
            .map(|(start, stop)| Layer::from(&layers_data[start..stop]))
            .collect();
        SkipList {
-           skip_layers: skip_layers,
-           data_layer: data_layer,
+           skip_layers,
+           data_layer,
        }
    }
}

@@ -1,13 +1,11 @@
use std::io::Write;
-use common::BinarySerializable;
+use common::{BinarySerializable, VInt, is_power_of_2};
use std::marker::PhantomData;
-use DocId;
use std::io;

struct LayerBuilder<T: BinarySerializable> {
    period: usize,
+   period_mask: usize,
    buffer: Vec<u8>,
-   remaining: usize,
+   len: usize,
    _phantom_: PhantomData<T>,
}

@@ -23,48 +21,45 @@ impl<T: BinarySerializable> LayerBuilder<T> {
    }

    fn with_period(period: usize) -> LayerBuilder<T> {
+       assert!(is_power_of_2(period), "The period has to be a power of 2.");
        LayerBuilder {
            period: period,
+           period_mask: (period - 1),
            buffer: Vec::new(),
-           remaining: period,
+           len: 0,
            _phantom_: PhantomData,
        }
    }

-   fn insert(&mut self, doc_id: DocId, value: &T) -> io::Result<Option<(DocId, u32)>> {
-       self.remaining -= 1;
-       let offset = self.written_size() as u32;
-       doc_id.serialize(&mut self.buffer)?;
+   fn insert(&mut self, key: u64, value: &T) -> io::Result<Option<(u64, u64)>> {
+       self.len += 1;
+       let offset = self.written_size() as u64;
+       VInt(key).serialize(&mut self.buffer)?;
        value.serialize(&mut self.buffer)?;
-       Ok(if self.remaining == 0 {
-           self.remaining = self.period;
-           Some((doc_id, offset))
-       } else {
-           None
-       })
+       let emit_skip_info = (self.period_mask & self.len) == 0;
+       if emit_skip_info {
+           Ok(Some((key, offset)))
+       } else {
+           Ok(None)
+       }
    }
}

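The `period_mask & len` test works because `with_period` forces the period to be a power of two: the mask test is then equivalent to `len % period == 0`, so a skip pointer is emitted on every `period`-th insertion. A quick standalone illustration with `period = 4`:

    fn main() {
        let period: usize = 4; // must be a power of two
        let period_mask = period - 1; // 0b011
        let mut emitted = Vec::new();
        for len in 1..=12 {
            // Same test as LayerBuilder::insert: true on every 4th insert.
            if period_mask & len == 0 {
                emitted.push(len);
            }
        }
        assert_eq!(emitted, vec![4, 8, 12]);
    }
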
pub struct SkipListBuilder<T: BinarySerializable> {
    period: usize,
    data_layer: LayerBuilder<T>,
-   skip_layers: Vec<LayerBuilder<u32>>,
+   skip_layers: Vec<LayerBuilder<u64>>,
}

impl<T: BinarySerializable> SkipListBuilder<T> {
    pub fn new(period: usize) -> SkipListBuilder<T> {
        SkipListBuilder {
-           period: period,
+           period,
            data_layer: LayerBuilder::with_period(period),
            skip_layers: Vec::new(),
        }
    }

-   fn get_skip_layer(&mut self, layer_id: usize) -> &mut LayerBuilder<u32> {
+   fn get_skip_layer(&mut self, layer_id: usize) -> &mut LayerBuilder<u64> {
        if layer_id == self.skip_layers.len() {
            let layer_builder = LayerBuilder::with_period(self.period);
            self.skip_layers.push(layer_builder);

@@ -72,15 +67,13 @@ impl<T: BinarySerializable> SkipListBuilder<T> {
        }
        &mut self.skip_layers[layer_id]
    }

-   pub fn insert(&mut self, doc_id: DocId, dest: &T) -> io::Result<()> {
+   pub fn insert(&mut self, key: u64, dest: &T) -> io::Result<()> {
        let mut layer_id = 0;
-       let mut skip_pointer = try!(self.data_layer.insert(doc_id, dest));
+       let mut skip_pointer = self.data_layer.insert(key, dest)?;
        loop {
            skip_pointer = match skip_pointer {
-               Some((skip_doc_id, skip_offset)) => {
-                   try!(self.get_skip_layer(layer_id)
-                       .insert(skip_doc_id, &skip_offset))
-               }
+               Some((skip_doc_id, skip_offset)) => self.get_skip_layer(layer_id)
+                   .insert(skip_doc_id, &skip_offset)?,
                None => {
                    return Ok(());
                }

@@ -90,13 +83,11 @@ impl<T: BinarySerializable> SkipListBuilder<T> {
    }

    pub fn write<W: Write>(self, output: &mut W) -> io::Result<()> {
-       let mut size: u32 = 0;
-       let mut layer_sizes: Vec<u32> = Vec::new();
-       size += self.data_layer.buffer.len() as u32;
-       layer_sizes.push(size);
+       let mut size: u64 = self.data_layer.buffer.len() as u64;
+       let mut layer_sizes = vec![VInt(size)];
        for layer in self.skip_layers.iter().rev() {
-           size += layer.buffer.len() as u32;
-           layer_sizes.push(size);
+           size += layer.buffer.len() as u64;
+           layer_sizes.push(VInt(size));
        }
        layer_sizes.serialize(output)?;
        self.data_layer.write(output)?;

@@ -1,7 +1,6 @@
use std::mem;
use super::heap::{Heap, HeapAllocable};

#[inline]
pub fn is_power_of_2(val: u32) -> bool {
    val & (val - 1) == 0

@@ -12,7 +11,6 @@ pub fn jump_needed(val: u32) -> bool {
    val > 3 && is_power_of_2(val)
}

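`jump_needed` is what makes the linked list "exponentially unrolled": a new, larger block is chained every time the element count passes a power of two above 3. A quick standalone check of the trigger points:

    fn is_power_of_2(val: u32) -> bool {
        val & (val - 1) == 0
    }

    fn jump_needed(val: u32) -> bool {
        val > 3 && is_power_of_2(val)
    }

    fn main() {
        // A new block is allocated exactly at lengths 4, 8, 16, 32, ...
        let triggers: Vec<u32> = (1..=32).filter(|&v| jump_needed(v)).collect();
        assert_eq!(triggers, vec![4, 8, 16, 32]);
    }
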

#[derive(Debug, Clone)]
pub struct ExpUnrolledLinkedList {
    len: u32,

@@ -26,7 +24,7 @@ pub struct ExpUnrolledLinkedList {
impl ExpUnrolledLinkedList {
    pub fn iter<'a>(&self, addr: u32, heap: &'a Heap) -> ExpUnrolledLinkedListIterator<'a> {
        ExpUnrolledLinkedListIterator {
-           heap: heap,
+           heap,
            addr: addr + 2u32 * (mem::size_of::<u32>() as u32),
            len: self.len,
            consumed: 0,

@@ -51,7 +49,6 @@ impl ExpUnrolledLinkedList {
    }
}

impl HeapAllocable for u32 {
    fn with_addr(_addr: u32) -> u32 {
        0u32

@@ -96,14 +93,9 @@ impl<'a> Iterator for ExpUnrolledLinkedListIterator<'a> {
            self.addr = addr + mem::size_of::<u32>() as u32;
            Some(*self.heap.get_mut_ref(addr))
        }
    }
}

#[cfg(test)]
mod tests {

@@ -1,6 +1,7 @@
use std::iter;
use std::mem;
-use super::heap::{Heap, HeapAllocable, BytesRef};
+use postings::UnorderedTermId;
+use super::heap::{BytesRef, Heap, HeapAllocable};

mod murmurhash2 {

@@ -9,7 +10,7 @@ mod murmurhash2 {
    #[inline(always)]
    pub fn murmurhash2(key: &[u8]) -> u32 {
        let mut key_ptr: *const u32 = key.as_ptr() as *const u32;
-       let m: u32 = 0x5bd1e995;
+       let m: u32 = 0x5bd1_e995;
        let r = 24;
        let len = key.len() as u32;

@@ -30,18 +31,18 @@ mod murmurhash2 {
        let key_ptr_u8: *const u8 = key_ptr as *const u8;
        match remaining {
            3 => {
-               h ^= unsafe { *key_ptr_u8.wrapping_offset(2) as u32 } << 16;
-               h ^= unsafe { *key_ptr_u8.wrapping_offset(1) as u32 } << 8;
-               h ^= unsafe { *key_ptr_u8 as u32 };
+               h ^= unsafe { u32::from(*key_ptr_u8.wrapping_offset(2)) } << 16;
+               h ^= unsafe { u32::from(*key_ptr_u8.wrapping_offset(1)) } << 8;
+               h ^= unsafe { u32::from(*key_ptr_u8) };
                h = h.wrapping_mul(m);
            }
            2 => {
-               h ^= unsafe { *key_ptr_u8.wrapping_offset(1) as u32 } << 8;
-               h ^= unsafe { *key_ptr_u8 as u32 };
+               h ^= unsafe { u32::from(*key_ptr_u8.wrapping_offset(1)) } << 8;
+               h ^= unsafe { u32::from(*key_ptr_u8) };
                h = h.wrapping_mul(m);
            }
            1 => {
-               h ^= unsafe { *key_ptr_u8 as u32 };
+               h ^= unsafe { u32::from(*key_ptr_u8) };
                h = h.wrapping_mul(m);
            }
            _ => {}

@@ -52,9 +53,6 @@ mod murmurhash2 {
        }
    }
}
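For readers who want the same hash without raw pointers, here is a hedged, safe-Rust sketch of MurmurHash2 over byte slices. It follows the classic algorithm (constants `m = 0x5bd1_e995` and `r = 24` as above, and the tail handling mirrors the `match remaining` block); the seed parameter is illustrative, and tantivy's own seed may differ:

    pub fn murmurhash2_safe(key: &[u8], seed: u32) -> u32 {
        const M: u32 = 0x5bd1_e995;
        const R: u32 = 24;
        let mut h: u32 = seed ^ key.len() as u32;

        let mut chunks = key.chunks_exact(4);
        for chunk in &mut chunks {
            let mut k = u32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]);
            k = k.wrapping_mul(M);
            k ^= k >> R;
            k = k.wrapping_mul(M);
            h = h.wrapping_mul(M);
            h ^= k;
        }

        // Tail: same byte-by-byte mixing as the `match remaining` above.
        let tail = chunks.remainder();
        if !tail.is_empty() {
            for (i, &byte) in tail.iter().enumerate() {
                h ^= u32::from(byte) << (8 * i as u32);
            }
            h = h.wrapping_mul(M);
        }

        // Final avalanche.
        h ^= h >> 13;
        h = h.wrapping_mul(M);
        h ^= h >> 15;
        h
    }
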

/// Split the thread memory budget into
/// - the heap size
/// - the hash table "table" itself.

@@ -62,21 +60,20 @@ mod murmurhash2 {
/// Returns (the heap size in bytes, the hash table size in number of bits)
pub(crate) fn split_memory(per_thread_memory_budget: usize) -> (usize, usize) {
    let table_size_limit: usize = per_thread_memory_budget / 3;
-   let compute_table_size = |num_bits: usize| {
-       let table_size: usize = (1 << num_bits) * mem::size_of::<KeyValue>();
-       table_size * mem::size_of::<KeyValue>()
-   };
+   let compute_table_size = |num_bits: usize| (1 << num_bits) * mem::size_of::<KeyValue>();
    let table_num_bits: usize = (1..)
        .into_iter()
        .take_while(|num_bits: &usize| compute_table_size(*num_bits) < table_size_limit)
        .last()
-       .expect(&format!("Per thread memory is too small: {}", per_thread_memory_budget));
+       .expect(&format!(
+           "Per thread memory is too small: {}",
+           per_thread_memory_budget
+       ));
    let table_size = compute_table_size(table_num_bits);
    let heap_size = per_thread_memory_budget - table_size;
    (heap_size, table_num_bits)
}
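The new one-line closure fixes an arithmetic slip in the old version, which multiplied by `size_of::<KeyValue>()` twice. Assuming the packed `KeyValue` below occupies 8 bytes (a 4-byte `BytesRef` plus a 4-byte hash, which the updated `test_hashmap_size` values are consistent with), the split for a 1 MB budget works out as follows:

    // Worked example, assuming size_of::<KeyValue>() == 8.
    fn main() {
        let budget: usize = 1_000_000;
        let _table_size_limit = budget / 3; // 333_333
        // Largest num_bits with (1 << num_bits) * 8 < 333_333:
        // (1 << 15) * 8 = 262_144 fits, but (1 << 16) * 8 = 524_288 is too big.
        let table_num_bits = 15;
        let table_size = (1usize << table_num_bits) * 8; // 262_144
        let heap_size = budget - table_size; // 737_856
        assert_eq!((heap_size, table_num_bits), (737_856, 15)); // matches the updated test
    }
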

/// `KeyValue` is the item stored in the hash table.
/// The key is actually a `BytesRef` object stored in an external heap.
/// The `value_addr` also points to an address in the heap.

@@ -85,7 +82,6 @@ pub(crate) fn split_memory(per_thread_memory_budget: usize) -> (usize, usize) {
/// For this reason, the (start, stop) information is actually redundant
/// and can be simplified in the future.
#[derive(Copy, Clone, Default)]
#[repr(packed)]
struct KeyValue {
    key_value_addr: BytesRef,
    hash: u32,

@@ -97,7 +93,6 @@ impl KeyValue {
    }
}

/// Customized `HashMap` with string keys
///
/// This `HashMap` takes String as keys. Keys are

@@ -107,14 +102,13 @@ impl KeyValue {
/// the computation of the hash of the key twice,
/// or copying the key as long as there is no insert.
///
-pub struct HashMap<'a> {
+pub struct TermHashMap<'a> {
    table: Box<[KeyValue]>,
    heap: &'a Heap,
    mask: usize,
    occupied: Vec<usize>,
}

struct QuadraticProbing {
    hash: usize,
    i: usize,

@@ -124,9 +118,9 @@ struct QuadraticProbing {
impl QuadraticProbing {
    fn compute(hash: usize, mask: usize) -> QuadraticProbing {
        QuadraticProbing {
-           hash: hash,
+           hash,
            i: 0,
-           mask: mask,
+           mask,
        }
    }

@@ -137,14 +131,13 @@ impl QuadraticProbing {
    }
}

-impl<'a> HashMap<'a> {
-   pub fn new(num_bucket_power_of_2: usize, heap: &'a Heap) -> HashMap<'a> {
+impl<'a> TermHashMap<'a> {
+   pub fn new(num_bucket_power_of_2: usize, heap: &'a Heap) -> TermHashMap<'a> {
        let table_size = 1 << num_bucket_power_of_2;
        let table: Vec<KeyValue> = iter::repeat(KeyValue::default()).take(table_size).collect();
-       HashMap {
+       TermHashMap {
            table: table.into_boxed_slice(),
-           heap: heap,
+           heap,
            mask: table_size - 1,
            occupied: Vec::with_capacity(table_size / 2),
        }

@@ -165,26 +158,25 @@ impl<'a> HashMap<'a> {
        (key_bytes, expull_addr)
    }

-   pub fn set_bucket(&mut self, hash: u32, key_bytes_ref: BytesRef, bucket: usize) {
+   pub fn set_bucket(&mut self, hash: u32, key_value_addr: BytesRef, bucket: usize) {
        self.occupied.push(bucket);
        self.table[bucket] = KeyValue {
-           key_value_addr: key_bytes_ref,
-           hash: hash,
+           key_value_addr, hash
        };
    }

-   pub fn iter<'b: 'a>(&'b self) -> impl Iterator<Item = (&'a [u8], u32)> + 'b {
-       self.occupied
-           .iter()
-           .cloned()
-           .map(move |bucket: usize| {
-               let kv = self.table[bucket];
-               self.get_key_value(kv.key_value_addr)
-           })
+   pub fn iter<'b: 'a>(&'b self) -> impl Iterator<Item = (&'a [u8], u32, UnorderedTermId)> + 'b {
+       self.occupied.iter().cloned().map(move |bucket: usize| {
+           let kv = self.table[bucket];
+           let (key, offset) = self.get_key_value(kv.key_value_addr);
+           (key, offset, bucket as UnorderedTermId)
+       })
    }

-   pub fn get_or_create<S: AsRef<[u8]>, V: HeapAllocable>(&mut self, key: S) -> &mut V {
+   pub fn get_or_create<S: AsRef<[u8]>, V: HeapAllocable>(
+       &mut self,
+       key: S,
+   ) -> (UnorderedTermId, &mut V) {
        let key_bytes: &[u8] = key.as_ref();
        let hash = murmurhash2::murmurhash2(key.as_ref());
        let mut probe = self.probe(hash);

@@ -196,18 +188,20 @@ impl<'a> HashMap<'a> {
                let (addr, val): (u32, &mut V) = self.heap.allocate_object();
                assert_eq!(addr, key_bytes_ref.addr() + 2 + key_bytes.len() as u32);
                self.set_bucket(hash, key_bytes_ref, bucket);
-               return val;
+               return (bucket as UnorderedTermId, val);
            } else if kv.hash == hash {
                let (stored_key, expull_addr): (&[u8], u32) = self.get_key_value(kv.key_value_addr);
                if stored_key == key_bytes {
-                   return self.heap.get_mut_ref(expull_addr);
+                   return (
+                       bucket as UnorderedTermId,
+                       self.heap.get_mut_ref(expull_addr),
+                   );
                }
            }
        }
    }
}
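With `get_or_create` now returning a `(UnorderedTermId, &mut V)` pair, every caller also learns the bucket index its key landed in, which doubles as the term's unordered id. A small usage sketch in the spirit of the tests below; it assumes the `TermHashMap` API from this diff and uses `u32`, which implements `HeapAllocable` above:

    // Sketch only: not part of the diff.
    fn sketch_usage() {
        let heap = Heap::with_capacity(1_000_000);
        let mut map: TermHashMap = TermHashMap::new(10, &heap);

        // First call allocates the value on the heap and returns the bucket
        // id that serves as the term's UnorderedTermId.
        let (term_id, counter): (UnorderedTermId, &mut u32) = map.get_or_create("hello");
        *counter += 1;

        // A later call with the same key yields the same id.
        let (same_id, _) = map.get_or_create::<_, u32>("hello");
        assert_eq!(term_id, same_id);
    }
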

#[cfg(test)]
mod tests {

@@ -218,7 +212,6 @@ mod tests {
    use std::collections::HashSet;
    use super::split_memory;

    struct TestValue {
        val: u32,
        _addr: u32,

@@ -235,42 +228,41 @@ mod tests {

    #[test]
    fn test_hashmap_size() {
-       assert_eq!(split_memory(100_000), (67232, 9));
-       assert_eq!(split_memory(1_000_000), (737856, 12));
-       assert_eq!(split_memory(10_000_000), (7902848, 15));
+       assert_eq!(split_memory(100_000), (67232, 12));
+       assert_eq!(split_memory(1_000_000), (737856, 15));
+       assert_eq!(split_memory(10_000_000), (7902848, 18));
    }

    #[test]
    fn test_hash_map() {
        let heap = Heap::with_capacity(2_000_000);
-       let mut hash_map: HashMap = HashMap::new(18, &heap);
+       let mut hash_map: TermHashMap = TermHashMap::new(18, &heap);
        {
-           let v: &mut TestValue = hash_map.get_or_create("abc");
+           let v: &mut TestValue = hash_map.get_or_create("abc").1;
            assert_eq!(v.val, 0u32);
            v.val = 3u32;
        }
        {
-           let v: &mut TestValue = hash_map.get_or_create("abcd");
+           let v: &mut TestValue = hash_map.get_or_create("abcd").1;
            assert_eq!(v.val, 0u32);
            v.val = 4u32;
        }
        {
-           let v: &mut TestValue = hash_map.get_or_create("abc");
+           let v: &mut TestValue = hash_map.get_or_create("abc").1;
            assert_eq!(v.val, 3u32);
        }
        {
-           let v: &mut TestValue = hash_map.get_or_create("abcd");
+           let v: &mut TestValue = hash_map.get_or_create("abcd").1;
            assert_eq!(v.val, 4u32);
        }
        let mut iter_values = hash_map.iter();
        {
-           let (_, addr) = iter_values.next().unwrap();
+           let (_, addr, _) = iter_values.next().unwrap();
            let val: &TestValue = heap.get_ref(addr);
            assert_eq!(val.val, 3u32);
        }
        {
-           let (_, addr) = iter_values.next().unwrap();
+           let (_, addr, _) = iter_values.next().unwrap();
            let val: &TestValue = heap.get_ref(addr);
            assert_eq!(val.val, 4u32);
        }

@@ -282,8 +274,10 @@ mod tests {
        let s1 = "abcdef";
        let s2 = "abcdeg";
        for i in 0..5 {
-           assert_eq!(murmurhash2(&s1[i..5].as_bytes()),
-                      murmurhash2(&s2[i..5].as_bytes()));
+           assert_eq!(
+               murmurhash2(&s1[i..5].as_bytes()),
+               murmurhash2(&s2[i..5].as_bytes())
+           );
        }
    }

@@ -303,14 +297,13 @@ mod tests {
        let keys: Vec<&'static str> =
            vec!["wer qwe qwe qwe ", "werbq weqweqwe2 ", "weraq weqweqwe3 "];
        b.iter(|| {
            keys.iter()
                .map(|&s| s.as_bytes())
                .map(murmurhash2::murmurhash2)
                .map(|h| h as u64)
                .last()
                .unwrap()
        });
    }
}

@@ -1,7 +1,7 @@
use std::cell::UnsafeCell;
use std::mem;
use std::ptr;
-use byteorder::{NativeEndian, ByteOrder};
+use byteorder::{ByteOrder, NativeEndian};

/// `BytesRef` refers to a slice in tantivy's custom `Heap`.
///

@@ -40,7 +40,9 @@ pub struct Heap {
impl Heap {
    /// Creates a new heap with a given capacity.
    pub fn with_capacity(num_bytes: usize) -> Heap {
-       Heap { inner: UnsafeCell::new(InnerHeap::with_capacity(num_bytes)) }
+       Heap {
+           inner: UnsafeCell::new(InnerHeap::with_capacity(num_bytes)),
+       }
    }

    fn inner(&self) -> &mut InnerHeap {

@@ -102,7 +104,6 @@ impl Heap {
    }
}

struct InnerHeap {
    buffer: Vec<u8>,
    buffer_len: u32,

@@ -110,12 +111,11 @@ struct InnerHeap {
    next_heap: Option<Box<InnerHeap>>,
}

impl InnerHeap {
    pub fn with_capacity(num_bytes: usize) -> InnerHeap {
        let buffer: Vec<u8> = vec![0u8; num_bytes];
        InnerHeap {
-           buffer: buffer,
+           buffer,
            buffer_len: num_bytes as u32,
            next_heap: None,
            used: 0u32,

@@ -144,7 +144,10 @@ impl InnerHeap {
            addr
        } else {
            if self.next_heap.is_none() {
-               info!(r#"Exceeded heap size. The segment will be committed right after indexing this document."#,);
+               info!(
+                   r#"Exceeded heap size. The segment will be committed right
+after indexing this document."#,
+               );
                self.next_heap = Some(Box::new(InnerHeap::with_capacity(self.buffer_len as usize)));
            }
            self.next_heap.as_mut().unwrap().allocate_space(num_bytes) + self.buffer_len
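Note the overflow strategy in `allocate_space`: when the primary buffer is exhausted, a second `InnerHeap` of the same capacity is chained, and addresses handed out from it are offset by `buffer_len`, so a single flat address space spans both buffers. A hedged sketch of that address translation; the `resolve` helper is hypothetical, the field name matches the diff:

    // Hypothetical helper illustrating the chained-arena addressing:
    // an address >= buffer_len belongs to the overflow heap.
    fn resolve(addr: u32, buffer_len: u32) -> (usize, u32) {
        if addr < buffer_len {
            (0, addr)              // (heap index, local address)
        } else {
            (1, addr - buffer_len) // delegate to next_heap, rebased
        }
    }

    fn main() {
        let buffer_len = 1024;
        assert_eq!(resolve(100, buffer_len), (0, 100));
        assert_eq!(resolve(1500, buffer_len), (1, 476)); // 1500 - 1024
    }
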

@@ -4,7 +4,7 @@ mod expull;
pub use self::heap::{Heap, HeapAllocable};
pub use self::expull::ExpUnrolledLinkedList;
-pub use self::hashmap::HashMap;
+pub use self::hashmap::TermHashMap;

#[test]
fn test_unrolled_linked_list() {

@@ -16,15 +16,15 @@ fn test_unrolled_linked_list() {
    ks.push(2);
    ks.push(3);
    for k in (1..5).map(|k| k * 100) {
-       let mut hashmap: HashMap = HashMap::new(10, &heap);
+       let mut hashmap: TermHashMap = TermHashMap::new(10, &heap);
        for j in 0..k {
            for i in 0..500 {
-               let v: &mut ExpUnrolledLinkedList = hashmap.get_or_create(i.to_string());
+               let v: &mut ExpUnrolledLinkedList = hashmap.get_or_create(i.to_string()).1;
                v.push(i * j, &heap);
            }
        }
        let mut map_addr: collections::HashMap<Vec<u8>, u32> = collections::HashMap::new();
-       for (key, addr) in hashmap.iter() {
+       for (key, addr, _) in hashmap.iter() {
            map_addr.insert(Vec::from(key), addr);
        }

@@ -39,6 +39,5 @@ fn test_unrolled_linked_list() {
            assert!(!it.next().is_some());
            }
        }

    }
}

@@ -1,7 +1,7 @@
use std::marker::Send;
use std::fmt;
use std::path::Path;
-use directory::error::{OpenReadError, DeleteError, OpenWriteError};
+use directory::error::{DeleteError, OpenReadError, OpenWriteError};
use directory::{ReadOnlySource, WritePtr};
use std::result;
use std::io;

Some files were not shown because too many files have changed in this diff.