Replace FNV by rustc-hash

Both constructions have similar goals, but rustc-hash is better suited to contemporary CPUs as it works one word at a time instead of byte by byte.
Adam Reichold
2022-10-27 00:35:09 +02:00
parent 0c2bd36fe3
commit bbb058d976
10 changed files with 33 additions and 40 deletions
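
The swap is mechanical because rustc_hash::FxHashMap and FxHashSet, like fnv::FnvHashMap, are plain std::collections maps and sets parameterized over a different BuildHasher. A minimal sketch, not part of this commit and assuming both crates as dependencies, of the byte-at-a-time vs. word-at-a-time difference and of the drop-in nature of the change:

```rust
use std::hash::Hasher;

use fnv::FnvHasher;
use rustc_hash::{FxHashMap, FxHasher};

fn main() {
    let key = b"some_term_bytes";

    // FNV folds the input into the hash state one byte at a time.
    let mut fnv = FnvHasher::default();
    fnv.write(key);
    println!("fnv hash: {:x}", fnv.finish());

    // FxHash (rustc-hash) consumes the input in word-sized chunks, which
    // tends to be faster on 64-bit CPUs for short keys like these.
    let mut fx = FxHasher::default();
    fx.write(key);
    println!("fx hash:  {:x}", fx.finish());

    // Both crates expose std HashMaps parameterized over their hasher,
    // so call sites only change the type name.
    let mut counts: FxHashMap<&str, u64> = FxHashMap::default();
    counts.insert("terma", 4);
    assert_eq!(counts["terma"], 4);
}
```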


@@ -45,7 +45,7 @@ rust-stemmers = "1.2.0"
downcast-rs = "1.2.0"
bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
census = "0.4.0"
fnv = "1.0.7"
rustc-hash = "1.1.0"
thiserror = "1.0.30"
htmlescape = "0.3.1"
fail = "0.5.0"


@@ -6,7 +6,7 @@
use std::collections::HashMap;
-use fnv::FnvHashMap;
+use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use super::agg_req::BucketAggregationInternal;
@@ -145,7 +145,7 @@ pub enum BucketEntries<T> {
/// Vector format bucket entries
Vec(Vec<T>),
/// HashMap format bucket entries
-HashMap(FnvHashMap<String, T>),
+HashMap(FxHashMap<String, T>),
}
/// This is the default entry for a bucket, which contains a key, count, and optionally


@@ -1,7 +1,7 @@
use std::fmt::Debug;
use std::ops::Range;
-use fnv::FnvHashMap;
+use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use crate::aggregation::agg_req_with_accessor::{
@@ -176,7 +176,7 @@ impl SegmentRangeCollector {
) -> crate::Result<IntermediateBucketResult> {
let field_type = self.field_type;
-let buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
+let buckets: FxHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
.buckets
.into_iter()
.map(move |range_bucket| {


@@ -1,7 +1,7 @@
use std::fmt::Debug;
-use fnv::FnvHashMap;
use itertools::Itertools;
+use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use super::{CustomOrder, Order, OrderTarget};
@@ -199,7 +199,7 @@ impl TermsAggregationInternal {
#[derive(Clone, Debug, PartialEq)]
/// Container to store term_ids and their buckets.
struct TermBuckets {
-pub(crate) entries: FnvHashMap<u32, TermBucketEntry>,
+pub(crate) entries: FxHashMap<u32, TermBucketEntry>,
blueprint: Option<SegmentAggregationResultsCollector>,
}
@@ -397,7 +397,7 @@ impl SegmentTermCollector {
.expect("internal error: inverted index not loaded for term aggregation");
let term_dict = inverted_index.terms();
-let mut dict: FnvHashMap<String, IntermediateTermBucketEntry> = Default::default();
+let mut dict: FxHashMap<String, IntermediateTermBucketEntry> = Default::default();
let mut buffer = vec![];
for (term_id, entry) in entries {
term_dict
@@ -1129,9 +1129,9 @@ mod tests {
assert_eq!(res["my_texts"]["buckets"][0]["key"], "terma");
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 4);
assert_eq!(res["my_texts"]["buckets"][1]["key"], "termb");
assert_eq!(res["my_texts"]["buckets"][1]["key"], "termc");
assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 0);
assert_eq!(res["my_texts"]["buckets"][2]["key"], "termc");
assert_eq!(res["my_texts"]["buckets"][2]["key"], "termb");
assert_eq!(res["my_texts"]["buckets"][2]["doc_count"], 0);
assert_eq!(res["my_texts"]["sum_other_doc_count"], 0);
assert_eq!(res["my_texts"]["doc_count_error_upper_bound"], 0);


@@ -5,8 +5,8 @@
use std::cmp::Ordering;
use std::collections::HashMap;
-use fnv::FnvHashMap;
use itertools::Itertools;
+use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use super::agg_req::{
@@ -288,7 +288,7 @@ impl IntermediateBucketResult {
.keyed;
let buckets = if is_keyed {
let mut bucket_map =
-FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
+FxHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
for bucket in buckets {
bucket_map.insert(bucket.key.to_string(), bucket);
}
@@ -308,7 +308,7 @@ impl IntermediateBucketResult {
let buckets = if req.as_histogram().unwrap().keyed {
let mut bucket_map =
-FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
+FxHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
for bucket in buckets {
bucket_map.insert(bucket.key.to_string(), bucket);
}
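
FxHashMap keeps the constructors of std::collections::HashMap because it is only a type alias over it; the hasher builder is a zero-sized Default type, so the pre-sizing call is unchanged apart from the type name. A small sketch of the assumed usage, not taken from the commit:

```rust
use rustc_hash::FxHashMap;

fn main() {
    // Pre-size the map; Default::default() supplies the zero-sized FxHash
    // hasher builder, just as the FNV builder was supplied before.
    let mut bucket_map: FxHashMap<String, u64> =
        FxHashMap::with_capacity_and_hasher(16, Default::default());
    bucket_map.insert("0-10".to_string(), 3);
    assert!(bucket_map.capacity() >= 16);
}
```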
@@ -396,13 +396,13 @@ impl IntermediateBucketResult {
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
/// Range aggregation including error counts
pub struct IntermediateRangeBucketResult {
-pub(crate) buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry>,
+pub(crate) buckets: FxHashMap<SerializedKey, IntermediateRangeBucketEntry>,
}
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
/// Term aggregation including error counts
pub struct IntermediateTermBucketResult {
-pub(crate) entries: FnvHashMap<String, IntermediateTermBucketEntry>,
+pub(crate) entries: FxHashMap<String, IntermediateTermBucketEntry>,
pub(crate) sum_other_doc_count: u64,
pub(crate) doc_count_error_upper_bound: u64,
}
@@ -499,8 +499,8 @@ trait MergeFruits {
}
fn merge_maps<V: MergeFruits + Clone>(
entries_left: &mut FnvHashMap<SerializedKey, V>,
mut entries_right: FnvHashMap<SerializedKey, V>,
entries_left: &mut FxHashMap<SerializedKey, V>,
mut entries_right: FxHashMap<SerializedKey, V>,
) {
for (name, entry_left) in entries_left.iter_mut() {
if let Some(entry_right) = entries_right.remove(name) {
@@ -626,7 +626,7 @@ mod tests {
fn get_sub_test_tree(data: &[(String, u64)]) -> IntermediateAggregationResults {
let mut map = HashMap::new();
-let mut buckets = FnvHashMap::default();
+let mut buckets = FxHashMap::default();
for (key, doc_count) in data {
buckets.insert(
key.to_string(),
@@ -653,7 +653,7 @@ mod tests {
data: &[(String, u64, String, u64)],
) -> IntermediateAggregationResults {
let mut map = HashMap::new();
-let mut buckets: FnvHashMap<_, _> = Default::default();
+let mut buckets: FxHashMap<_, _> = Default::default();
for (key, doc_count, sub_aggregation_key, sub_aggregation_count) in data {
buckets.insert(
key.to_string(),
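
merge_maps above is generic over the value type, so the swap only changes the map type in its signature. A hypothetical, simplified version of the same merge pattern, summing plain counts instead of MergeFruits values, to illustrate what it does:

```rust
use rustc_hash::FxHashMap;

// Fold the right-hand intermediate results into the left-hand ones.
fn merge_counts(left: &mut FxHashMap<String, u64>, mut right: FxHashMap<String, u64>) {
    for (key, left_count) in left.iter_mut() {
        if let Some(right_count) = right.remove(key) {
            *left_count += right_count;
        }
    }
    // Entries only present on the right are moved over unchanged.
    left.extend(right);
}

fn main() {
    let mut left: FxHashMap<String, u64> =
        [("terma".to_string(), 4)].into_iter().collect();
    let right: FxHashMap<String, u64> =
        [("terma".to_string(), 1), ("termb".to_string(), 2)].into_iter().collect();
    merge_counts(&mut left, right);
    assert_eq!(left["terma"], 5);
    assert_eq!(left["termb"], 2);
}
```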


@@ -3,7 +3,7 @@ use std::io;
use fastfield_codecs::{
Column, MonotonicallyMappableToU128, MonotonicallyMappableToU64, VecColumn,
};
-use fnv::FnvHashMap;
+use rustc_hash::FxHashMap;
use super::get_fastfield_codecs_for_multivalue;
use crate::fastfield::writer::unexpected_value;
@@ -144,7 +144,7 @@ impl MultiValuedFastFieldWriter {
pub fn serialize(
mut self,
serializer: &mut CompositeFastFieldSerializer,
term_mapping_opt: Option<&FnvHashMap<UnorderedTermId, TermOrdinal>>,
term_mapping_opt: Option<&FxHashMap<UnorderedTermId, TermOrdinal>>,
doc_id_map: Option<&DocIdMapping>,
) -> io::Result<()> {
{


@@ -3,7 +3,7 @@ use std::io;
use common;
use fastfield_codecs::{Column, MonotonicallyMappableToU128, MonotonicallyMappableToU64};
-use fnv::FnvHashMap;
+use rustc_hash::FxHashMap;
use tantivy_bitpacker::BlockedBitpacker;
use super::multivalued::{MultiValueU128FastFieldWriter, MultiValuedFastFieldWriter};
@@ -256,7 +256,7 @@ impl FastFieldsWriter {
pub fn serialize(
self,
serializer: &mut CompositeFastFieldSerializer,
mapping: &HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>,
mapping: &HashMap<Field, FxHashMap<UnorderedTermId, TermOrdinal>>,
doc_id_map: Option<&DocIdMapping>,
) -> io::Result<()> {
for field_writer in self.term_id_writers {


@@ -1,6 +1,6 @@
use fastfield_codecs::MonotonicallyMappableToU64;
-use fnv::FnvHashMap;
use murmurhash32::murmurhash2;
+use rustc_hash::FxHashMap;
use crate::fastfield::FastValue;
use crate::postings::{IndexingContext, IndexingPosition, PostingsWriter};
@@ -52,7 +52,7 @@ use crate::{DatePrecision, DateTime, DocId, Term};
/// path map to the same index position as long as the probability is relatively low.
#[derive(Default)]
struct IndexingPositionsPerPath {
-positions_per_path: FnvHashMap<u32, IndexingPosition>,
+positions_per_path: FxHashMap<u32, IndexingPosition>,
}
impl IndexingPositionsPerPath {


@@ -3,7 +3,7 @@ use std::io;
use std::marker::PhantomData;
use std::ops::Range;
-use fnv::FnvHashMap;
+use rustc_hash::FxHashMap;
use super::stacker::Addr;
use crate::fastfield::MultiValuedFastFieldWriter;
@@ -56,12 +56,12 @@ pub(crate) fn serialize_postings(
doc_id_map: Option<&DocIdMapping>,
schema: &Schema,
serializer: &mut InvertedIndexSerializer,
-) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
+) -> crate::Result<HashMap<Field, FxHashMap<UnorderedTermId, TermOrdinal>>> {
let mut term_offsets: Vec<(Term<&[u8]>, Addr, UnorderedTermId)> =
Vec::with_capacity(ctx.term_index.len());
term_offsets.extend(ctx.term_index.iter());
term_offsets.sort_unstable_by_key(|(k, _, _)| k.clone());
-let mut unordered_term_mappings: HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>> =
+let mut unordered_term_mappings: HashMap<Field, FxHashMap<UnorderedTermId, TermOrdinal>> =
HashMap::new();
let field_offsets = make_field_partition(&term_offsets);
@@ -74,7 +74,7 @@ pub(crate) fn serialize_postings(
let unordered_term_ids = term_offsets[byte_offsets.clone()]
.iter()
.map(|&(_, _, bucket)| bucket);
-let mapping: FnvHashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
+let mapping: FxHashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
.enumerate()
.map(|(term_ord, unord_term_id)| {
(unord_term_id as UnorderedTermId, term_ord as TermOrdinal)


@@ -10,28 +10,21 @@
//! assert_eq!(stream.next().unwrap().text, "crafty");
//! assert!(stream.next().is_none());
//! ```
-use std::collections::HashSet;
-use std::hash::BuildHasherDefault;
-use fnv::FnvHasher;
+use rustc_hash::FxHashSet;
use super::{Token, TokenFilter, TokenStream};
use crate::tokenizer::BoxTokenStream;
-// configure our hashers for SPEED
-type StopWordHasher = BuildHasherDefault<FnvHasher>;
-type StopWordHashSet = HashSet<String, StopWordHasher>;
/// `TokenFilter` that removes stop words from a token stream
#[derive(Clone)]
pub struct StopWordFilter {
-words: StopWordHashSet,
+words: FxHashSet<String>,
}
impl StopWordFilter {
/// Creates a `StopWordFilter` given a list of words to remove
pub fn remove(words: Vec<String>) -> StopWordFilter {
-let mut set = StopWordHashSet::default();
+let mut set = FxHashSet::default();
for word in words {
set.insert(word);
@@ -52,7 +45,7 @@ impl StopWordFilter {
}
pub struct StopWordFilterStream<'a> {
-words: StopWordHashSet,
+words: FxHashSet<String>,
tail: BoxTokenStream<'a>,
}
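
With the dedicated StopWordHasher/StopWordHashSet aliases gone, the whole pattern reduces to FxHashSet<String>. A self-contained sketch of the simplified shape, using hypothetical names rather than tantivy's actual API:

```rust
use rustc_hash::FxHashSet;

// Stand-in for the filter's word set after the change: no hand-rolled
// BuildHasherDefault<FnvHasher> alias, just the crate-provided set type.
struct StopWords {
    words: FxHashSet<String>,
}

impl StopWords {
    fn new(words: Vec<String>) -> Self {
        let mut set = FxHashSet::default();
        for word in words {
            set.insert(word);
        }
        StopWords { words: set }
    }

    fn is_stop_word(&self, token: &str) -> bool {
        self.words.contains(token)
    }
}

fn main() {
    let filter = StopWords::new(vec!["the".to_string(), "a".to_string()]);
    assert!(filter.is_stop_word("the"));
    assert!(!filter.is_stop_word("crafty"));
}
```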