Properly change the hasher type parameter of HashMapInit in .with_hasher()

David Freifeld
2025-06-25 03:03:19 -07:00
parent 00dfaa2eb4
commit 1fb3639170
2 changed files with 27 additions and 21 deletions
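
The core of the change is the signature of HashMapInit::with_hasher: the old version took and returned the same hasher type S, which is already fixed by the receiver, so it could never actually switch the map to a different BuildHasher. Below is a minimal, self-contained sketch of the pattern the new signature follows; the struct and its fields are simplified stand-ins for illustration, not the crate's real definition.

    use std::collections::hash_map::{DefaultHasher, RandomState};
    use std::hash::{BuildHasher, BuildHasherDefault};
    use std::marker::PhantomData;

    // Simplified stand-in for HashMapInit: just a hasher, a bucket count,
    // and PhantomData for the key/value parameters.
    struct Init<K, V, S = RandomState> {
        hasher: S,
        num_buckets: usize,
        _marker: PhantomData<(K, V)>,
    }

    impl<K, V, S> Init<K, V, S> {
        // Returning Init<K, V, T> instead of Self is what lets the builder
        // chain change the hasher type.
        fn with_hasher<T: BuildHasher>(self, hasher: T) -> Init<K, V, T> {
            Init {
                hasher,
                num_buckets: self.num_buckets,
                _marker: PhantomData,
            }
        }
    }

    fn main() {
        let init: Init<u64, u64> = Init {
            hasher: RandomState::new(),
            num_buckets: 1024,
            _marker: PhantomData,
        };
        // The result is an Init<u64, u64, BuildHasherDefault<DefaultHasher>>,
        // i.e. the hasher type parameter really changed.
        let init = init.with_hasher(BuildHasherDefault::<DefaultHasher>::default());
        let _ = (init.hasher.build_hasher(), init.num_buckets);
    }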

View File

@@ -129,11 +129,9 @@ fn small_benchs(c: &mut Criterion) {
group.bench_function("small_rehash_xxhash", |b| {
let ideal_filled = 4_000_000;
let size = 5_000_000;
let shmem = ShmemHandle::new("bench", 0, 1073741824 * 2).unwrap();
let init_struct = HashMapInit::<FileCacheKey, FileCacheEntry, _>::init_in_shmem_with_hasher(
size, shmem, twox_hash::xxhash64::RandomState::default(),
);
let mut writer = init_struct.attach_writer();
let mut writer = HashMapInit::new_resizeable(size, size * 2)
.with_hasher(twox_hash::xxhash64::RandomState::default())
.attach_writer();
let mut rng = rand::rng();
while writer.get_num_buckets_in_use() < ideal_filled as usize {
let key: FileCacheKey = rng.random();
@@ -147,11 +145,9 @@ fn small_benchs(c: &mut Criterion) {
group.bench_function("small_rehash_ahash", |b| {
let ideal_filled = 4_000_000;
let size = 5_000_000;
let shmem = ShmemHandle::new("bench", 0, 1073741824 * 2).unwrap();
let init_struct = HashMapInit::<FileCacheKey, FileCacheEntry, _>::init_in_shmem_with_hasher(
size, shmem, ahash::RandomState::default()
);
let mut writer = init_struct.attach_writer();
let mut writer = HashMapInit::new_resizeable(size, size * 2)
.with_hasher(ahash::RandomState::default())
.attach_writer();
let mut rng = rand::rng();
while writer.get_num_buckets_in_use() < ideal_filled as usize {
let key: FileCacheKey = rng.random();
@@ -164,12 +160,10 @@ fn small_benchs(c: &mut Criterion) {
group.bench_function("small_rehash_seahash", |b| {
let ideal_filled = 4_000_000;
let size = 5_000_000;
let shmem = ShmemHandle::new("bench", 0, 1073741824 * 2).unwrap();
let init_struct = HashMapInit::<FileCacheKey, FileCacheEntry, _>::init_in_shmem_with_hasher(
size, shmem, SeaRandomState::new()
);
let mut writer = init_struct.attach_writer();
let mut rng = rand::rng();
let mut writer = HashMapInit::new_resizeable(size, size * 2)
.with_hasher(SeaRandomState::new())
.attach_writer();
let mut rng = rand::rng();
while writer.get_num_buckets_in_use() < ideal_filled as usize {
let key: FileCacheKey = rng.random();
let val = FileCacheEntry::dummy();
@@ -225,6 +219,10 @@ fn real_benchs(c: &mut Criterion) {
         let mut writer = hashbrown::raw::RawTable::new();
         let mut rng = rand::rng();
         let hasher = rustc_hash::FxBuildHasher::default();
+        unsafe {
+            writer.resize(size, |(k,_)| hasher.hash_one(&k),
+                hashbrown::raw::Fallibility::Infallible).unwrap();
+        }
         while writer.len() < ideal_filled as usize {
             let key: FileCacheKey = rng.random();
             let val = FileCacheEntry::dummy();
@@ -261,7 +259,8 @@ fn real_benchs(c: &mut Criterion) {
         let mut rng = rand::rng();
         let hasher = rustc_hash::FxBuildHasher::default();
         unsafe {
-            writer.resize(size, |(k,_)| hasher.hash_one(&k), hashbrown::raw::Fallibility::Infallible).unwrap();
+            writer.resize(size, |(k,_)| hasher.hash_one(&k),
+                hashbrown::raw::Fallibility::Infallible).unwrap();
         }
         while writer.len() < ideal_filled as usize {
             let key: FileCacheKey = rng.random();

View File

@@ -48,8 +48,14 @@ unsafe impl<'a, K: Sync, V: Sync, S> Sync for HashMapAccess<'a, K, V, S> {}
 unsafe impl<'a, K: Send, V: Send, S> Send for HashMapAccess<'a, K, V, S> {}
 
 impl<'a, K: Clone + Hash + Eq, V, S> HashMapInit<'a, K, V, S> {
-    pub fn with_hasher(self, hasher: S) -> HashMapInit<'a, K, V, S> {
-        Self { hasher, ..self }
+    pub fn with_hasher<T: BuildHasher>(self, hasher: T) -> HashMapInit<'a, K, V, T> {
+        HashMapInit {
+            hasher,
+            shmem_handle: self.shmem_handle,
+            shared_ptr: self.shared_ptr,
+            shared_size: self.shared_size,
+            num_buckets: self.num_buckets,
+        }
     }
 
     /// Loosely (over)estimate the size needed to store a hash table with `num_buckets` buckets.
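
A note on the field-by-field rebuild above: Rust's functional update syntax (`Self { hasher, ..self }`) only compiles when the base expression has exactly the same type as the struct being constructed, generic parameters included. Because the return type now carries the new hasher parameter `T` rather than `S`, the remaining fields (`shmem_handle`, `shared_ptr`, `shared_size`, `num_buckets`) have to be moved into a fresh `HashMapInit` explicitly.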
@@ -287,11 +293,12 @@ where
buckets = std::slice::from_raw_parts_mut(buckets_ptr, num_buckets as usize);
dictionary = std::slice::from_raw_parts_mut(dictionary_ptr, dictionary_size);
}
(dictionary_ptr, dictionary_size)
}
for i in 0..dictionary.len() {
dictionary[i] = INVALID_POS;
}
for i in 0..rehash_buckets as usize {
if buckets[i].inner.is_none() {
buckets[i].next = inner.free_head;