diff --git a/libs/neon-shmem/Cargo.toml b/libs/neon-shmem/Cargo.toml
index 284a19d55d..bf14eb2e83 100644
--- a/libs/neon-shmem/Cargo.toml
+++ b/libs/neon-shmem/Cargo.toml
@@ -23,7 +23,3 @@ tempfile = "3.14.0"
 [[bench]]
 name = "hmap_resize"
 harness = false
-
-[[bin]]
-name = "hmap_test"
-path = "main.rs"
diff --git a/libs/neon-shmem/src/hash.rs b/libs/neon-shmem/src/hash.rs
index be7bca5104..54e83c433c 100644
--- a/libs/neon-shmem/src/hash.rs
+++ b/libs/neon-shmem/src/hash.rs
@@ -22,16 +22,16 @@ pub mod entry;
 mod tests;
 
 use core::{CoreHashMap, INVALID_POS};
-use entry::{Entry, OccupiedEntry, PrevPos};
+use entry::{Entry, OccupiedEntry};
 
-#[derive(Debug)]
-pub struct OutOfMemoryError();
 
 pub struct HashMapInit<'a, K, V, S = rustc_hash::FxBuildHasher> {
     // Hash table can be allocated in a fixed memory area, or in a resizeable ShmemHandle.
     shmem_handle: Option<ShmemHandle>,
     shared_ptr: *mut HashMapShared<'a, K, V>,
+    shared_size: usize,
     hasher: S,
+    num_buckets: u32,
 }
 
 pub struct HashMapAccess<'a, K, V, S = rustc_hash::FxBuildHasher> {
@@ -43,8 +43,51 @@ pub struct HashMapAccess<'a, K, V, S = rustc_hash::FxBuildHasher> {
 unsafe impl<'a, K: Sync, V: Sync, S> Sync for HashMapAccess<'a, K, V, S> {}
 unsafe impl<'a, K: Send, V: Send, S> Send for HashMapAccess<'a, K, V, S> {}
 
-impl<'a, K, V, S> HashMapInit<'a, K, V, S> {
+impl<'a, K: Clone + Hash + Eq, V, S> HashMapInit<'a, K, V, S> {
+    pub fn with_hasher(self, hasher: S) -> HashMapInit<'a, K, V, S> {
+        Self { hasher, ..self }
+    }
+
+    pub fn estimate_size(num_buckets: u32) -> usize {
+        // add some margin to cover alignment etc.
+        CoreHashMap::<K, V>::estimate_size(num_buckets) + size_of::<HashMapShared<K, V>>() + 1000
+    }
+
+    pub fn attach_writer(self) -> HashMapAccess<'a, K, V, S> {
+        let mut ptr: *mut u8 = self.shared_ptr.cast();
+        let end_ptr: *mut u8 = unsafe { ptr.add(self.shared_size) };
+        ptr = unsafe { ptr.add(ptr.align_offset(align_of::<HashMapShared<K, V>>())) };
+        let shared_ptr: *mut HashMapShared<K, V> = ptr.cast();
+        ptr = unsafe { ptr.add(size_of::<HashMapShared<K, V>>()) };
+
+        // carve out the buckets
+        ptr = unsafe { ptr.byte_add(ptr.align_offset(align_of::<LinkedKey<K>>())) };
+        let keys_ptr = ptr;
+        ptr = unsafe { ptr.add(size_of::<LinkedKey<K>>() * self.num_buckets as usize) };
+
+        ptr = unsafe { ptr.byte_add(ptr.align_offset(align_of::<Option<V>>())) };
+        let vals_ptr = ptr;
+        ptr = unsafe { ptr.add(size_of::<Option<V>>() * self.num_buckets as usize) };
+
+        // use remaining space for the dictionary
+        ptr = unsafe { ptr.byte_add(ptr.align_offset(align_of::<u32>())) };
+        assert!(ptr.addr() < end_ptr.addr());
+        let dictionary_ptr = ptr;
+        let dictionary_size = unsafe { end_ptr.byte_offset_from(ptr) / size_of::<u32>() as isize };
+        assert!(dictionary_size > 0);
+
+        let keys =
+            unsafe { std::slice::from_raw_parts_mut(keys_ptr.cast(), self.num_buckets as usize) };
+        let vals =
+            unsafe { std::slice::from_raw_parts_mut(vals_ptr.cast(), self.num_buckets as usize) };
+        let dictionary = unsafe {
+            std::slice::from_raw_parts_mut(dictionary_ptr.cast(), dictionary_size as usize)
+        };
+        let hashmap = CoreHashMap::new(keys, vals, dictionary);
+        unsafe {
+            std::ptr::write(shared_ptr, HashMapShared { inner: hashmap });
+        }
+
         HashMapAccess {
             shmem_handle: self.shmem_handle,
             shared_ptr: self.shared_ptr,
@@ -77,100 +120,56 @@ impl<'a, K, V> HashMapInit<'a, K, V, rustc_hash::FxBuildHasher>
 where
     K: Clone + Hash + Eq
 {
-    pub fn init_in_fixed_area(
-        num_buckets: u32,
+    pub fn with_fixed(
+        num_buckets: u32,
         area: &'a mut [MaybeUninit<u8>],
     ) -> HashMapInit<'a, K, V> {
-        Self::init_in_fixed_area_with_hasher(num_buckets, area, rustc_hash::FxBuildHasher::default())
+        Self {
+            num_buckets,
+            shmem_handle: None,
+            shared_ptr: area.as_mut_ptr().cast(),
+            shared_size: area.len(),
+            hasher: rustc_hash::FxBuildHasher::default(),
+        }
     }
 
     /// Initialize a new hash map in the given shared memory area
-    pub fn init_in_shmem(num_buckets: u32, shmem: ShmemHandle) -> HashMapInit<'a, K, V> {
-        Self::init_in_shmem_with_hasher(num_buckets, shmem, rustc_hash::FxBuildHasher::default())
-    }
-}
-
-impl<'a, K, V, S: BuildHasher> HashMapInit<'a, K, V, S>
-where
-    K: Clone + Hash + Eq
-{
-    pub fn estimate_size(num_buckets: u32) -> usize {
-        // add some margin to cover alignment etc.
-        CoreHashMap::<K, V>::estimate_size(num_buckets) + size_of::<HashMapShared<K, V>>() + 1000
-    }
-
-    pub fn init_in_shmem_with_hasher(num_buckets: u32, mut shmem: ShmemHandle, hasher: S) -> HashMapInit<'a, K, V, S> {
-        let size = Self::estimate_size(num_buckets);
-        shmem
+    pub fn with_shmem(num_buckets: u32, shmem: ShmemHandle) -> HashMapInit<'a, K, V> {
+        let size = Self::estimate_size(num_buckets);
+        shmem
             .set_size(size)
             .expect("could not resize shared memory area");
-
-        let ptr = unsafe { shmem.data_ptr.as_mut() };
-        Self::init_common(num_buckets, Some(shmem), ptr, size, hasher)
+        Self {
+            num_buckets,
+            shared_ptr: shmem.data_ptr.as_ptr().cast(),
+            shmem_handle: Some(shmem),
+            shared_size: size,
+            hasher: rustc_hash::FxBuildHasher::default()
+        }
     }
 
-    pub fn init_in_fixed_area_with_hasher(
-        num_buckets: u32,
-        area: &'a mut [MaybeUninit<u8>],
-        hasher: S,
-    ) -> HashMapInit<'a, K, V, S> {
-        Self::init_common(num_buckets, None, area.as_mut_ptr().cast(), area.len(), hasher)
-    }
-
-    fn init_common(
-        num_buckets: u32,
-        shmem_handle: Option<ShmemHandle>,
-        area_ptr: *mut u8,
-        area_len: usize,
-        hasher: S,
-    ) -> HashMapInit<'a, K, V, S> {
-        // carve out the HashMapShared struct from the area.
-        let mut ptr: *mut u8 = area_ptr;
-        let end_ptr: *mut u8 = unsafe { area_ptr.add(area_len) };
-        ptr = unsafe { ptr.add(ptr.align_offset(align_of::<HashMapShared<K, V>>())) };
-        let shared_ptr: *mut HashMapShared<K, V> = ptr.cast();
-        ptr = unsafe { ptr.add(size_of::<HashMapShared<K, V>>()) };
-
-        // carve out the buckets
-        ptr = unsafe { ptr.byte_add(ptr.align_offset(align_of::<LinkedKey<K>>())) };
-        let keys_ptr = ptr;
-        ptr = unsafe { ptr.add(size_of::<LinkedKey<K>>() * num_buckets as usize) };
+    pub fn new_resizeable_named(num_buckets: u32, max_buckets: u32, name: &str) -> HashMapInit<'a, K, V> {
+        let size = Self::estimate_size(num_buckets);
+        let max_size = Self::estimate_size(max_buckets);
+        let shmem = ShmemHandle::new(name, size, max_size)
+            .expect("failed to make shared memory area");
 
-        ptr = unsafe { ptr.byte_add(ptr.align_offset(align_of::<Option<V>>())) };
-        let vals_ptr = ptr;
-        ptr = unsafe { ptr.add(size_of::<Option<V>>() * num_buckets as usize) };
+        Self {
+            num_buckets,
+            shared_ptr: shmem.data_ptr.as_ptr().cast(),
+            shmem_handle: Some(shmem),
+            shared_size: size,
+            hasher: rustc_hash::FxBuildHasher::default()
+        }
+    }
 
-        ptr = unsafe { ptr.byte_add(ptr.align_offset(align_of::<PrevPos>())) };
-        let prevs_ptr = ptr;
-        ptr = unsafe { ptr.add(size_of::<PrevPos>() * num_buckets as usize) };
-
-        // use remaining space for the dictionary
-        ptr = unsafe { ptr.byte_add(ptr.align_offset(align_of::<u32>())) };
-        assert!(ptr.addr() < end_ptr.addr());
-        let dictionary_ptr = ptr;
-        let dictionary_size = unsafe { end_ptr.byte_offset_from(ptr) / size_of::<u32>() as isize };
-        assert!(dictionary_size > 0);
-
-        let keys =
-            unsafe { std::slice::from_raw_parts_mut(keys_ptr.cast(), num_buckets as usize) };
-        let vals =
-            unsafe { std::slice::from_raw_parts_mut(vals_ptr.cast(), num_buckets as usize) };
-        let prevs =
-            unsafe { std::slice::from_raw_parts_mut(prevs_ptr.cast(), num_buckets as usize) };
-        let dictionary = unsafe {
-            std::slice::from_raw_parts_mut(dictionary_ptr.cast(), dictionary_size as usize)
-        };
-        let hashmap = CoreHashMap::new(keys, vals, prevs, dictionary);
-        unsafe {
-            std::ptr::write(shared_ptr, HashMapShared { inner: hashmap });
-        }
-
-        HashMapInit {
-            shmem_handle,
-            shared_ptr,
-            hasher,
-        }
-    }
+    pub fn new_resizeable(num_buckets: u32, max_buckets: u32) -> HashMapInit<'a, K, V> {
+        use std::sync::atomic::{AtomicUsize, Ordering};
+        static COUNTER: AtomicUsize = AtomicUsize::new(0);
+        let val = COUNTER.fetch_add(1, Ordering::Relaxed);
+        let name = format!("neon_shmem_hmap{}", val);
+        Self::new_resizeable_named(num_buckets, max_buckets, &name)
+    }
 }
 
 impl<'a, K, V, S: BuildHasher> HashMapAccess<'a, K, V, S>
@@ -260,16 +259,15 @@ where
         num_buckets: u32,
         rehash_buckets: u32,
     ) {
+        inner.free_head = INVALID_POS;
+
         // Recalculate the dictionary
         let keys;
        let dictionary;
        unsafe {
             let keys_end_ptr = keys_ptr.add(num_buckets as usize);
-
-            let ptr: *mut u8 = (keys_end_ptr as *mut u8)
+            let buckets_end_ptr: *mut u8 = (keys_end_ptr as *mut u8)
                 .add(size_of::<Option<V>>() * num_buckets as usize);
-            let buckets_end_ptr = ptr.byte_add(ptr.align_offset(align_of::<PrevPos>()))
-                .add(size_of::<PrevPos>() * num_buckets as usize);
             let dictionary_ptr: *mut u32 = buckets_end_ptr
                 .byte_add(buckets_end_ptr.align_offset(align_of::<u32>()))
                 .cast();
@@ -284,9 +282,11 @@ where
         }
 
         for i in 0..rehash_buckets as usize {
-            if keys[i].inner.is_none() {
-                continue;
-            }
+            if keys[i].inner.is_none() {
+                keys[i].next = inner.free_head;
+                inner.free_head = i as u32;
+                continue;
+            }
 
             let hash = self.hasher.hash_one(&keys[i].inner.as_ref().unwrap());
             let pos: usize = (hash % dictionary.len() as u64) as usize;
@@ -299,18 +299,13 @@ where
         inner.keys = keys;
     }
 
-    /// Rehash the map. Intended for benchmarking only.
+    /// Rehash the map. Intended for benchmarking only.
     pub fn shuffle(&mut self) {
         let map = unsafe { self.shared_ptr.as_mut() }.unwrap();
         let inner = &mut map.inner;
-
-        let shmem_handle = self
-            .shmem_handle
-            .as_ref()
-            .expect("TODO(quantumish): make shuffle work w/ fixed-size table");
         let num_buckets = inner.get_num_buckets() as u32;
         let size_bytes = HashMapInit::<K, V, S>::estimate_size(num_buckets);
-        let end_ptr: *mut u8 = unsafe { shmem_handle.data_ptr.as_ptr().add(size_bytes) };
+        let end_ptr: *mut u8 = unsafe { (self.shared_ptr as *mut u8).add(size_bytes) };
         let keys_ptr = inner.keys.as_mut_ptr();
         self.rehash_dict(inner, keys_ptr, end_ptr, num_buckets, num_buckets);
     }
@@ -325,7 +320,6 @@ where
     //     let map = unsafe { self.shared_ptr.as_mut() }.unwrap();
     //     let inner = &mut map.inner;
     //     let old_num_buckets = inner.buckets.len() as u32;
-
     //     if num_buckets < old_num_buckets {
     //         panic!("grow called with a smaller number of buckets");
     //     }
@@ -362,7 +356,6 @@ where
     //         });
     //         }
     //     }
-
     //     self.rehash_dict(inner, keys_ptr, end_ptr, num_buckets, old_num_buckets);
     //     inner.free_head = old_num_buckets;
diff --git a/libs/neon-shmem/src/hash/core.rs b/libs/neon-shmem/src/hash/core.rs
index 986d5ed892..e8b6e3bfdd 100644
--- a/libs/neon-shmem/src/hash/core.rs
+++ b/libs/neon-shmem/src/hash/core.rs
@@ -10,23 +10,17 @@ use crate::hash::entry::{Entry, OccupiedEntry, PrevPos, VacantEntry};
 
 pub(crate) const INVALID_POS: u32 = u32::MAX;
 
-// Bucket
-// pub(crate) struct Bucket {
-//     pub(crate) next: u32,
-//     pub(crate) prev: PrevPos,
-//     pub(crate) inner: Option<(K, V)>,
-// }
-
 pub(crate) struct LinkedKey<K> {
     pub(crate) inner: Option<K>,
     pub(crate) next: u32,
 }
 
 pub(crate) struct CoreHashMap<'a, K, V> {
+    /// Dictionary used to map hashes to bucket indices.
     pub(crate) dictionary: &'a mut [u32],
     pub(crate) keys: &'a mut [LinkedKey<K>],
     pub(crate) vals: &'a mut [Option<V>],
-    pub(crate) prevs: &'a mut [PrevPos],
+    /// Head of the freelist.
     pub(crate) free_head: u32,
     pub(crate) _user_list_head: u32,
@@ -50,7 +44,7 @@ where
         let mut size = 0;
 
         // buckets
-        size += (size_of::<LinkedKey<K>>() + size_of::<Option<V>>() + size_of::<PrevPos>())
+        size += (size_of::<LinkedKey<K>>() + size_of::<Option<V>>())
             * num_buckets as usize;
 
         // dictionary
@@ -63,7 +57,6 @@ where
     pub fn new(
         keys: &'a mut [MaybeUninit<LinkedKey<K>>],
         vals: &'a mut [MaybeUninit<Option<V>>],
-        prevs: &'a mut [MaybeUninit<PrevPos>],
         dictionary: &'a mut [MaybeUninit<u32>],
     ) -> CoreHashMap<'a, K, V> {
         // Initialize the buckets
@@ -80,15 +73,8 @@ where
         for i in 0..vals.len() {
             vals[i].write(None);
         }
-        for i in 0..prevs.len() {
-            prevs[i].write(if i > 0 {
-                PrevPos::Chained(i as u32 - 1)
-            } else {
-                PrevPos::First(INVALID_POS)
-            });
-        }
-
-        // Initialize the dictionary
+
+        // Initialize the dictionary
         for i in 0..dictionary.len() {
             dictionary[i].write(INVALID_POS);
         }
@@ -98,8 +84,6 @@ where
             unsafe { std::slice::from_raw_parts_mut(keys.as_mut_ptr().cast(), keys.len()) };
         let vals =
             unsafe { std::slice::from_raw_parts_mut(vals.as_mut_ptr().cast(), vals.len()) };
-        let prevs =
-            unsafe { std::slice::from_raw_parts_mut(prevs.as_mut_ptr().cast(), prevs.len()) };
         let dictionary = unsafe {
             std::slice::from_raw_parts_mut(dictionary.as_mut_ptr().cast(), dictionary.len())
         };
@@ -108,7 +92,6 @@ where
             dictionary,
             keys,
             vals,
-            prevs,
             free_head: 0,
             buckets_in_use: 0,
             _user_list_head: INVALID_POS,
@@ -181,8 +164,8 @@ where
         self.alloc_limit != INVALID_POS
     }
 
-
-    // TODO(quantumish): How does this interact with an ongoing shrink?
+    /// Clears all entries from the hashmap.
+    /// Does not reset any allocation limits, but does clear any entries beyond them.
     pub fn clear(&mut self) {
         for i in 0..self.keys.len() {
             self.keys[i] = LinkedKey {
@@ -194,13 +177,6 @@ where
                 inner: None,
             }
         }
-        for i in 0..self.prevs.len() {
-            self.prevs[i] = if i > 0 {
-                PrevPos::Chained(i as u32 - 1)
-            } else {
-                PrevPos::First(INVALID_POS)
-            }
-        }
         for i in 0..self.vals.len() {
             self.vals[i] = None;
         }
@@ -210,7 +186,6 @@ where
         }
 
         self.buckets_in_use = 0;
-        self.alloc_limit = INVALID_POS;
     }
 
     pub fn entry_at_bucket(&mut self, pos: usize) -> Option<OccupiedEntry<'a, '_, K, V>> {
@@ -218,13 +193,12 @@ where
             return None;
         }
 
-        let prev = self.prevs[pos];
         let entry = self.keys[pos].inner.as_ref();
         match entry {
             Some(key) => Some(OccupiedEntry {
                 _key: key.clone(),
                 bucket_pos: pos as u32,
-                prev_pos: prev,
+                prev_pos: PrevPos::Unknown,
                 map: self,
             }),
             _ => None,
         }
     }
 
     /// Find the position of an unused bucket via the freelist and initialize it.
-    pub(crate) fn alloc_bucket(&mut self, key: K, value: V, dict_pos: u32) -> Result<u32, FullError> {
+    pub(crate) fn alloc_bucket(&mut self, key: K, value: V) -> Result<u32, FullError> {
         let mut pos = self.free_head;
 
         // Find the first bucket we're *allowed* to use.
@@ -251,17 +225,12 @@ where
             PrevPos::First(_) => {
                 let next_pos = self.keys[pos as usize].next;
                 self.free_head = next_pos;
-                if next_pos != INVALID_POS {
-                    self.prevs[next_pos as usize] = PrevPos::First(dict_pos);
-                }
             }
             PrevPos::Chained(p) => if p != INVALID_POS {
                 let next_pos = self.keys[pos as usize].next;
                 self.keys[p as usize].next = next_pos;
-                if next_pos != INVALID_POS {
-                    self.prevs[next_pos as usize] = PrevPos::Chained(p);
-                }
             },
+            PrevPos::Unknown => unreachable!()
         }
 
         // Initialize the bucket.
diff --git a/libs/neon-shmem/src/hash/entry.rs b/libs/neon-shmem/src/hash/entry.rs
index e3335fb1db..f650bffa6c 100644
--- a/libs/neon-shmem/src/hash/entry.rs
+++ b/libs/neon-shmem/src/hash/entry.rs
@@ -10,17 +10,35 @@ pub enum Entry<'a, 'b, K, V> {
     Vacant(VacantEntry<'a, 'b, K, V>),
 }
 
+/// Helper enum representing the previous position within a hashmap chain.
 #[derive(Clone, Copy)]
 pub(crate) enum PrevPos {
+    /// Starting index within the dictionary.
     First(u32),
+    /// Regular index within the buckets.
     Chained(u32),
+    /// Unknown - e.g. the associated entry was retrieved by index instead of chain.
+    Unknown,
+}
+
+impl PrevPos {
+    /// Unwrap an index from a `PrevPos::First`, panicking otherwise.
+    pub fn unwrap_first(&self) -> u32 {
+        match self {
+            Self::First(i) => *i,
+            _ => panic!("not first entry in chain")
+        }
+    }
 }
 
 pub struct OccupiedEntry<'a, 'b, K, V> {
-    pub(crate) map: &'b mut CoreHashMap<'a, K, V>,
-    pub(crate) _key: K, // The key of the occupied entry
+    pub(crate) map: &'b mut CoreHashMap<'a, K, V>,
+    /// The key of the occupied entry
+    pub(crate) _key: K,
+    /// The index of the previous entry in the chain.
     pub(crate) prev_pos: PrevPos,
-    pub(crate) bucket_pos: u32, // The position of the bucket in the CoreHashMap's buckets array
+    /// The position of the bucket in the CoreHashMap's buckets array.
+    pub(crate) bucket_pos: u32,
 }
 
 impl<'a, 'b, K, V> OccupiedEntry<'a, 'b, K, V> {
@@ -52,17 +70,14 @@ impl<'a, 'b, K, V> OccupiedEntry<'a, 'b, K, V> {
             PrevPos::First(dict_pos) => self.map.dictionary[dict_pos as usize] = keylink.next,
             PrevPos::Chained(bucket_pos) => {
                 self.map.keys[bucket_pos as usize].next = keylink.next
-            }
+            },
+            PrevPos::Unknown => panic!("can't safely remove entry with unknown previous entry"),
         }
 
         // and add it to the freelist
-        if self.map.free_head != INVALID_POS {
-            self.map.prevs[self.map.free_head as usize] = PrevPos::Chained(self.bucket_pos);
-        }
         let keylink = &mut self.map.keys[self.bucket_pos as usize];
         keylink.inner = None;
         keylink.next = self.map.free_head;
-        self.map.prevs[self.bucket_pos as usize] = PrevPos::First(INVALID_POS);
         let old_value = self.map.vals[self.bucket_pos as usize].take();
         self.map.free_head = self.bucket_pos;
         self.map.buckets_in_use -= 1;
@@ -79,14 +94,10 @@ pub struct VacantEntry<'a, 'b, K, V> {
 
 impl<'a, 'b, K: Clone + Hash + Eq, V> VacantEntry<'a, 'b, K, V> {
     pub fn insert(self, value: V) -> Result<&'b mut V, FullError> {
-        let pos = self.map.alloc_bucket(self.key, value, self.dict_pos)?;
+        let pos = self.map.alloc_bucket(self.key, value)?;
         if pos == INVALID_POS {
             return Err(FullError());
         }
-        let prev = &mut self.map.prevs[pos as usize];
-        if let PrevPos::First(INVALID_POS) = prev {
-            *prev = PrevPos::First(self.dict_pos);
-        }
         self.map.keys[pos as usize].next = self.map.dictionary[self.dict_pos as usize];
         self.map.dictionary[self.dict_pos as usize] = pos;
diff --git a/libs/neon-shmem/src/hash/tests.rs b/libs/neon-shmem/src/hash/tests.rs
index ee6acfc144..a522423db1 100644
--- a/libs/neon-shmem/src/hash/tests.rs
+++ b/libs/neon-shmem/src/hash/tests.rs
@@ -38,11 +38,9 @@ impl<'a> From<&'a [u8]> for TestKey {
 }
 
 fn test_inserts<K: Into<TestKey> + Copy>(keys: &[K]) {
-    const MAX_MEM_SIZE: usize = 10000000;
-    let shmem = ShmemHandle::new("test_inserts", 0, MAX_MEM_SIZE).unwrap();
-
-    let init_struct = HashMapInit::<TestKey, usize>::init_in_shmem(100000, shmem);
-    let mut w = init_struct.attach_writer();
+    let mut w = HashMapInit::<TestKey, usize>::new_resizeable_named(
+        100000, 120000, "test_inserts"
+    ).attach_writer();
 
     for (idx, k) in keys.iter().enumerate() {
         let hash = w.get_hash_value(&(*k).into());
@@ -193,24 +191,23 @@ fn do_shrink(
     to: u32
 ) {
     writer.begin_shrink(to);
-    for i in to..from {
-        if let Some(entry) = writer.entry_at_bucket(i as usize) {
-            shadow.remove(&entry._key);
-            entry.remove();
+    while writer.get_num_buckets_in_use() > to as usize {
+        let (k, _) = shadow.pop_first().unwrap();
+        let hash = writer.get_hash_value(&k);
+        let entry = writer.entry_with_hash(k, hash);
+        if let Entry::Occupied(mut e) = entry {
+            e.remove();
         }
     }
     writer.finish_shrink().unwrap();
-
 }
 
 #[test]
 fn random_ops() {
-    let shmem = ShmemHandle::new("test_inserts", 0, 10000000).unwrap();
-
-    let init_struct = HashMapInit::<TestKey, usize>::init_in_shmem(100000, shmem);
-    let mut writer = init_struct.attach_writer();
+    let mut writer = HashMapInit::<TestKey, usize>::new_resizeable_named(
+        100000, 120000, "test_random"
+    ).attach_writer();
     let mut shadow: std::collections::BTreeMap<TestKey, usize> = BTreeMap::new();
-
     let distribution = Zipf::new(u128::MAX as f64, 1.1).unwrap();
     let mut rng = rand::rng();
 
@@ -227,11 +224,25 @@ fn random_ops() {
     }
 }
 
+
+#[test]
+fn test_shuffle() {
+    let mut writer = HashMapInit::<TestKey, usize>::new_resizeable_named(
+        1000, 1200, "test_shuf"
+    ).attach_writer();
+    let mut shadow: std::collections::BTreeMap<TestKey, usize> = BTreeMap::new();
+    let mut rng = rand::rng();
+
+    do_random_ops(10000, 1000, 0.75, &mut writer, &mut shadow, &mut rng);
+    writer.shuffle();
+    do_random_ops(10000, 1000, 0.75, &mut writer, &mut shadow, &mut rng);
+}
+
 #[test]
 fn test_grow() {
-    let shmem = ShmemHandle::new("test_grow", 0, 10000000).unwrap();
-    let init_struct = HashMapInit::<TestKey, usize>::init_in_shmem(1000, shmem);
-    let mut writer = init_struct.attach_writer();
+    let mut writer = HashMapInit::<TestKey, usize>::new_resizeable_named(
+        1000, 2000, "test_grow"
+    ).attach_writer();
     let mut shadow: std::collections::BTreeMap<TestKey, usize> = BTreeMap::new();
     let mut rng = rand::rng();
 
@@ -242,9 +253,9 @@ fn test_grow() {
 
 #[test]
 fn test_shrink() {
-    let shmem = ShmemHandle::new("test_shrink", 0, 10000000).unwrap();
-    let init_struct = HashMapInit::<TestKey, usize>::init_in_shmem(1500, shmem);
-    let mut writer = init_struct.attach_writer();
+    let mut writer = HashMapInit::<TestKey, usize>::new_resizeable_named(
+        1500, 2000, "test_shrink"
+    ).attach_writer();
     let mut shadow: std::collections::BTreeMap<TestKey, usize> = BTreeMap::new();
     let mut rng = rand::rng();
 
@@ -257,9 +268,9 @@ fn test_shrink() {
 
 #[test]
 fn test_shrink_grow_seq() {
-    let shmem = ShmemHandle::new("test_shrink_grow_seq", 0, 10000000).unwrap();
-    let init_struct = HashMapInit::<TestKey, usize>::init_in_shmem(1500, shmem);
-    let mut writer = init_struct.attach_writer();
+    let mut writer = HashMapInit::<TestKey, usize>::new_resizeable_named(
+        1000, 20000, "test_grow_seq"
+    ).attach_writer();
     let mut shadow: std::collections::BTreeMap<TestKey, usize> = BTreeMap::new();
     let mut rng = rand::rng();
 
@@ -281,9 +292,9 @@ fn test_shrink_grow_seq() {
 
 #[test]
 fn test_bucket_ops() {
-    let shmem = ShmemHandle::new("test_bucket_ops", 0, 10000000).unwrap();
-    let init_struct = HashMapInit::<TestKey, usize>::init_in_shmem(1000, shmem);
-    let mut writer = init_struct.attach_writer();
+    let mut writer = HashMapInit::<TestKey, usize>::new_resizeable_named(
+        1000, 1200, "test_bucket_ops"
+    ).attach_writer();
     let hash = writer.get_hash_value(&1.into());
     match writer.entry_with_hash(1.into(), hash) {
         Entry::Occupied(mut e) => { e.insert(2); },
@@ -307,9 +318,9 @@ fn test_bucket_ops() {
 
 #[test]
 fn test_shrink_zero() {
-    let shmem = ShmemHandle::new("test_shrink_zero", 0, 10000000).unwrap();
-    let init_struct = HashMapInit::<TestKey, usize>::init_in_shmem(1500, shmem);
-    let mut writer = init_struct.attach_writer();
+    let mut writer = HashMapInit::<TestKey, usize>::new_resizeable_named(
+        1500, 2000, "test_shrink_zero"
+    ).attach_writer();
     writer.begin_shrink(0);
     for i in 0..1500 {
         writer.entry_at_bucket(i).map(|x| x.remove());
     }
@@ -336,27 +347,27 @@ fn test_shrink_zero() {
 
 #[test]
 #[should_panic]
 fn test_grow_oom() {
-    let shmem = ShmemHandle::new("test_grow_oom", 0, 500).unwrap();
-    let init_struct = HashMapInit::<TestKey, usize>::init_in_shmem(5, shmem);
-    let mut writer = init_struct.attach_writer();
+    let mut writer = HashMapInit::<TestKey, usize>::new_resizeable_named(
+        1500, 2000, "test_grow_oom"
+    ).attach_writer();
     writer.grow(20000).unwrap();
 }
 
 #[test]
 #[should_panic]
 fn test_shrink_bigger() {
-    let shmem = ShmemHandle::new("test_shrink_bigger", 0, 10000000).unwrap();
-    let init_struct = HashMapInit::<TestKey, usize>::init_in_shmem(1500, shmem);
-    let mut writer = init_struct.attach_writer();
+    let mut writer = HashMapInit::<TestKey, usize>::new_resizeable_named(
+        1500, 2500, "test_shrink_bigger"
+    ).attach_writer();
     writer.begin_shrink(2000);
 }
 
 #[test]
 #[should_panic]
 fn test_shrink_early_finish() {
-    let shmem = ShmemHandle::new("test_shrink_early_finish", 0, 10000000).unwrap();
-    let init_struct = HashMapInit::<TestKey, usize>::init_in_shmem(1500, shmem);
-    let mut writer = init_struct.attach_writer();
+    let mut writer = HashMapInit::<TestKey, usize>::new_resizeable_named(
+        1500, 2500, "test_shrink_early_finish"
+    ).attach_writer();
     writer.finish_shrink().unwrap();
 }
@@ -364,7 +375,7 @@ fn test_shrink_early_finish() {
 #[should_panic]
 fn test_shrink_fixed_size() {
     let mut area = [MaybeUninit::uninit(); 10000];
-    let init_struct = HashMapInit::<TestKey, usize>::init_in_fixed_area(3, &mut area);
+    let init_struct = HashMapInit::<TestKey, usize>::with_fixed(3, &mut area);
     let mut writer = init_struct.attach_writer();
     writer.begin_shrink(1);
 }
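
Usage sketch (illustrative only, not part of the patch): after this change, callers construct the map through the new builder-style constructors instead of creating a ShmemHandle by hand. The snippet follows the constructors and entry API exercised by the tests above; the module paths, map name, sizes, and the u64 key/value types are assumptions made for illustration.

    // Paths assumed from the module layout in this diff.
    use neon_shmem::hash::HashMapInit;
    use neon_shmem::hash::entry::Entry;

    fn example() {
        // Size the backing shared memory for 1_000 buckets now and allow it to
        // grow up to the estimated size for 10_000 buckets.
        let mut writer = HashMapInit::<u64, u64>::new_resizeable_named(1_000, 10_000, "example_map")
            .attach_writer();

        // Writers hash the key themselves and go through the entry API, as the
        // tests in this patch do.
        let key = 42u64;
        let hash = writer.get_hash_value(&key);
        match writer.entry_with_hash(key, hash) {
            Entry::Occupied(mut e) => { e.insert(7); }
            Entry::Vacant(e) => { e.insert(7).unwrap(); }
        }
    }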