From b018b0ff60664879343faeda0da94e10050e61fb Mon Sep 17 00:00:00 2001
From: David Freifeld
Date: Wed, 2 Jul 2025 12:48:44 -0700
Subject: [PATCH] Fix cargo clippy lints for neon-shmem

---
 libs/neon-shmem/src/hash.rs | 45 ++++++++++++++++------------
 libs/neon-shmem/src/sync.rs | 58 +++++++++++++++++++------------------
 2 files changed, 55 insertions(+), 48 deletions(-)

diff --git a/libs/neon-shmem/src/hash.rs b/libs/neon-shmem/src/hash.rs
index 733e4b6f33..d0cec22493 100644
--- a/libs/neon-shmem/src/hash.rs
+++ b/libs/neon-shmem/src/hash.rs
@@ -1,23 +1,28 @@
-//! Resizable hash table implementation on top of byte-level storage (either a [`ShmemHandle`] or a fixed byte array).
+//! Resizable hash table implementation on top of byte-level storage (either a [`ShmemHandle`] or a
+//! fixed byte array).
 //!
-//! This hash table has two major components: the bucket array and the dictionary. Each bucket within the
-//! bucket array contains a `Option<(K, V)>` and an index of another bucket. In this way there is both an
-//! implicit freelist within the bucket array (`None` buckets point to other `None` entries) and various hash
-//! chains within the bucket array (a Some bucket will point to other Some buckets that had the same hash).
+//! This hash table has two major components: the bucket array and the dictionary. Each bucket
+//! within the bucket array contains an `Option<(K, V)>` and an index of another bucket. In this way
+//! there is both an implicit freelist within the bucket array (`None` buckets point to other `None`
+//! entries) and various hash chains within the bucket array (a `Some` bucket will point to other
+//! `Some` buckets that had the same hash).
 //!
-//! Buckets are never moved unless they are within a region that is being shrunk, and so the actual hash-
-//! dependent component is done with the dictionary. When a new key is inserted into the map, a position
-//! within the dictionary is decided based on its hash, the data is inserted into an empty bucket based
-//! off of the freelist, and then the index of said bucket is placed in the dictionary.
+//! Buckets are never moved unless they are within a region that is being shrunk, and so the actual
+//! hash-dependent component is handled by the dictionary. When a new key is inserted into the map,
+//! a position within the dictionary is decided based on its hash, the data is inserted into an
+//! empty bucket taken from the freelist, and then the index of said bucket is placed in the
+//! dictionary.
 //!
-//! This map is resizable (if initialized on top of a [`ShmemHandle`]). Both growing and shrinking happen
-//! in-place and are at a high level achieved by expanding/reducing the bucket array and rebuilding the
-//! dictionary by rehashing all keys.
+//! This map is resizable (if initialized on top of a [`ShmemHandle`]). Both growing and shrinking
+//! happen in-place and are at a high level achieved by expanding/reducing the bucket array and
+//! rebuilding the dictionary by rehashing all keys.
 
 use std::hash::{Hash, BuildHasher};
 use std::mem::MaybeUninit;
 
-use crate::{shmem, sync::*};
+use crate::{shmem, sync::{
+    PthreadRwLock, RwLock, RwLockReadGuard, RwLockWriteGuard, ValueReadGuard
+}};
 use crate::shmem::ShmemHandle;
 
 mod core;
@@ -97,7 +102,9 @@ impl<'a, K: Clone + Hash + Eq, V, S> HashMapInit<'a, K, V, S> {
             std::slice::from_raw_parts_mut(dictionary_ptr.cast(), dictionary_size as usize)
         };
         let hashmap = CoreHashMap::new(buckets, dictionary);
-        let lock = RwLock::from_raw(PthreadRwLock::new(raw_lock_ptr.cast()), hashmap);
+        let lock = RwLock::from_raw(PthreadRwLock::new(
+            std::ptr::NonNull::new(raw_lock_ptr.cast()).unwrap()
+        ), hashmap);
         unsafe {
             std::ptr::write(shared_ptr, lock);
         }
@@ -257,7 +264,7 @@ where
     /// Remove a key given its hash. Returns the associated value if it existed.
     pub fn remove(&self, key: &K) -> Option<V> {
-        let hash = self.get_hash_value(&key);
+        let hash = self.get_hash_value(key);
         match self.entry_with_hash(key.clone(), hash) {
             Entry::Occupied(e) => Some(e.remove()),
             Entry::Vacant(_) => None
         }
@@ -296,7 +303,7 @@ where
                 _key: key.clone(),
                 bucket_pos: pos as u32,
                 prev_pos: entry::PrevPos::Unknown(
-                    self.get_hash_value(&key)
+                    self.get_hash_value(key)
                 ),
                 map,
             }),
@@ -351,7 +358,7 @@ where
     /// in the process.
     fn rehash_dict(
         &self,
-        inner: &mut CoreHashMap<'a, K, V>,
+        inner: &mut RwLockWriteGuard<'_, CoreHashMap<'a, K, V>>,
         buckets_ptr: *mut core::Bucket<K, V>,
         end_ptr: *mut u8,
         num_buckets: u32,
@@ -482,10 +489,8 @@ where
     /// Complete a shrink after caller has evicted entries, removing the unused buckets and rehashing.
     ///
    /// # Panics
-    /// The following cases result in a panic:
+    /// The following two cases result in a panic:
     /// - Calling this function on a map initialized with [`HashMapInit::with_fixed`].
     /// - Calling this function on a map when no shrink operation is in progress.
-    /// - Calling this function on a map with `shrink_mode` set to [`HashMapShrinkMode::Remap`] and
-    ///   there are more buckets in use than the value returned by [`HashMapAccess::shrink_goal`].
     ///
     /// # Errors
diff --git a/libs/neon-shmem/src/sync.rs b/libs/neon-shmem/src/sync.rs
index fc39df9100..8837971547 100644
--- a/libs/neon-shmem/src/sync.rs
+++ b/libs/neon-shmem/src/sync.rs
@@ -11,11 +11,18 @@ pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, PthreadRwLock,
 pub type ValueReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, PthreadRwLock, T>;
 pub type ValueWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, PthreadRwLock, T>;
 
-/// Shared memory read-write lock.
+/// Wrapper around a pointer to a [`libc::pthread_rwlock_t`].
+///
+/// `PthreadRwLock(None)` is an invalid state for this type. It only exists because the
+/// [`lock_api::RawRwLock`] trait has a mandatory `INIT` const member to allow for static
+/// initialization of the lock. Unfortunately, pthread does not appear to support any way
+/// to statically initialize a `pthread_rwlock_t` with `PTHREAD_PROCESS_SHARED` set. However,
+/// `lock_api` allows manual construction and does not seem to use `INIT` itself, so for
+/// now it is set to this invalid value to satisfy the trait constraints.
 pub struct PthreadRwLock(Option<NonNull<libc::pthread_rwlock_t>>);
 
 impl PthreadRwLock {
-    pub fn new(lock: *mut libc::pthread_rwlock_t) -> Self {
+    pub fn new(lock: NonNull<libc::pthread_rwlock_t>) -> Self {
         unsafe {
             let mut attrs = MaybeUninit::uninit();
             // Ignoring return value here - only possible error is OOM.
@@ -24,34 +31,40 @@ impl PthreadRwLock {
                 attrs.as_mut_ptr(),
                 libc::PTHREAD_PROCESS_SHARED
             );
-            // TODO(quantumish): worth making this function return Result?
-            libc::pthread_rwlock_init(lock, attrs.as_mut_ptr());
+            // TODO(quantumish): worth making this function fallible?
+            libc::pthread_rwlock_init(lock.as_ptr(), attrs.as_mut_ptr());
             // Safety: POSIX specifies that "any function affecting the attributes
             // object (including destruction) shall not affect any previously
             // initialized read-write locks".
             libc::pthread_rwlockattr_destroy(attrs.as_mut_ptr());
-            Self(Some(NonNull::new_unchecked(lock)))
+            Self(Some(lock))
         }
     }
 
     fn inner(&self) -> NonNull<libc::pthread_rwlock_t> {
-        match self.0 {
-            None => panic!("PthreadRwLock constructed badly - something likely used RawMutex::INIT"),
-            Some(x) => x,
+        self.0.unwrap_or_else(
+            || panic!("PthreadRwLock constructed badly - something likely used RawRwLock::INIT")
+        )
+    }
+
+    fn unlock(&self) {
+        unsafe {
+            let res = libc::pthread_rwlock_unlock(self.inner().as_ptr());
+            assert!(res == 0, "unlock failed with {}", Errno::from_raw(res));
         }
     }
 }
 
 unsafe impl lock_api::RawRwLock for PthreadRwLock {
     type GuardMarker = lock_api::GuardSend;
+
+    /// *DO NOT USE THIS.* See [`PthreadRwLock`] for the full explanation.
     const INIT: Self = Self(None);
 
     fn lock_shared(&self) {
         unsafe {
             let res = libc::pthread_rwlock_rdlock(self.inner().as_ptr());
-            if res != 0 {
-                panic!("rdlock failed with {}", Errno::from_raw(res));
-            }
+            assert!(res == 0, "rdlock failed with {}", Errno::from_raw(res));
         }
     }
 
@@ -61,7 +74,7 @@ unsafe impl lock_api::RawRwLock for PthreadRwLock {
             match res {
                 0 => true,
                 libc::EAGAIN => false,
-                o => panic!("try_rdlock failed with {}", Errno::from_raw(res)),
+                o => panic!("try_rdlock failed with {}", Errno::from_raw(o)),
             }
         }
     }
@@ -69,9 +82,7 @@ unsafe impl lock_api::RawRwLock for PthreadRwLock {
     fn lock_exclusive(&self) {
         unsafe {
             let res = libc::pthread_rwlock_wrlock(self.inner().as_ptr());
-            if res != 0 {
-                panic!("wrlock failed with {}", Errno::from_raw(res));
-            }
+            assert!(res == 0, "wrlock failed with {}", Errno::from_raw(res));
         }
     }
 
@@ -81,25 +92,16 @@ unsafe impl lock_api::RawRwLock for PthreadRwLock {
             match res {
                 0 => true,
                 libc::EAGAIN => false,
-                o => panic!("try_wrlock failed with {}", Errno::from_raw(res)),
+                o => panic!("try_wrlock failed with {}", Errno::from_raw(o)),
             }
         }
     }
 
     unsafe fn unlock_exclusive(&self) {
-        unsafe {
-            let res = libc::pthread_rwlock_unlock(self.inner().as_ptr());
-            if res != 0 {
-                panic!("unlock failed with {}", Errno::from_raw(res));
-            }
-        }
+        self.unlock();
     }
+
     unsafe fn unlock_shared(&self) {
-        unsafe {
-            let res = libc::pthread_rwlock_unlock(self.inner().as_ptr());
-            if res != 0 {
-                panic!("unlock failed with {}", Errno::from_raw(res));
-            }
-        }
+        self.unlock();
     }
 }
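
Note for reviewers: the bucket-array/dictionary scheme that the rewrapped module docs in hash.rs describe can be hard to picture from prose alone. Below is a minimal toy sketch of that design, under stated assumptions: `Bucket`, `Map`, `INVALID`, and `hash_of` are illustrative stand-ins rather than the crate's actual definitions, and a plain `Vec` plus `DefaultHasher` replace the shared-memory storage and the map's real `BuildHasher`.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

const INVALID: u32 = u32::MAX; // sentinel: end of a chain / empty dictionary slot

struct Bucket<K, V> {
    entry: Option<(K, V)>, // None => the bucket sits on the implicit freelist
    next: u32,             // next bucket in the same hash chain (or freelist)
}

struct Map<K, V> {
    buckets: Vec<Bucket<K, V>>, // the real map lives in raw byte-level storage
    dictionary: Vec<u32>,       // hash slot -> index of the chain's head bucket
    free_head: u32,             // head of the freelist threaded through `next`
}

fn hash_of<K: Hash>(key: &K) -> u64 {
    let mut h = DefaultHasher::new();
    key.hash(&mut h);
    h.finish()
}

impl<K: Hash + Eq, V> Map<K, V> {
    /// Lookup: hash into the dictionary, then walk the chain. Buckets never move.
    fn get(&self, key: &K) -> Option<&V> {
        let slot = (hash_of(key) as usize) % self.dictionary.len();
        let mut idx = self.dictionary[slot];
        while idx != INVALID {
            let b = &self.buckets[idx as usize];
            match &b.entry {
                Some((k, v)) if k == key => return Some(v),
                _ => idx = b.next,
            }
        }
        None
    }

    /// Insert: pop an empty bucket off the freelist, fill it, and push it onto
    /// the head of its dictionary slot's chain (duplicate keys ignored for brevity).
    fn insert(&mut self, key: K, value: V) -> Result<(), (K, V)> {
        let idx = self.free_head;
        if idx == INVALID {
            return Err((key, value)); // full; the real map would grow instead
        }
        self.free_head = self.buckets[idx as usize].next;
        let slot = (hash_of(&key) as usize) % self.dictionary.len();
        self.buckets[idx as usize] = Bucket {
            entry: Some((key, value)),
            next: self.dictionary[slot],
        };
        self.dictionary[slot] = idx;
        Ok(())
    }

    /// Resize: only the dictionary is rebuilt; every occupied bucket is rehashed
    /// in place and re-linked, so bucket indices stay stable.
    fn rehash_dict(&mut self, new_len: usize) {
        self.dictionary = vec![INVALID; new_len];
        for i in 0..self.buckets.len() {
            let slot = match &self.buckets[i].entry {
                Some((k, _)) => (hash_of(k) as usize) % new_len,
                None => continue,
            };
            self.buckets[i].next = self.dictionary[slot];
            self.dictionary[slot] = i as u32;
        }
    }
}

The point of the sketch is the invariant the docs lean on: a resize rebuilds only the dictionary, so bucket indices (and therefore the entries themselves) stay put.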
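
Likewise for sync.rs, here is a hedged sketch of the call path after this patch, to make the guard-drop-to-`unlock()` flow concrete. Only `PthreadRwLock`, its new `NonNull` signature, and `lock_api::RwLock::from_raw` are taken from the diff; `demo`, the `u64` payload, and the raw pointer argument are invented for illustration, and the `pthread_rwlock_t` is assumed to already live in mapped shared memory.

use std::ptr::NonNull;

// Illustrative only: in the crate the rwlock bytes come from a ShmemHandle
// mapping; here the caller just hands us a raw pointer into that region.
fn demo(raw: *mut libc::pthread_rwlock_t) {
    // Mirrors the new hash.rs call site: an explicit NonNull::new(..).unwrap()
    // null check instead of a NonNull::new_unchecked hidden inside new().
    let raw_lock = PthreadRwLock::new(NonNull::new(raw).unwrap());

    // from_raw pairs the already-initialized raw lock with the value it guards,
    // just as the hash.rs hunk pairs it with the CoreHashMap.
    let lock: lock_api::RwLock<PthreadRwLock, u64> =
        lock_api::RwLock::from_raw(raw_lock, 0);

    {
        let mut w = lock.write(); // lock_exclusive -> pthread_rwlock_wrlock
        *w += 1;
    } // write guard drops -> unlock_exclusive -> the shared unlock() helper

    let r = lock.read(); // lock_shared -> pthread_rwlock_rdlock
    assert_eq!(*r, 1);
} // read guard drops -> unlock_shared -> the same unlock() helper

Both guard drops funnel through the new private `unlock()` helper, which is what lets `unlock_exclusive` and `unlock_shared` collapse to one-liners in the final hunk.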