From 42df3e54532087193b21c2fbb9ce2bb9888d0ac0 Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas
Date: Sat, 3 May 2025 19:21:29 +0300
Subject: [PATCH] debugging stats

---
 libs/neonart/src/algorithm.rs       | 18 ++++++++++---
 libs/neonart/src/allocator.rs       | 12 ++++++++-
 libs/neonart/src/allocator/block.rs | 31 +++++++++++++++++++++++
 libs/neonart/src/lib.rs             | 39 ++++++++++++++++++++---------
 libs/neonart/src/tests.rs           |  2 ++
 5 files changed, 86 insertions(+), 16 deletions(-)

diff --git a/libs/neonart/src/algorithm.rs b/libs/neonart/src/algorithm.rs
index 93c7d9274b..e476c14f12 100644
--- a/libs/neonart/src/algorithm.rs
+++ b/libs/neonart/src/algorithm.rs
@@ -73,7 +73,8 @@ pub(crate) fn update_fn<'e, K: Key, V: Value, A: ArtAllocator, F>(
         let root_ref = NodeRef::from_root_ptr(root);
         let this_value_fn = |arg: Option<&V>| value_fn_cell.take().unwrap()(arg);
         let key_bytes = key.as_bytes();
-        if let Ok(()) = update_recurse(
+
+        match update_recurse(
             key_bytes,
             this_value_fn,
             root_ref,
@@ -82,9 +83,20 @@
             0,
             key_bytes,
         ) {
-            break;
+            Ok(()) => break,
+            Err(ArtError::ConcurrentUpdate) => continue, // retry
+            Err(ArtError::OutOfMemory) => {
+                panic!("todo: OOM: try to GC, propagate to caller");
+            },
+            Err(ArtError::GarbageQueueFull) => {
+                if guard.collect_garbage() {
+                    continue;
+                }
+                // FIXME: This can happen if someone is holding back the epoch. We should
+                // wait for the epoch to advance.
+                panic!("todo: GC queue is full and couldn't free up space");
+            },
         }
-        // retry
     }
 }

diff --git a/libs/neonart/src/allocator.rs b/libs/neonart/src/allocator.rs
index 602551fdd6..008ed34194 100644
--- a/libs/neonart/src/allocator.rs
+++ b/libs/neonart/src/allocator.rs
@@ -1,4 +1,4 @@
-mod block;
+pub mod block;
 mod multislab;
 mod slab;
 pub mod r#static;
@@ -12,6 +12,7 @@ use crate::allocator::r#static::alloc_from_slice;

 use spin;

+use crate::ArtTreeStatistics;
 use crate::Tree;
 pub use crate::algorithm::node_ptr::{
     NodeInternal4, NodeInternal16, NodeInternal48, NodeInternal256, NodeLeaf4, NodeLeaf16,
@@ -138,3 +139,12 @@ impl<'t, V: crate::Value> ArtAllocator for ArtMultiSlabAllocator<'t, V> {
         self.inner.dealloc_slab(7, ptr.cast())
     }
 }
+
+
+impl<'t, V: crate::Value> ArtMultiSlabAllocator<'t, V> {
+    pub fn get_statistics(&self) -> ArtTreeStatistics {
+        ArtTreeStatistics {
+            blocks: self.inner.block_allocator.get_statistics(),
+        }
+    }
+}
diff --git a/libs/neonart/src/allocator/block.rs b/libs/neonart/src/allocator/block.rs
index 54909b3f10..a8e02ef4b8 100644
--- a/libs/neonart/src/allocator/block.rs
+++ b/libs/neonart/src/allocator/block.rs
@@ -149,4 +149,35 @@ impl<'t> BlockAllocator<'t> {
         unsafe { (*block_ptr) = init };
         *freelist_head = blockno;
     }
+
+    // for debugging
+    pub(crate) fn get_statistics(&self) -> BlockAllocatorStats {
+        let mut num_free_blocks = 0;
+
+        let mut _prev_lock = None;
+        let head_lock = self.freelist_head.lock();
+        let mut next_blk = *head_lock;
+        let mut _head_lock = Some(head_lock);
+        while next_blk != INVALID_BLOCK {
+            let freelist_block = self.read_freelist_block(next_blk);
+            let lock = freelist_block.inner.lock();
+            num_free_blocks += lock.num_free_blocks;
+            next_blk = lock.next;
+            _prev_lock = Some(lock); // hold the lock until we've read the next block
+            _head_lock = None;
+        }
+
+        BlockAllocatorStats {
+            num_blocks: self.num_blocks,
+            num_initialized: self.num_initialized.load(Ordering::Relaxed),
+            num_free_blocks,
+        }
+    }
 }
+
+#[derive(Clone, Debug)]
+pub struct BlockAllocatorStats {
+    pub num_blocks: u64,
+    pub num_initialized: u64,
+    pub num_free_blocks: u64,
+}
diff --git a/libs/neonart/src/lib.rs b/libs/neonart/src/lib.rs
index a3c4b879c5..7ab184ae4e 100644
--- a/libs/neonart/src/lib.rs
+++ b/libs/neonart/src/lib.rs
@@ -311,18 +311,6 @@ impl<'t, K: Key + Clone, V: Value, A: ArtAllocator> TreeWriteAccess<'t, K, V,
             phantom_key: PhantomData,
         }
     }
-
-    pub fn collect_garbage(&'t self) {
-        self.tree.epoch.advance();
-        self.tree.epoch.broadcast();
-
-        let cutoff_epoch = self.tree.epoch.get_oldest();
-
-        let mut garbage_queue = self.tree.garbage.lock();
-        while let Some(ptr) = garbage_queue.next_obsolete(cutoff_epoch) {
-            ptr.deallocate(self.allocator);
-        }
-    }
 }

 impl<'t, K: Key + Clone, V: Value> TreeReadAccess<'t, K, V> {
@@ -386,10 +374,37 @@ impl<'t, K: Key, V: Value, A: ArtAllocator> TreeWriteGuard<'t, K, V, A> {
             .lock()
             .remember_obsolete_node(ptr, self.epoch_pin.epoch)
     }
+
+    // Returns true if something was freed up.
+    fn collect_garbage(&'t self) -> bool {
+        let mut result = false;
+        self.tree.epoch.advance();
+        self.tree.epoch.broadcast();
+
+        let cutoff_epoch = self.tree.epoch.get_oldest();
+
+        let mut garbage_queue = self.tree.garbage.lock();
+        while let Some(ptr) = garbage_queue.next_obsolete(cutoff_epoch) {
+            ptr.deallocate(self.allocator);
+            result = true;
+        }
+        result
+    }
 }

+// Debugging functions
 impl<'t, K: Key, V: Value + Debug> TreeReadGuard<'t, K, V> {
     pub fn dump(&mut self) {
         algorithm::dump_tree(self.tree.root, &self.epoch_pin)
     }
 }
+impl<'t, K: Key, V: Value + Debug> TreeWriteGuard<'t, K, V, ArtMultiSlabAllocator<'t, V>> {
+    pub fn get_statistics(&self) -> ArtTreeStatistics {
+        self.allocator.get_statistics()
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct ArtTreeStatistics {
+    pub blocks: allocator::block::BlockAllocatorStats,
+}
diff --git a/libs/neonart/src/tests.rs b/libs/neonart/src/tests.rs
index 06b7ca383b..fc79b32c11 100644
--- a/libs/neonart/src/tests.rs
+++ b/libs/neonart/src/tests.rs
@@ -51,6 +51,8 @@ fn test_inserts<K: Into<…> + Copy>(keys: &[K]) {
         let value = r.get(&(*k).into());
         assert_eq!(value, Some(idx));
     }
+
+    eprintln!("stats: {:?}", tree_writer.start_write().get_statistics());
 }

 #[test]
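
A note on the freelist walk in BlockAllocator::get_statistics() above: it uses
lock coupling (hand-over-hand locking). The guard on the previous freelist
block is kept alive in _prev_lock until the next block's lock has been taken,
so a concurrent alloc/dealloc cannot unlink the block the walk is standing on.
Below is a minimal standalone sketch of the same pattern, using the spin crate
the allocator already depends on; the Node/Inner types and sum_chain function
are illustrative inventions for this sketch, not part of the patch:

    use spin::Mutex;

    struct Node<'a> {
        inner: Mutex<Inner<'a>>,
    }

    struct Inner<'a> {
        value: u64,
        next: Option<&'a Node<'a>>,
    }

    // Sums `value` over the chain with lock coupling: each node's guard is
    // moved into `_prev` only after the next pointer has been read, and the
    // previous guard is dropped by that same assignment, i.e. only once the
    // next node's lock is already held.
    fn sum_chain<'a>(head: &'a Node<'a>) -> u64 {
        let mut total = 0;
        let mut _prev = None; // keeps the previous node's guard alive
        let mut cur = Some(head);
        while let Some(node) = cur {
            let guard = node.inner.lock();
            total += guard.value;
            cur = guard.next;
            _prev = Some(guard); // old guard dropped here, new lock already held
        }
        total
    }

    fn main() {
        // c is declared first so it outlives the references held by b and a.
        let c = Node { inner: Mutex::new(Inner { value: 3, next: None }) };
        let b = Node { inner: Mutex::new(Inner { value: 2, next: Some(&c) }) };
        let a = Node { inner: Mutex::new(Inner { value: 1, next: Some(&b) }) };
        assert_eq!(sum_chain(&a), 6);
    }

The resulting statistics are only a step-by-step-consistent snapshot: the
totals can keep changing while the walk is in progress, which is fine for the
debugging purpose the patch states.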