From f7c908f2f047e90a5f3fadd148e9d6a1c604bf46 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 12 May 2025 01:01:50 +0300 Subject: [PATCH] more metrics --- libs/neonart/src/algorithm.rs | 5 +++ libs/neonart/src/allocator.rs | 11 +----- libs/neonart/src/allocator/block.rs | 2 +- libs/neonart/src/lib.rs | 16 ++++++--- .../neon/communicator/src/integrated_cache.rs | 35 +++++++++++++++++++ 5 files changed, 54 insertions(+), 15 deletions(-) diff --git a/libs/neonart/src/algorithm.rs b/libs/neonart/src/algorithm.rs index c0c4b19e93..f1ee34c140 100644 --- a/libs/neonart/src/algorithm.rs +++ b/libs/neonart/src/algorithm.rs @@ -3,6 +3,7 @@ pub(crate) mod node_ptr; mod node_ref; use std::vec::Vec; +use std::sync::atomic::Ordering; use crate::algorithm::lock_and_version::ConcurrentUpdateError; use crate::algorithm::node_ptr::MAX_PREFIX_LEN; @@ -253,6 +254,7 @@ where UpdateAction::Nothing => {} UpdateAction::Insert(new_value) => { insert_split_prefix(key, new_value, &mut wnode, &mut wparent, parent_key, guard)?; + guard.tree_writer.tree.num_values.fetch_add(1, Ordering::Relaxed); } UpdateAction::Remove => { panic!("unexpected Remove action on insertion"); @@ -285,6 +287,7 @@ where // TODO: If parent has only one child left, merge it with the child, extending its // prefix wparent.delete_child(parent_key); + guard.tree_writer.tree.num_values.fetch_sub(1, Ordering::Relaxed); } } wnode.write_unlock(); @@ -310,6 +313,7 @@ where insert_and_grow(key, new_value, &wnode, &mut wparent, parent_key, guard)?; wnode.write_unlock_obsolete(); wparent.write_unlock(); + guard.tree_writer.tree.num_values.fetch_add(1, Ordering::Relaxed); } UpdateAction::Remove => { panic!("unexpected Remove action on insertion"); @@ -324,6 +328,7 @@ where UpdateAction::Nothing => {} UpdateAction::Insert(new_value) => { insert_to_node(&mut wnode, key, new_value, guard)?; + guard.tree_writer.tree.num_values.fetch_add(1, Ordering::Relaxed); } UpdateAction::Remove => { panic!("unexpected Remove action on 
insertion"); diff --git a/libs/neonart/src/allocator.rs b/libs/neonart/src/allocator.rs index fef89da4a2..ce3fe08c21 100644 --- a/libs/neonart/src/allocator.rs +++ b/libs/neonart/src/allocator.rs @@ -12,7 +12,6 @@ use crate::allocator::r#static::alloc_from_slice; use spin; -use crate::ArtTreeStatistics; use crate::Tree; pub use crate::algorithm::node_ptr::{ NodeInternal4, NodeInternal16, NodeInternal48, NodeInternal256, NodeLeaf, @@ -42,7 +41,7 @@ where { tree_area: spin::Mutex>>>, - inner: MultiSlabAllocator<'t, 5>, + pub(crate) inner: MultiSlabAllocator<'t, 5>, phantom_val: PhantomData, } @@ -112,11 +111,3 @@ impl<'t, V: crate::Value> ArtAllocator for ArtMultiSlabAllocator<'t, V> { self.inner.dealloc_slab(4, ptr.cast()) } } - -impl<'t, V: crate::Value> ArtMultiSlabAllocator<'t, V> { - pub fn get_statistics(&self) -> ArtTreeStatistics { - ArtTreeStatistics { - blocks: self.inner.block_allocator.get_statistics(), - } - } -} diff --git a/libs/neonart/src/allocator/block.rs b/libs/neonart/src/allocator/block.rs index 292a74f148..03aea91157 100644 --- a/libs/neonart/src/allocator/block.rs +++ b/libs/neonart/src/allocator/block.rs @@ -5,7 +5,7 @@ use std::sync::atomic::{AtomicU64, Ordering}; use spin; -pub(crate) const BLOCK_SIZE: usize = 16 * 1024; +pub const BLOCK_SIZE: usize = 16 * 1024; const INVALID_BLOCK: u64 = u64::MAX; diff --git a/libs/neonart/src/lib.rs b/libs/neonart/src/lib.rs index 4e93bf5c0e..c8ccaa9647 100644 --- a/libs/neonart/src/lib.rs +++ b/libs/neonart/src/lib.rs @@ -131,7 +131,7 @@ use std::collections::VecDeque; use std::fmt::Debug; use std::marker::PhantomData; use std::ptr::NonNull; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use crate::epoch::EpochPin; @@ -164,6 +164,9 @@ pub struct Tree { writer_attached: AtomicBool, epoch: epoch::EpochShared, + + // for metrics + num_values: AtomicU64, } unsafe impl Sync for Tree {} @@ -213,7 +216,7 @@ where { tree: &'t Tree, - allocator: &'t A, + 
pub allocator: &'t A, epoch_handle: epoch::LocalHandle<'t>, @@ -244,6 +247,7 @@ impl<'a, 't: 'a, K: Key, V: Value, A: ArtAllocator> TreeInitStruct<'t, K, V, root: algorithm::new_root(allocator), writer_attached: AtomicBool::new(false), epoch: epoch::EpochShared::new(), + num_values: AtomicU64::new(0), }; unsafe { tree_ptr.write(init) }; @@ -551,13 +555,17 @@ impl<'e, K: Key, V: Value + Debug> TreeReadGuard<'e, K, V> { algorithm::dump_tree(self.tree.root, &self.epoch_pin) } } -impl<'e, K: Key, V: Value + Debug> TreeWriteGuard<'e, K, V, ArtMultiSlabAllocator<'e, V>> { +impl<'e, K: Key, V: Value> TreeWriteAccess<'e, K, V, ArtMultiSlabAllocator<'e, V>> { pub fn get_statistics(&self) -> ArtTreeStatistics { - self.tree_writer.allocator.get_statistics() + ArtTreeStatistics { + num_values: self.tree.num_values.load(Ordering::Relaxed), + blocks: self.allocator.inner.block_allocator.get_statistics(), + } } } #[derive(Clone, Debug)] pub struct ArtTreeStatistics { + pub num_values: u64, pub blocks: allocator::block::BlockAllocatorStats, } diff --git a/pgxn/neon/communicator/src/integrated_cache.rs b/pgxn/neon/communicator/src/integrated_cache.rs index 1b86262993..29eae38fc4 100644 --- a/pgxn/neon/communicator/src/integrated_cache.rs +++ b/pgxn/neon/communicator/src/integrated_cache.rs @@ -66,8 +66,13 @@ pub struct IntegratedCacheWriteAccess<'t> { clock_hand: std::sync::Mutex>, // Metrics + entries_total: metrics::IntGauge, page_evictions_counter: metrics::IntCounter, clock_iterations_counter: metrics::IntCounter, + + // metrics from the art tree + cache_memory_size_bytes: metrics::IntGauge, + cache_memory_used_bytes: metrics::IntGauge, } /// Represents read-only access to the integrated cache. Backend processes have this. 
@@ -113,6 +118,11 @@ impl<'t> IntegratedCacheInitStruct<'t> { file_cache, clock_hand: std::sync::Mutex::new(TreeIterator::new_wrapping()), + entries_total: metrics::IntGauge::new( + "entries_total", + "Number of entries in the cache", + ).unwrap(), + page_evictions_counter: metrics::IntCounter::new( + "integrated_cache_evictions", "Page evictions from the Local File Cache", @@ -122,6 +132,15 @@ "clock_iterations", "Number of times the clock hand has moved", ).unwrap(), + + cache_memory_size_bytes: metrics::IntGauge::new( + "cache_memory_size_bytes", + "Memory reserved for cache metadata", + ).unwrap(), + cache_memory_used_bytes: metrics::IntGauge::new( + "cache_memory_used_bytes", + "Memory used for cache metadata", + ).unwrap(), } } @@ -612,14 +631,30 @@ impl<'t> IntegratedCacheWriteAccess<'t> { impl metrics::core::Collector for IntegratedCacheWriteAccess<'_> { fn desc(&self) -> Vec<&metrics::core::Desc> { let mut descs = Vec::new(); + descs.append(&mut self.entries_total.desc()); descs.append(&mut self.page_evictions_counter.desc()); descs.append(&mut self.clock_iterations_counter.desc()); + + descs.append(&mut self.cache_memory_size_bytes.desc()); + descs.append(&mut self.cache_memory_used_bytes.desc()); descs } fn collect(&self) -> Vec { + // Update gauges + let art_statistics = self.cache_tree.get_statistics(); + self.entries_total.set(art_statistics.num_values as i64); + let block_statistics = &art_statistics.blocks; + self.cache_memory_size_bytes.set(block_statistics.num_blocks as i64 * neonart::allocator::block::BLOCK_SIZE as i64); + self.cache_memory_used_bytes.set((block_statistics.num_initialized as i64 - block_statistics.num_free_blocks as i64 ) * neonart::allocator::block::BLOCK_SIZE as i64); + let mut values = Vec::new(); + values.append(&mut self.entries_total.collect()); values.append(&mut self.page_evictions_counter.collect()); values.append(&mut self.clock_iterations_counter.collect()); + + values.append(&mut 
self.cache_memory_size_bytes.collect()); + values.append(&mut self.cache_memory_used_bytes.collect()); + values } }