Mirror of https://github.com/neondatabase/neon.git, synced 2026-01-15 09:22:55 +00:00
refactor: move all metrics updates to layer
This fixes some missing increments for num_persistent_files_created and persistent_bytes_written, and removes double entries for residence events.
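The shape of the change, as a minimal sketch with hypothetical stand-in types (TimelineMetrics here is a plain struct of atomics rather than the pageserver's prometheus-backed metrics, and Layer::finish_creating is an invented name): the metric updates live in the single function that finalizes a layer file, so callers no longer pass a metrics handle around, every creation path increments the counters exactly once, and residence events cannot be recorded twice.

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

// Hypothetical, simplified stand-in for the timeline's metric set; the real
// pageserver uses prometheus gauges and counters.
#[derive(Default)]
struct TimelineMetrics {
    resident_physical_size: AtomicU64,
    num_persistent_files_created: AtomicU64,
    persistent_bytes_written: AtomicU64,
}

// Hypothetical layer handle; only the metric bookkeeping is of interest here.
#[allow(dead_code)]
struct Layer {
    file_size: u64,
}

impl Layer {
    // The one place a freshly written layer file updates the metrics, so
    // every creation path (flush, compaction, image layer creation) is
    // counted once and none can be forgotten.
    fn finish_creating(metrics: &TimelineMetrics, file_size: u64) -> Layer {
        metrics
            .resident_physical_size
            .fetch_add(file_size, Ordering::Relaxed);
        metrics
            .num_persistent_files_created
            .fetch_add(1, Ordering::Relaxed);
        metrics
            .persistent_bytes_written
            .fetch_add(file_size, Ordering::Relaxed);
        Layer { file_size }
    }
}

fn main() {
    let metrics = Arc::new(TimelineMetrics::default());

    // Call sites no longer receive a metrics argument or update gauges
    // themselves; they just create layers.
    let _delta = Layer::finish_creating(&metrics, 8 * 1024 * 1024);
    let _image = Layer::finish_creating(&metrics, 4 * 1024 * 1024);

    assert_eq!(
        metrics.num_persistent_files_created.load(Ordering::Relaxed),
        2
    );
    assert_eq!(
        metrics.persistent_bytes_written.load(Ordering::Relaxed),
        12 * 1024 * 1024
    );
}

The actual commit applies this pattern by updating the metrics inside Layer itself (the first two hunks below) and dropping the metrics parameter and per-call-site updates from the Timeline and LayerManager code (the remaining hunks).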
@@ -119,9 +119,14 @@ impl Layer {
             LayerInner::new(conf, timeline, access_stats, desc, Some(inner))
         }));

+        let downloaded = resident.expect("just initialized");
+
         debug_assert!(owner.0.needs_download_blocking().unwrap().is_none());

-        let downloaded = resident.expect("just initialized");
+        timeline
+            .metrics
+            .resident_physical_size_gauge
+            .add(metadata.file_size());

         ResidentLayer { downloaded, owner }
     }
@@ -156,6 +161,15 @@ impl Layer {
         std::fs::rename(temp_path, owner.local_path())
             .context("rename temporary file as correct path for {owner}")?;

+        {
+            let metrics = &timeline.metrics;
+            let file_size = owner.layer_desc().file_size;
+
+            metrics.resident_physical_size_gauge.add(file_size);
+            metrics.num_persistent_files_created.inc_by(1);
+            metrics.persistent_bytes_written.inc_by(file_size);
+        }
+
         Ok(ResidentLayer { downloaded, owner })
     }
@@ -1544,9 +1544,6 @@ impl Timeline {
             "loaded layer map with {} layers at {}, total physical size: {}",
             num_layers, disk_consistent_lsn, total_physical_size
         );
-        self.metrics
-            .resident_physical_size_gauge
-            .set(total_physical_size);

         timer.stop_and_record();
         Ok(())
@@ -2372,7 +2369,7 @@ impl Timeline {
         // the mapping in `create_delta_layer`.
         {
             let mut guard = self.layers.write().await;
-            guard.finish_flush_l0_layer(delta_layer_to_add.as_ref(), &frozen_layer, &self.metrics);
+            guard.finish_flush_l0_layer(delta_layer_to_add.as_ref(), &frozen_layer);
         }

         // FIXME: between create_delta_layer and the scheduling of the upload in `update_metadata_file`,
@@ -2735,7 +2732,7 @@ impl Timeline {

         let mut guard = self.layers.write().await;

-        guard.track_new_image_layers(&image_layers, &self.metrics);
+        guard.track_new_image_layers(&image_layers);
         drop_wlock(guard);
         timer.stop_and_record();
@@ -3399,12 +3396,7 @@ impl Timeline {

         // deletion will happen later, the layer file manager sets wanted_garbage_collected

-        guard.finish_compact_l0(
-            &layer_removal_cs,
-            remove_layers,
-            &insert_layers,
-            &self.metrics,
-        )?;
+        guard.finish_compact_l0(&layer_removal_cs, remove_layers, &insert_layers)?;

         drop_wlock(guard);
@@ -1,5 +1,4 @@
 use anyhow::{bail, ensure, Context, Result};
-use pageserver_api::models::{LayerResidenceEventReason, LayerResidenceStatus};
 use std::{collections::HashMap, sync::Arc};
 use tracing::trace;
 use utils::{
@@ -154,20 +153,9 @@ impl LayerManager {
     }

     /// Add image layers to the layer map, called from `create_image_layers`.
-    pub(crate) fn track_new_image_layers(
-        &mut self,
-        image_layers: &[ResidentLayer],
-        metrics: &crate::metrics::TimelineMetrics,
-    ) {
+    pub(crate) fn track_new_image_layers(&mut self, image_layers: &[ResidentLayer]) {
         let mut updates = self.layer_map.batch_update();
         for layer in image_layers {
-            metrics
-                .resident_physical_size_gauge
-                .add(layer.layer_desc().file_size);
-            layer.access_stats().record_residence_event(
-                LayerResidenceStatus::Resident,
-                LayerResidenceEventReason::LayerCreate,
-            );
             Self::insert_historic_layer(layer.as_ref().clone(), &mut updates, &mut self.layer_fmgr);
         }
         updates.flush();
@@ -178,7 +166,6 @@ impl LayerManager {
         &mut self,
         delta_layer: Option<&ResidentLayer>,
         frozen_layer_for_check: &Arc<InMemoryLayer>,
-        metrics: &crate::metrics::TimelineMetrics,
     ) {
         let inmem = self
             .layer_map
@@ -193,15 +180,6 @@ impl LayerManager {

         if let Some(l) = delta_layer {
             let mut updates = self.layer_map.batch_update();
-            l.access_stats().record_residence_event(
-                LayerResidenceStatus::Resident,
-                LayerResidenceEventReason::LayerCreate,
-            );
-            let sz = l.layer_desc().file_size;
-            metrics.resident_physical_size_gauge.add(sz);
-            metrics.num_persistent_files_created.inc_by(1);
-            metrics.persistent_bytes_written.inc_by(sz);
-
             Self::insert_historic_layer(l.as_ref().clone(), &mut updates, &mut self.layer_fmgr);
             updates.flush();
         }
@@ -213,18 +191,9 @@ impl LayerManager {
         layer_removal_cs: &Arc<tokio::sync::OwnedMutexGuard<()>>,
         compact_from: Vec<Layer>,
         compact_to: &[ResidentLayer],
-        metrics: &crate::metrics::TimelineMetrics,
     ) -> Result<()> {
         let mut updates = self.layer_map.batch_update();
         for l in compact_to {
-            l.access_stats().record_residence_event(
-                LayerResidenceStatus::Resident,
-                LayerResidenceEventReason::LayerCreate,
-            );
-            metrics
-                .resident_physical_size_gauge
-                .add(l.layer_desc().file_size);
-
             Self::insert_historic_layer(l.as_ref().clone(), &mut updates, &mut self.layer_fmgr);
         }
         for l in compact_from {