From fa96b758bc27ebeaaae72bcec89a2825b094b1b8 Mon Sep 17 00:00:00 2001
From: John Spray
Date: Thu, 16 Jan 2025 15:22:17 +0000
Subject: [PATCH] pageserver: rename TenantManager -> TenantShardManager

---
 pageserver/src/consumption_metrics.rs         |  8 ++++----
 pageserver/src/consumption_metrics/metrics.rs |  4 ++--
 pageserver/src/disk_usage_eviction_task.rs    | 12 ++++++------
 pageserver/src/http/routes.rs                 |  8 ++++----
 pageserver/src/lib.rs                         |  4 ++--
 pageserver/src/page_service.rs                | 14 +++++++-------
 pageserver/src/tenant/mgr.rs                  | 12 ++++++------
 pageserver/src/tenant/secondary.rs            |  4 ++--
 pageserver/src/tenant/secondary/downloader.rs |  6 +++---
 .../src/tenant/secondary/heatmap_uploader.rs  |  6 +++---
 pageserver/src/utilization.rs                 |  6 ++++--
 11 files changed, 43 insertions(+), 41 deletions(-)

diff --git a/pageserver/src/consumption_metrics.rs b/pageserver/src/consumption_metrics.rs
index 8135240a4d..e2bae0f90a 100644
--- a/pageserver/src/consumption_metrics.rs
+++ b/pageserver/src/consumption_metrics.rs
@@ -7,7 +7,7 @@ use crate::context::{DownloadBehavior, RequestContext};
 use crate::task_mgr::{self, TaskKind, BACKGROUND_RUNTIME};
 use crate::tenant::size::CalculateSyntheticSizeError;
 use crate::tenant::tasks::BackgroundLoopKind;
-use crate::tenant::{mgr::TenantManager, LogicalSizeCalculationCause, TenantShard};
+use crate::tenant::{mgr::TenantShardManager, LogicalSizeCalculationCause, TenantShard};
 use camino::Utf8PathBuf;
 use consumption_metrics::EventType;
 use itertools::Itertools as _;
@@ -95,7 +95,7 @@ type Cache = HashMap;
 
 pub async fn run(
     conf: &'static PageServerConf,
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     cancel: CancellationToken,
 ) {
     let Some(metric_collection_endpoint) = conf.metric_collection_endpoint.as_ref() else {
@@ -150,7 +150,7 @@ pub async fn run(
 /// Main thread that serves metrics collection
 #[allow(clippy::too_many_arguments)]
 async fn collect_metrics(
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     metric_collection_endpoint: &Url,
     metric_collection_bucket: &Option<RemoteStorageConfig>,
     metric_collection_interval: Duration,
@@ -362,7 +362,7 @@ async fn reschedule(
 
 /// Caclculate synthetic size for each active tenant
 async fn calculate_synthetic_size_worker(
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     synthetic_size_calculation_interval: Duration,
     cancel: CancellationToken,
     ctx: RequestContext,
diff --git a/pageserver/src/consumption_metrics/metrics.rs b/pageserver/src/consumption_metrics/metrics.rs
index 2f87825373..a156701eeb 100644
--- a/pageserver/src/consumption_metrics/metrics.rs
+++ b/pageserver/src/consumption_metrics/metrics.rs
@@ -1,4 +1,4 @@
-use crate::tenant::mgr::TenantManager;
+use crate::tenant::mgr::TenantShardManager;
 use crate::{context::RequestContext, tenant::timeline::logical_size::CurrentLogicalSize};
 use chrono::{DateTime, Utc};
 use consumption_metrics::EventType;
@@ -213,7 +213,7 @@ impl MetricsKey {
 }
 
 pub(super) async fn collect_all_metrics(
-    tenant_manager: &Arc<TenantManager>,
+    tenant_manager: &Arc<TenantShardManager>,
     cached_metrics: &Cache,
     ctx: &RequestContext,
 ) -> Vec {
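
For context on the signatures above: the metrics collector runs as a detached background task, which is why it takes the manager by Arc rather than by reference. A minimal sketch of that ownership pattern, where `Manager` and `list_tenants` are illustrative stand-ins rather than pageserver APIs:

    use std::sync::Arc;
    use std::time::Duration;

    use tokio_util::sync::CancellationToken;

    struct Manager; // stand-in for the shared manager type

    impl Manager {
        fn list_tenants(&self) -> Vec<String> {
            Vec::new()
        }
    }

    async fn metrics_worker(mgr: Arc<Manager>, cancel: CancellationToken) {
        let mut ticker = tokio::time::interval(Duration::from_secs(60));
        loop {
            tokio::select! {
                // Exit promptly when shutdown is requested.
                _ = cancel.cancelled() => break,
                _ = ticker.tick() => {
                    for tenant in mgr.list_tenants() {
                        // collect and post consumption metrics for `tenant`
                        let _ = tenant;
                    }
                }
            }
        }
    }

The Arc keeps the manager alive for as long as the task runs, while the CancellationToken gives the caller a way to stop it.
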
diff --git a/pageserver/src/disk_usage_eviction_task.rs b/pageserver/src/disk_usage_eviction_task.rs
index ca44fbe6ae..a095df6719 100644
--- a/pageserver/src/disk_usage_eviction_task.rs
+++ b/pageserver/src/disk_usage_eviction_task.rs
@@ -57,7 +57,7 @@ use crate::{
     metrics::disk_usage_based_eviction::METRICS,
     task_mgr::{self, BACKGROUND_RUNTIME},
     tenant::{
-        mgr::TenantManager,
+        mgr::TenantShardManager,
         remote_timeline_client::LayerFileMetadata,
         secondary::SecondaryTenant,
         storage_layer::{AsLayerDesc, EvictionError, Layer, LayerName, LayerVisibilityHint},
@@ -166,7 +166,7 @@ pub fn launch_disk_usage_global_eviction_task(
     conf: &'static PageServerConf,
     storage: GenericRemoteStorage,
     state: Arc<State>,
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     background_jobs_barrier: completion::Barrier,
 ) -> Option<DiskUsageEvictionTask> {
     let Some(task_config) = &conf.disk_usage_based_eviction else {
@@ -203,7 +203,7 @@ async fn disk_usage_eviction_task(
     state: &State,
     task_config: &DiskUsageEvictionTaskConfig,
     storage: &GenericRemoteStorage,
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     cancel: CancellationToken,
 ) {
     scopeguard::defer! {
@@ -265,7 +265,7 @@ async fn disk_usage_eviction_task_iteration(
     state: &State,
     task_config: &DiskUsageEvictionTaskConfig,
     storage: &GenericRemoteStorage,
-    tenant_manager: &Arc<TenantManager>,
+    tenant_manager: &Arc<TenantShardManager>,
     cancel: &CancellationToken,
 ) -> anyhow::Result<()> {
     let tenants_dir = tenant_manager.get_conf().tenants_path();
@@ -361,7 +361,7 @@ pub(crate) async fn disk_usage_eviction_task_iteration_impl<U: Usage>(
     state: &State,
     _storage: &GenericRemoteStorage,
     usage_pre: U,
-    tenant_manager: &Arc<TenantManager>,
+    tenant_manager: &Arc<TenantShardManager>,
     eviction_order: EvictionOrder,
     cancel: &CancellationToken,
 ) -> anyhow::Result<IterationOutcome<U>> {
@@ -788,7 +788,7 @@ enum EvictionCandidates {
 /// - tenant B 1 layer
 /// - tenant C 8 layers
 async fn collect_eviction_candidates(
-    tenant_manager: &Arc<TenantManager>,
+    tenant_manager: &Arc<TenantShardManager>,
     eviction_order: EvictionOrder,
     cancel: &CancellationToken,
 ) -> anyhow::Result<EvictionCandidates> {
diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs
index d5ac8699ef..8f1a6d7595 100644
--- a/pageserver/src/http/routes.rs
+++ b/pageserver/src/http/routes.rs
@@ -74,7 +74,7 @@ use crate::task_mgr::TaskKind;
 use crate::tenant::config::{LocationConf, TenantConfOpt};
 use crate::tenant::mgr::GetActiveTenantError;
 use crate::tenant::mgr::{
-    GetTenantError, TenantManager, TenantMapError, TenantMapInsertError, TenantSlotError,
+    GetTenantError, TenantMapError, TenantMapInsertError, TenantShardManager, TenantSlotError,
     TenantSlotUpsertError, TenantStateError,
 };
 use crate::tenant::mgr::{TenantSlot, UpsertLocationError};
@@ -130,7 +130,7 @@ pub(crate) const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(30000);
 
 pub struct State {
     conf: &'static PageServerConf,
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     auth: Option<Arc<SwappableJwtAuth>>,
     allowlist_routes: &'static [&'static str],
     remote_storage: GenericRemoteStorage,
@@ -145,7 +145,7 @@ impl State {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
         conf: &'static PageServerConf,
-        tenant_manager: Arc<TenantManager>,
+        tenant_manager: Arc<TenantShardManager>,
         auth: Option<Arc<SwappableJwtAuth>>,
         remote_storage: GenericRemoteStorage,
         broker_client: storage_broker::BrokerClientChannel,
@@ -2571,7 +2571,7 @@ async fn timeline_collect_keyspace(
 }
 
 async fn active_timeline_of_active_tenant(
-    tenant_manager: &TenantManager,
+    tenant_manager: &TenantShardManager,
     tenant_shard_id: TenantShardId,
     timeline_id: TimelineId,
 ) -> Result<Arc<Timeline>, ApiError> {
diff --git a/pageserver/src/lib.rs b/pageserver/src/lib.rs
index ff6af3566c..81fccbc851 100644
--- a/pageserver/src/lib.rs
+++ b/pageserver/src/lib.rs
@@ -36,7 +36,7 @@ pub mod walredo;
 use camino::Utf8Path;
 use deletion_queue::DeletionQueue;
 use tenant::{
-    mgr::{BackgroundPurges, TenantManager},
+    mgr::{BackgroundPurges, TenantShardManager},
     secondary,
 };
 use tracing::{info, info_span};
@@ -81,7 +81,7 @@ pub async fn shutdown_pageserver(
     page_service: page_service::Listener,
     consumption_metrics_worker: ConsumptionMetricsTasks,
     disk_usage_eviction_task: Option<DiskUsageEvictionTask>,
-    tenant_manager: &TenantManager,
+    tenant_manager: &TenantShardManager,
     background_purges: BackgroundPurges,
     mut deletion_queue: DeletionQueue,
     secondary_controller_tasks: secondary::GlobalTasks,
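
The shutdown_pageserver signature above reflects a fixed teardown order: stop accepting connections, stop the background workers, and only then dismantle tenant state. A rough sketch of that ordering with assumed handle types, not the actual pageserver shutdown code:

    use tokio::task::JoinHandle;
    use tokio_util::sync::CancellationToken;

    // Tear down in dependency order so no worker observes tenant state
    // while it is being dismantled.
    async fn ordered_shutdown(
        listener: JoinHandle<()>,
        workers_cancel: CancellationToken,
        workers: JoinHandle<()>,
    ) {
        // 1. Stop accepting new connections (abort surfaces as a JoinError).
        listener.abort();
        let _ = listener.await;

        // 2. Ask background workers to stop, and wait for them.
        workers_cancel.cancel();
        let _ = workers.await;

        // 3. Only now shut down tenants and flush the deletion queue.
    }
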
diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs
index da4180a927..82fa6efaa6 100644
--- a/pageserver/src/page_service.rs
+++ b/pageserver/src/page_service.rs
@@ -59,7 +59,7 @@ use crate::span::debug_assert_current_span_has_tenant_and_timeline_id_no_shard_i
 use crate::task_mgr::TaskKind;
 use crate::task_mgr::{self, COMPUTE_REQUEST_RUNTIME};
 use crate::tenant::mgr::ShardSelector;
-use crate::tenant::mgr::TenantManager;
+use crate::tenant::mgr::TenantShardManager;
 use crate::tenant::mgr::{GetActiveTenantError, GetTenantError, ShardResolveResult};
 use crate::tenant::timeline::{self, WaitLsnError};
 use crate::tenant::GetTimelineError;
@@ -94,7 +94,7 @@ pub struct Connections {
 
 pub fn spawn(
     conf: &'static PageServerConf,
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     pg_auth: Option<Arc<SwappableJwtAuth>>,
     tcp_listener: tokio::net::TcpListener,
 ) -> Listener {
@@ -159,7 +159,7 @@ impl Connections {
 /// open connections.
 ///
 pub async fn libpq_listener_main(
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     auth: Option<Arc<SwappableJwtAuth>>,
     listener: tokio::net::TcpListener,
     auth_type: AuthType,
@@ -218,7 +218,7 @@ type ConnectionHandlerResult = anyhow::Result<()>;
 
 #[instrument(skip_all, fields(peer_addr))]
 async fn page_service_conn_main(
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     auth: Option<Arc<SwappableJwtAuth>>,
     socket: tokio::net::TcpStream,
     auth_type: AuthType,
@@ -337,7 +337,7 @@ struct TimelineHandles {
 }
 
 impl TimelineHandles {
-    fn new(tenant_manager: Arc<TenantManager>) -> Self {
+    fn new(tenant_manager: Arc<TenantShardManager>) -> Self {
         Self {
             wrapper: TenantManagerWrapper {
                 tenant_manager,
@@ -379,7 +379,7 @@ impl TimelineHandles {
 }
 
 pub(crate) struct TenantManagerWrapper {
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     // We do not support switching tenant_id on a connection at this point.
     // We can can add support for this later if needed without changing
     // the protocol.
@@ -613,7 +613,7 @@ impl BatchedFeMessage {
 
 impl PageServerHandler {
     pub fn new(
-        tenant_manager: Arc<TenantManager>,
+        tenant_manager: Arc<TenantShardManager>,
         auth: Option<Arc<SwappableJwtAuth>>,
         pipelining_config: PageServicePipeliningConfig,
         connection_ctx: RequestContext,
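
TimelineHandles and TenantManagerWrapper above exist so that each page-service connection resolves a tenant shard once and then reuses the handle for subsequent requests. A simplified sketch of that caching pattern, where `Resolver` and `TimelineHandle` are invented stand-ins for the real types:

    use std::collections::HashMap;
    use std::sync::Arc;

    #[derive(Clone)]
    struct TimelineHandle; // stand-in for a resolved timeline handle

    struct Resolver; // stand-in for the manager-backed resolver

    impl Resolver {
        fn resolve(&self, _key: (u128, u128)) -> TimelineHandle {
            TimelineHandle
        }
    }

    // One of these lives per connection: the first request for a
    // (tenant, timeline) pair pays the resolution cost, later ones reuse it.
    struct Handles {
        resolver: Arc<Resolver>,
        cache: HashMap<(u128, u128), TimelineHandle>,
    }

    impl Handles {
        fn get(&mut self, key: (u128, u128)) -> &TimelineHandle {
            let resolver = &self.resolver;
            self.cache
                .entry(key)
                .or_insert_with(|| resolver.resolve(key))
        }
    }
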
diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs
index 407db3a577..0da9891fd2 100644
--- a/pageserver/src/tenant/mgr.rs
+++ b/pageserver/src/tenant/mgr.rs
@@ -292,12 +292,12 @@ static TENANTS: Lazy<std::sync::RwLock<TenantsMap>> =
 /// Responsible for storing and mutating the collection of all tenants
 /// that this pageserver has state for.
 ///
-/// Every Tenant and SecondaryTenant instance lives inside the TenantManager.
+/// Every TenantShard and SecondaryTenant instance lives inside the TenantShardManager.
 ///
-/// The most important role of the TenantManager is to prevent conflicts: e.g. trying to attach
+/// The most important role of the TenantShardManager is to prevent conflicts: e.g. trying to attach
 /// the same tenant twice concurrently, or trying to configure the same tenant into secondary
 /// and attached modes concurrently.
-pub struct TenantManager {
+pub struct TenantShardManager {
     conf: &'static PageServerConf,
     // TODO: currently this is a &'static pointing to TENANTs. When we finish refactoring
     // out of that static variable, the TenantManager can own this.
@@ -493,7 +493,7 @@ pub async fn init_tenant_mgr(
     resources: TenantSharedResources,
     init_order: InitializationOrder,
     cancel: CancellationToken,
-) -> anyhow::Result<TenantManager> {
+) -> anyhow::Result<TenantShardManager> {
     let mut tenants = BTreeMap::new();
 
     let ctx = RequestContext::todo_child(TaskKind::Startup, DownloadBehavior::Warn);
@@ -676,7 +676,7 @@ pub async fn init_tenant_mgr(
 
     *tenants_map = TenantsMap::Open(tenants);
 
-    Ok(TenantManager {
+    Ok(TenantShardManager {
         conf,
         tenants: &TENANTS,
         resources,
@@ -873,7 +873,7 @@ pub(crate) enum UpsertLocationError {
     InternalError(anyhow::Error),
 }
 
-impl TenantManager {
+impl TenantShardManager {
     /// Convenience function so that anyone with a TenantManager can get at the global configuration, without
     /// having to pass it around everywhere as a separate object.
     pub(crate) fn get_conf(&self) -> &'static PageServerConf {
diff --git a/pageserver/src/tenant/secondary.rs b/pageserver/src/tenant/secondary.rs
index 4bc208331b..065af891b9 100644
--- a/pageserver/src/tenant/secondary.rs
+++ b/pageserver/src/tenant/secondary.rs
@@ -19,7 +19,7 @@ use self::{
 
 use super::{
     config::{SecondaryLocationConfig, TenantConfOpt},
-    mgr::TenantManager,
+    mgr::TenantShardManager,
     span::debug_assert_current_span_has_tenant_id,
     storage_layer::LayerName,
     GetTenantError,
@@ -374,7 +374,7 @@ impl GlobalTasks {
 }
 
 pub fn spawn_tasks(
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     remote_storage: GenericRemoteStorage,
     background_jobs_can_start: Barrier,
     cancel: CancellationToken,
diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs
index 395e34e404..5961142993 100644
--- a/pageserver/src/tenant/secondary/downloader.rs
+++ b/pageserver/src/tenant/secondary/downloader.rs
@@ -39,7 +39,7 @@ use super::{
 };
 
 use crate::tenant::{
-    mgr::TenantManager,
+    mgr::TenantShardManager,
     remote_timeline_client::{download::download_layer_file, remote_heatmap_path},
 };
 
@@ -69,7 +69,7 @@ use super::{
 const DEFAULT_DOWNLOAD_INTERVAL: Duration = Duration::from_millis(60000);
 
 pub(super) async fn downloader_task(
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     remote_storage: GenericRemoteStorage,
     command_queue: tokio::sync::mpsc::Receiver<CommandRequest<DownloadCommand>>,
     background_jobs_can_start: Barrier,
@@ -92,7 +92,7 @@ pub(super) async fn downloader_task(
 }
 
 struct SecondaryDownloader {
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     remote_storage: GenericRemoteStorage,
     root_ctx: RequestContext,
 }
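
Both secondary-mode tasks above are driven by a command queue: callers push requests over an mpsc channel and the task replies when the work is done. A stripped-down sketch of that request/response shape, with `String` standing in for the real command and error types:

    use tokio::sync::{mpsc, oneshot};

    // Each command carries a oneshot sender so the caller can await the outcome.
    struct CommandRequest {
        command: String, // stand-in for a download/upload command
        response: oneshot::Sender<Result<(), String>>,
    }

    async fn command_loop(mut queue: mpsc::Receiver<CommandRequest>) {
        while let Some(request) = queue.recv().await {
            // ... perform the work described by `request.command` ...
            let _ = request.response.send(Ok(()));
        }
    }
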
diff --git a/pageserver/src/tenant/secondary/heatmap_uploader.rs b/pageserver/src/tenant/secondary/heatmap_uploader.rs
index d49551e26c..56078a1350 100644
--- a/pageserver/src/tenant/secondary/heatmap_uploader.rs
+++ b/pageserver/src/tenant/secondary/heatmap_uploader.rs
@@ -10,7 +10,7 @@ use crate::{
     tenant::{
         config::AttachmentMode,
         mgr::GetTenantError,
-        mgr::TenantManager,
+        mgr::TenantShardManager,
         remote_timeline_client::remote_heatmap_path,
         span::debug_assert_current_span_has_tenant_id,
         tasks::{warn_when_period_overrun, BackgroundLoopKind},
@@ -35,7 +35,7 @@ use tracing::{info_span, instrument, Instrument};
 use utils::{backoff, completion::Barrier, yielding_loop::yielding_loop};
 
 pub(super) async fn heatmap_uploader_task(
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     remote_storage: GenericRemoteStorage,
     command_queue: tokio::sync::mpsc::Receiver<CommandRequest<UploadCommand>>,
     background_jobs_can_start: Barrier,
@@ -61,7 +61,7 @@ pub(super) async fn heatmap_uploader_task(
 /// handling loop and mutates it as needed: there are no locks here, because that event loop
 /// can hold &mut references to this type throughout.
 struct HeatmapUploader {
-    tenant_manager: Arc<TenantManager>,
+    tenant_manager: Arc<TenantShardManager>,
     remote_storage: GenericRemoteStorage,
     cancel: CancellationToken,
 
diff --git a/pageserver/src/utilization.rs b/pageserver/src/utilization.rs
index a0223f3bce..1d0736b1cc 100644
--- a/pageserver/src/utilization.rs
+++ b/pageserver/src/utilization.rs
@@ -9,12 +9,14 @@ use utils::serde_percent::Percent;
 
 use pageserver_api::models::PageserverUtilization;
 
-use crate::{config::PageServerConf, metrics::NODE_UTILIZATION_SCORE, tenant::mgr::TenantManager};
+use crate::{
+    config::PageServerConf, metrics::NODE_UTILIZATION_SCORE, tenant::mgr::TenantShardManager,
+};
 
 pub(crate) fn regenerate(
     conf: &PageServerConf,
     tenants_path: &Path,
-    tenant_manager: &TenantManager,
+    tenant_manager: &TenantShardManager,
 ) -> anyhow::Result<PageserverUtilization> {
     let statvfs = nix::sys::statvfs::statvfs(tenants_path)
         .map_err(std::io::Error::from)
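
For reference, `regenerate` above sizes the disk with a statvfs call on the tenants directory. A small sketch of how total and available bytes fall out of that call; the exact field arithmetic is an assumption, not copied from the pageserver:

    use std::path::Path;

    // Total and available bytes for the filesystem holding `tenants_path`,
    // derived from the fragment size and block counts that statvfs reports.
    fn disk_usage(tenants_path: &Path) -> nix::Result<(u64, u64)> {
        let stat = nix::sys::statvfs::statvfs(tenants_path)?;
        let fragment = stat.fragment_size() as u64;
        let total = stat.blocks() as u64 * fragment;
        let available = stat.blocks_available() as u64 * fragment;
        Ok((total, available))
    }
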