diff --git a/libs/remote_storage/src/config.rs b/libs/remote_storage/src/config.rs
index dae141bf77..ff34158c9c 100644
--- a/libs/remote_storage/src/config.rs
+++ b/libs/remote_storage/src/config.rs
@@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize};
 
 use crate::{
     DEFAULT_MAX_KEYS_PER_LIST_RESPONSE, DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT,
-    DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT,
+    DEFAULT_REMOTE_STORAGE_LOCALFS_CONCURRENCY_LIMIT, DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT,
 };
 
 /// External backup storage configuration, enough for creating a client for that storage.
@@ -45,11 +45,11 @@ impl RemoteStorageKind {
 
 impl RemoteStorageConfig {
     /// Helper to fetch the configured concurrency limit.
-    pub fn concurrency_limit(&self) -> Option<usize> {
+    pub fn concurrency_limit(&self) -> usize {
         match &self.storage {
-            RemoteStorageKind::LocalFs { .. } => None,
-            RemoteStorageKind::AwsS3(c) => Some(c.concurrency_limit.into()),
-            RemoteStorageKind::AzureContainer(c) => Some(c.concurrency_limit.into()),
+            RemoteStorageKind::LocalFs { .. } => DEFAULT_REMOTE_STORAGE_LOCALFS_CONCURRENCY_LIMIT,
+            RemoteStorageKind::AwsS3(c) => c.concurrency_limit.into(),
+            RemoteStorageKind::AzureContainer(c) => c.concurrency_limit.into(),
         }
     }
 }
diff --git a/libs/remote_storage/src/lib.rs b/libs/remote_storage/src/lib.rs
index 7a864151ec..69b522d63e 100644
--- a/libs/remote_storage/src/lib.rs
+++ b/libs/remote_storage/src/lib.rs
@@ -65,6 +65,12 @@ pub const DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT: usize = 100;
 /// Here, a limit of max 20k concurrent connections was noted.
 ///
 pub const DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT: usize = 100;
+/// Set this limit analogously to the S3 limit.
+///
+/// The local filesystem backend doesn't enforce a concurrency limit itself, but this also bounds
+/// the upload queue concurrency. Some tests create thousands of uploads, which slows down the
+/// quadratic scheduling of the upload queue, and there is no point spawning so many Tokio tasks.
+pub const DEFAULT_REMOTE_STORAGE_LOCALFS_CONCURRENCY_LIMIT: usize = 100;
 /// No limits on the client side, which currenltly means 1000 for AWS S3.
 ///
 pub const DEFAULT_MAX_KEYS_PER_LIST_RESPONSE: Option<i32> = None;
diff --git a/pageserver/src/tenant/remote_timeline_client.rs b/pageserver/src/tenant/remote_timeline_client.rs
index bcba6d1f62..ad6d8dfae8 100644
--- a/pageserver/src/tenant/remote_timeline_client.rs
+++ b/pageserver/src/tenant/remote_timeline_client.rs
@@ -437,8 +437,7 @@ impl RemoteTimelineClient {
             .conf
             .remote_storage_config
             .as_ref()
-            .and_then(|r| r.concurrency_limit())
-            .unwrap_or(0);
+            .map_or(0, |r| r.concurrency_limit());
         let mut upload_queue = self.upload_queue.lock().unwrap();
         upload_queue.initialize_with_current_remote_index_part(index_part, inprogress_limit)?;
         self.update_remote_physical_size_gauge(Some(index_part));
@@ -461,8 +460,7 @@ impl RemoteTimelineClient {
             .conf
             .remote_storage_config
             .as_ref()
-            .and_then(|r| r.concurrency_limit())
-            .unwrap_or(0);
+            .map_or(0, |r| r.concurrency_limit());
         let mut upload_queue = self.upload_queue.lock().unwrap();
         upload_queue.initialize_empty_remote(local_metadata, inprogress_limit)?;
         self.update_remote_physical_size_gauge(None);
@@ -484,8 +482,7 @@ impl RemoteTimelineClient {
             .conf
             .remote_storage_config
             .as_ref()
-            .and_then(|r| r.concurrency_limit())
-            .unwrap_or(0);
+            .map_or(0, |r| r.concurrency_limit());
         let mut upload_queue = self.upload_queue.lock().unwrap();
         upload_queue.initialize_with_current_remote_index_part(index_part, inprogress_limit)?;
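
For readers skimming the diff, here is a minimal, self-contained sketch of the two changes it makes. The types are simplified, hypothetical stand-ins (the real `RemoteStorageKind` variants carry richer config structs, and the field types differ), but the shape matches: `concurrency_limit()` becomes infallible, with `LocalFs` falling back to the new default instead of `None`, and each call site collapses the old `.and_then(...).unwrap_or(0)` chain into a single `map_or`.

```rust
// Standalone sketch with simplified, hypothetical types; not the real
// `remote_storage` crate. Illustrates the behavior change in this diff.

const DEFAULT_REMOTE_STORAGE_LOCALFS_CONCURRENCY_LIMIT: usize = 100;

#[allow(dead_code)]
enum RemoteStorageKind {
    LocalFs,
    AwsS3 { concurrency_limit: usize },
    AzureContainer { concurrency_limit: usize },
}

struct RemoteStorageConfig {
    storage: RemoteStorageKind,
}

impl RemoteStorageConfig {
    /// Now infallible: LocalFs reports the new default rather than `None`.
    fn concurrency_limit(&self) -> usize {
        match &self.storage {
            RemoteStorageKind::LocalFs => DEFAULT_REMOTE_STORAGE_LOCALFS_CONCURRENCY_LIMIT,
            RemoteStorageKind::AwsS3 { concurrency_limit }
            | RemoteStorageKind::AzureContainer { concurrency_limit } => *concurrency_limit,
        }
    }
}

fn main() {
    // Caller side: with an infallible `concurrency_limit()`, the old
    // `.and_then(|r| r.concurrency_limit()).unwrap_or(0)` chain becomes a
    // single `map_or`; 0 remains only for the "no remote storage at all" case.
    let config = Some(RemoteStorageConfig {
        storage: RemoteStorageKind::LocalFs,
    });
    let inprogress_limit = config.as_ref().map_or(0, |r| r.concurrency_limit());
    assert_eq!(inprogress_limit, 100);

    let unconfigured: Option<RemoteStorageConfig> = None;
    assert_eq!(unconfigured.as_ref().map_or(0, |r| r.concurrency_limit()), 0);
}
```

Per the doc comment added in `lib.rs`, the point of giving `LocalFs` a real limit is that the same value bounds upload queue concurrency, so tests that enqueue thousands of uploads no longer leave the queue unbounded.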