From 0b91edb943169ad7804fe337ed3d2a5f64f93b98 Mon Sep 17 00:00:00 2001 From: Vadim Kharitonov Date: Fri, 2 Feb 2024 19:36:31 +0100 Subject: [PATCH 01/34] Revert pgvector 0.6.0 (#6592) It doesn't work in our VMs. Need more time to investigate --- .dockerignore | 25 +++++++++-------- Dockerfile.compute-node | 7 ++--- patches/pgvector.patch | 60 ----------------------------------------- 3 files changed, 14 insertions(+), 78 deletions(-) delete mode 100644 patches/pgvector.patch diff --git a/.dockerignore b/.dockerignore index 29abdc37aa..ae0ad8fd77 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,28 +1,27 @@ * -# Files -!Cargo.lock -!Cargo.toml -!Makefile !rust-toolchain.toml -!scripts/combine_control_files.py -!scripts/ninstall.sh -!vm-cgconfig.conf +!Cargo.toml +!Cargo.lock +!Makefile -# Directories !.cargo/ !.config/ -!compute_tools/ !control_plane/ +!compute_tools/ !libs/ -!neon_local/ !pageserver/ -!patches/ !pgxn/ !proxy/ -!s3_scrubber/ !safekeeper/ +!s3_scrubber/ !storage_broker/ !trace/ -!vendor/postgres-*/ +!vendor/postgres-v14/ +!vendor/postgres-v15/ +!vendor/postgres-v16/ !workspace_hack/ +!neon_local/ +!scripts/ninstall.sh +!scripts/combine_control_files.py +!vm-cgconfig.conf diff --git a/Dockerfile.compute-node b/Dockerfile.compute-node index b13225172d..d91c7cfd72 100644 --- a/Dockerfile.compute-node +++ b/Dockerfile.compute-node @@ -241,12 +241,9 @@ RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz - FROM build-deps AS vector-pg-build COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ -COPY patches/pgvector.patch /pgvector.patch - -RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.6.0.tar.gz -O pgvector.tar.gz && \ - echo "b0cf4ba1ab016335ac8fb1cada0d2106235889a194fffeece217c5bda90b2f19 pgvector.tar.gz" | sha256sum --check && \ +RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.5.1.tar.gz -O pgvector.tar.gz && \ + echo "cc7a8e034a96e30a819911ac79d32f6bc47bdd1aa2de4d7d4904e26b83209dc8 pgvector.tar.gz" | sha256sum --check && \ mkdir pgvector-src && cd pgvector-src && tar xvzf ../pgvector.tar.gz --strip-components=1 -C . && \ - patch -p1 < /pgvector.patch && \ make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/vector.control diff --git a/patches/pgvector.patch b/patches/pgvector.patch deleted file mode 100644 index cc1ca2e3a6..0000000000 --- a/patches/pgvector.patch +++ /dev/null @@ -1,60 +0,0 @@ -From de3dd0cd034d2bcc12b456171ce163bdc1f4cb65 Mon Sep 17 00:00:00 2001 -From: Heikki Linnakangas -Date: Thu, 1 Feb 2024 17:42:31 +0200 -Subject: [PATCH 1/1] Make v0.6.0 work with Neon - -Now that the WAL-logging happens as a separate step at the end of the -build, we need a few neon-specific hints to make it work. 
---- - src/hnswbuild.c | 28 ++++++++++++++++++++++++++++ - 1 file changed, 28 insertions(+) - -diff --git a/src/hnswbuild.c b/src/hnswbuild.c -index 680789b..bfa657a 100644 ---- a/src/hnswbuild.c -+++ b/src/hnswbuild.c -@@ -1089,13 +1089,41 @@ BuildIndex(Relation heap, Relation index, IndexInfo *indexInfo, - SeedRandom(42); - #endif - -+#ifdef NEON_SMGR -+ smgr_start_unlogged_build(RelationGetSmgr(index)); -+#endif -+ - InitBuildState(buildstate, heap, index, indexInfo, forkNum); - - BuildGraph(buildstate, forkNum); - -+#ifdef NEON_SMGR -+ smgr_finish_unlogged_build_phase_1(RelationGetSmgr(index)); -+#endif -+ - if (RelationNeedsWAL(index)) -+ { - log_newpage_range(index, forkNum, 0, RelationGetNumberOfBlocks(index), true); - -+#ifdef NEON_SMGR -+ { -+#if PG_VERSION_NUM >= 160000 -+ RelFileLocator rlocator = RelationGetSmgr(index)->smgr_rlocator.locator; -+#else -+ RelFileNode rlocator = RelationGetSmgr(index)->smgr_rnode.node; -+#endif -+ -+ SetLastWrittenLSNForBlockRange(XactLastRecEnd, rlocator, -+ MAIN_FORKNUM, 0, RelationGetNumberOfBlocks(index)); -+ SetLastWrittenLSNForRelation(XactLastRecEnd, rlocator, MAIN_FORKNUM); -+ } -+#endif -+ } -+ -+#ifdef NEON_SMGR -+ smgr_end_unlogged_build(RelationGetSmgr(index)); -+#endif -+ - FreeBuildState(buildstate); - } - --- -2.39.2 - From 786e9cf75ba482e67b7e7e0626fac21b1696c761 Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 2 Feb 2024 19:22:03 +0000 Subject: [PATCH 02/34] control_plane: implement HTTP compute hook for attachment service (#6471) ## Problem When we change which physical pageservers a tenant is attached to, we must update the control plane so that it can update computes. This will be done via an HTTP hook, as described in https://www.notion.so/neondatabase/Sharding-Service-Control-Plane-interface-6de56dd310a043bfa5c2f5564fa98365#1fe185a35d6d41f0a54279ac1a41bc94 ## Summary of changes - Optional CLI args `--control-plane-jwt-token` and `-compute-hook-url` are added. If these are set, then we will use this HTTP endpoint, instead of trying to use neon_local LocalEnv to update compute configuration. - Implement an HTTP-driven version of ComputeHook that calls into the configured URL - Notify for all tenants on startup, to ensure that we don't miss notifications if we crash partway through a change, and carry a `pending_compute_notification` flag at runtime to allow notifications to fail without risking never sending the update. - Add a test for all this One might wonder: why not do a "forever" retry for compute hook notifications, rather than carrying a flag on the shard to call reconcile() again later. The reason is that we will later limit concurreny of reconciles, when dealing with larger numbers of shards, and if reconcile is stuck waiting for the control plane to accept a notification request, it could jam up the whole system and prevent us making other changes. Anyway: from the perspective of the outside world, we _do_ retry forever, but we don't retry forever within a given Reconciler lifetime. The `pending_compute_notification` logic is predicated on later adding a background task that just calls `Service::reconcile_all` on a schedule to make sure that anything+everything that can fail a Reconciler::reconcile call will eventually be retried. 
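For reference, the notification body POSTed to the configured `--compute-hook-url` is the serialized `ComputeHookNotifyRequest`. For a single-shard tenant it looks roughly like this (the tenant and node IDs below are illustrative):

```json
{
  "tenant_id": "1f359dd625e519a1a4e8d7509690f6fc",
  "shards": [
    { "node_id": 1, "shard_number": 0 }
  ]
}
```

The control plane should return 2xx on success; 423 and 429 are treated as retryable backpressure (`Busy`/`SlowDown`), 502/503/504 as transient unavailability, and 400/401/403 as fatal (see `NotifyError`).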
--- Cargo.lock | 1 + control_plane/attachment_service/Cargo.toml | 1 + .../attachment_service/src/compute_hook.rs | 286 +++++++++++++++--- control_plane/attachment_service/src/main.rs | 34 ++- .../attachment_service/src/reconciler.rs | 63 +++- .../attachment_service/src/service.rs | 86 +++++- .../attachment_service/src/tenant_state.rs | 60 ++++ control_plane/src/attachment_service.rs | 6 + control_plane/src/bin/neon_local.rs | 2 +- control_plane/src/endpoint.rs | 34 ++- control_plane/src/local_env.rs | 7 +- test_runner/fixtures/neon_fixtures.py | 9 +- test_runner/regress/test_sharding_service.py | 101 ++++++- 13 files changed, 600 insertions(+), 90 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 90991ab0a4..02450709d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -288,6 +288,7 @@ dependencies = [ "pageserver_api", "pageserver_client", "postgres_connection", + "reqwest", "serde", "serde_json", "thiserror", diff --git a/control_plane/attachment_service/Cargo.toml b/control_plane/attachment_service/Cargo.toml index 1d3831eea0..d3c62d74d2 100644 --- a/control_plane/attachment_service/Cargo.toml +++ b/control_plane/attachment_service/Cargo.toml @@ -16,6 +16,7 @@ hyper.workspace = true pageserver_api.workspace = true pageserver_client.workspace = true postgres_connection.workspace = true +reqwest.workspace = true serde.workspace = true serde_json.workspace = true thiserror.workspace = true diff --git a/control_plane/attachment_service/src/compute_hook.rs b/control_plane/attachment_service/src/compute_hook.rs index 02617cd065..9c1185f259 100644 --- a/control_plane/attachment_service/src/compute_hook.rs +++ b/control_plane/attachment_service/src/compute_hook.rs @@ -1,24 +1,76 @@ -use std::collections::HashMap; +use std::{collections::HashMap, time::Duration}; -use control_plane::endpoint::ComputeControlPlane; +use control_plane::endpoint::{ComputeControlPlane, EndpointStatus}; use control_plane::local_env::LocalEnv; -use pageserver_api::shard::{ShardCount, ShardIndex, TenantShardId}; +use hyper::{Method, StatusCode}; +use pageserver_api::shard::{ShardCount, ShardIndex, ShardNumber, TenantShardId}; use postgres_connection::parse_host_port; -use utils::id::{NodeId, TenantId}; +use serde::{Deserialize, Serialize}; +use tokio_util::sync::CancellationToken; +use utils::{ + backoff::{self}, + id::{NodeId, TenantId}, +}; + +use crate::service::Config; + +const BUSY_DELAY: Duration = Duration::from_secs(1); +const SLOWDOWN_DELAY: Duration = Duration::from_secs(5); + +pub(crate) const API_CONCURRENCY: usize = 32; pub(super) struct ComputeHookTenant { shards: Vec<(ShardIndex, NodeId)>, } +#[derive(Serialize, Deserialize, Debug)] +struct ComputeHookNotifyRequestShard { + node_id: NodeId, + shard_number: ShardNumber, +} + +/// Request body that we send to the control plane to notify it of where a tenant is attached +#[derive(Serialize, Deserialize, Debug)] +struct ComputeHookNotifyRequest { + tenant_id: TenantId, + shards: Vec, +} + +/// Error type for attempts to call into the control plane compute notification hook +#[derive(thiserror::Error, Debug)] +pub(crate) enum NotifyError { + // Request was not send successfully, e.g. transport error + #[error("Sending request: {0}")] + Request(#[from] reqwest::Error), + // Request could not be serviced right now due to ongoing Operation in control plane, but should be possible soon. 
+ #[error("Control plane tenant busy")] + Busy, + // Explicit 429 response asking us to retry less frequently + #[error("Control plane overloaded")] + SlowDown, + // A 503 response indicates the control plane can't handle the request right now + #[error("Control plane unavailable (status {0})")] + Unavailable(StatusCode), + // API returned unexpected non-success status. We will retry, but log a warning. + #[error("Control plane returned unexpected status {0}")] + Unexpected(StatusCode), + // We shutdown while sending + #[error("Shutting down")] + ShuttingDown, + // A response indicates we will never succeed, such as 400 or 404 + #[error("Non-retryable error {0}")] + Fatal(StatusCode), +} + impl ComputeHookTenant { - pub(super) async fn maybe_reconfigure(&mut self, tenant_id: TenantId) -> anyhow::Result<()> { + async fn maybe_reconfigure(&mut self, tenant_id: TenantId) -> Option { // Find the highest shard count and drop any shards that aren't // for that shard count. let shard_count = self.shards.iter().map(|(k, _v)| k.shard_count).max(); let Some(shard_count) = shard_count else { // No shards, nothing to do. tracing::info!("ComputeHookTenant::maybe_reconfigure: no shards"); - return Ok(()); + return None; }; self.shards.retain(|(k, _v)| k.shard_count == shard_count); @@ -26,38 +78,18 @@ impl ComputeHookTenant { .sort_by_key(|(shard, _node_id)| shard.shard_number); if self.shards.len() == shard_count.0 as usize || shard_count == ShardCount(0) { - // We have pageservers for all the shards: proceed to reconfigure compute - let env = match LocalEnv::load_config() { - Ok(e) => e, - Err(e) => { - tracing::warn!( - "Couldn't load neon_local config, skipping compute update ({e})" - ); - return Ok(()); - } - }; - let cplane = ComputeControlPlane::load(env.clone()) - .expect("Error loading compute control plane"); - - let compute_pageservers = self - .shards - .iter() - .map(|(_shard, node_id)| { - let ps_conf = env - .get_pageserver_conf(*node_id) - .expect("Unknown pageserver"); - let (pg_host, pg_port) = parse_host_port(&ps_conf.listen_pg_addr) - .expect("Unable to parse listen_pg_addr"); - (pg_host, pg_port.unwrap_or(5432)) - }) - .collect::>(); - - for (endpoint_name, endpoint) in &cplane.endpoints { - if endpoint.tenant_id == tenant_id && endpoint.status() == "running" { - tracing::info!("🔁 Reconfiguring endpoint {}", endpoint_name,); - endpoint.reconfigure(compute_pageservers.clone()).await?; - } - } + // We have pageservers for all the shards: emit a configuration update + return Some(ComputeHookNotifyRequest { + tenant_id, + shards: self + .shards + .iter() + .map(|(shard, node_id)| ComputeHookNotifyRequestShard { + shard_number: shard.shard_number, + node_id: *node_id, + }) + .collect(), + }); } else { tracing::info!( "ComputeHookTenant::maybe_reconfigure: not enough shards ({}/{})", @@ -66,7 +98,7 @@ impl ComputeHookTenant { ); } - Ok(()) + None } } @@ -74,22 +106,171 @@ impl ComputeHookTenant { /// mapping. It aggregates updates for the shards in a tenant, and when appropriate reconfigures /// the compute connection string. 
pub(super) struct ComputeHook { + config: Config, state: tokio::sync::Mutex>, + authorization_header: Option, } impl ComputeHook { - pub(super) fn new() -> Self { + pub(super) fn new(config: Config) -> Self { + let authorization_header = config + .control_plane_jwt_token + .clone() + .map(|jwt| format!("Bearer {}", jwt)); + Self { state: Default::default(), + config, + authorization_header, } } + /// For test environments: use neon_local's LocalEnv to update compute + async fn do_notify_local( + &self, + reconfigure_request: ComputeHookNotifyRequest, + ) -> anyhow::Result<()> { + let env = match LocalEnv::load_config() { + Ok(e) => e, + Err(e) => { + tracing::warn!("Couldn't load neon_local config, skipping compute update ({e})"); + return Ok(()); + } + }; + let cplane = + ComputeControlPlane::load(env.clone()).expect("Error loading compute control plane"); + let ComputeHookNotifyRequest { tenant_id, shards } = reconfigure_request; + + let compute_pageservers = shards + .into_iter() + .map(|shard| { + let ps_conf = env + .get_pageserver_conf(shard.node_id) + .expect("Unknown pageserver"); + let (pg_host, pg_port) = parse_host_port(&ps_conf.listen_pg_addr) + .expect("Unable to parse listen_pg_addr"); + (pg_host, pg_port.unwrap_or(5432)) + }) + .collect::>(); + + for (endpoint_name, endpoint) in &cplane.endpoints { + if endpoint.tenant_id == tenant_id && endpoint.status() == EndpointStatus::Running { + tracing::info!("🔁 Reconfiguring endpoint {}", endpoint_name,); + endpoint.reconfigure(compute_pageservers.clone()).await?; + } + } + + Ok(()) + } + + async fn do_notify_iteration( + &self, + client: &reqwest::Client, + url: &String, + reconfigure_request: &ComputeHookNotifyRequest, + cancel: &CancellationToken, + ) -> Result<(), NotifyError> { + let req = client.request(Method::POST, url); + let req = if let Some(value) = &self.authorization_header { + req.header(reqwest::header::AUTHORIZATION, value) + } else { + req + }; + + tracing::debug!( + "Sending notify request to {} ({:?})", + url, + reconfigure_request + ); + let send_result = req.json(&reconfigure_request).send().await; + let response = match send_result { + Ok(r) => r, + Err(e) => return Err(e.into()), + }; + + // Treat all 2xx responses as success + if response.status() >= StatusCode::OK && response.status() < StatusCode::MULTIPLE_CHOICES { + if response.status() != StatusCode::OK { + // Non-200 2xx response: it doesn't make sense to retry, but this is unexpected, so + // log a warning. + tracing::warn!( + "Unexpected 2xx response code {} from control plane", + response.status() + ); + } + + return Ok(()); + } + + // Error response codes + match response.status() { + StatusCode::TOO_MANY_REQUESTS => { + // TODO: 429 handling should be global: set some state visible to other requests + // so that they will delay before starting, rather than all notifications trying + // once before backing off. 
+ tokio::time::timeout(SLOWDOWN_DELAY, cancel.cancelled()) + .await + .ok(); + Err(NotifyError::SlowDown) + } + StatusCode::LOCKED => { + // Delay our retry if busy: the usual fast exponential backoff in backoff::retry + // is not appropriate + tokio::time::timeout(BUSY_DELAY, cancel.cancelled()) + .await + .ok(); + Err(NotifyError::Busy) + } + StatusCode::SERVICE_UNAVAILABLE + | StatusCode::GATEWAY_TIMEOUT + | StatusCode::BAD_GATEWAY => Err(NotifyError::Unavailable(response.status())), + StatusCode::BAD_REQUEST | StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => { + Err(NotifyError::Fatal(response.status())) + } + _ => Err(NotifyError::Unexpected(response.status())), + } + } + + async fn do_notify( + &self, + url: &String, + reconfigure_request: ComputeHookNotifyRequest, + cancel: &CancellationToken, + ) -> Result<(), NotifyError> { + let client = reqwest::Client::new(); + backoff::retry( + || self.do_notify_iteration(&client, url, &reconfigure_request, cancel), + |e| matches!(e, NotifyError::Fatal(_)), + 3, + 10, + "Send compute notification", + backoff::Cancel::new(cancel.clone(), || NotifyError::ShuttingDown), + ) + .await + } + + /// Call this to notify the compute (postgres) tier of new pageservers to use + /// for a tenant. notify() is called by each shard individually, and this function + /// will decide whether an update to the tenant is sent. An update is sent on the + /// condition that: + /// - We know a pageserver for every shard. + /// - All the shards have the same shard_count (i.e. we are not mid-split) + /// + /// Cancellation token enables callers to drop out, e.g. if calling from a Reconciler + /// that is cancelled. + /// + /// This function is fallible, including in the case that the control plane is transiently + /// unavailable. A limited number of retries are done internally to efficiently hide short unavailability + /// periods, but we don't retry forever. The **caller** is responsible for handling failures and + /// ensuring that they eventually call again to ensure that the compute is eventually notified of + /// the proper pageserver nodes for a tenant. + #[tracing::instrument(skip_all, fields(tenant_shard_id, node_id))] pub(super) async fn notify( &self, tenant_shard_id: TenantShardId, node_id: NodeId, - ) -> anyhow::Result<()> { - tracing::info!("ComputeHook::notify: {}->{}", tenant_shard_id, node_id); + cancel: &CancellationToken, + ) -> Result<(), NotifyError> { let mut locked = self.state.lock().await; let entry = locked .entry(tenant_shard_id.tenant_id) @@ -111,6 +292,25 @@ impl ComputeHook { entry.shards.push((shard_index, node_id)); } - entry.maybe_reconfigure(tenant_shard_id.tenant_id).await + let reconfigure_request = entry.maybe_reconfigure(tenant_shard_id.tenant_id).await; + let Some(reconfigure_request) = reconfigure_request else { + // The tenant doesn't yet have pageservers for all its shards: we won't notify anything + // until it does. + tracing::debug!("Tenant isn't yet ready to emit a notification",); + return Ok(()); + }; + + if let Some(notify_url) = &self.config.compute_hook_url { + self.do_notify(notify_url, reconfigure_request, cancel) + .await + } else { + self.do_notify_local(reconfigure_request) + .await + .map_err(|e| { + // This path is for testing only, so munge the error into our prod-style error type. 
+ tracing::error!("Local notification hook failed: {e}"); + NotifyError::Fatal(StatusCode::INTERNAL_SERVER_ERROR) + }) + } } } diff --git a/control_plane/attachment_service/src/main.rs b/control_plane/attachment_service/src/main.rs index ed65437ba2..eda9c7aad6 100644 --- a/control_plane/attachment_service/src/main.rs +++ b/control_plane/attachment_service/src/main.rs @@ -35,9 +35,18 @@ struct Cli { public_key: Option, /// Token for authenticating this service with the pageservers it controls - #[arg(short, long)] + #[arg(long)] jwt_token: Option, + /// Token for authenticating this service with the control plane, when calling + /// the compute notification endpoint + #[arg(long)] + control_plane_jwt_token: Option, + + /// URL to control plane compute notification endpoint + #[arg(long)] + compute_hook_url: Option, + /// Path to the .json file to store state (will be created if it doesn't exist) #[arg(short, long)] path: Option, @@ -53,11 +62,15 @@ struct Secrets { database_url: String, public_key: Option, jwt_token: Option, + control_plane_jwt_token: Option, } impl Secrets { const DATABASE_URL_SECRET: &'static str = "rds-neon-storage-controller-url"; - const JWT_TOKEN_SECRET: &'static str = "neon-storage-controller-pageserver-jwt-token"; + const PAGESERVER_JWT_TOKEN_SECRET: &'static str = + "neon-storage-controller-pageserver-jwt-token"; + const CONTROL_PLANE_JWT_TOKEN_SECRET: &'static str = + "neon-storage-controller-control-plane-jwt-token"; const PUBLIC_KEY_SECRET: &'static str = "neon-storage-controller-public-key"; async fn load(args: &Cli) -> anyhow::Result { @@ -95,7 +108,7 @@ impl Secrets { let jwt_token = asm .get_secret_value() - .secret_id(Self::JWT_TOKEN_SECRET) + .secret_id(Self::PAGESERVER_JWT_TOKEN_SECRET) .send() .await? .secret_string() @@ -104,6 +117,17 @@ impl Secrets { tracing::warn!("No pageserver JWT token set: this will only work if authentication is disabled on the pageserver"); } + let control_plane_jwt_token = asm + .get_secret_value() + .secret_id(Self::CONTROL_PLANE_JWT_TOKEN_SECRET) + .send() + .await? 
+ .secret_string() + .map(str::to_string); + if jwt_token.is_none() { + tracing::warn!("No control plane JWT token set: this will only work if authentication is disabled on the pageserver"); + } + let public_key = asm .get_secret_value() .secret_id(Self::PUBLIC_KEY_SECRET) @@ -125,6 +149,7 @@ impl Secrets { database_url, public_key, jwt_token, + control_plane_jwt_token, }) } @@ -137,6 +162,7 @@ impl Secrets { database_url: args.database_url.clone(), public_key, jwt_token: args.jwt_token.clone(), + control_plane_jwt_token: args.control_plane_jwt_token.clone(), }) } } @@ -165,6 +191,8 @@ async fn main() -> anyhow::Result<()> { let config = Config { jwt_token: secrets.jwt_token, + control_plane_jwt_token: secrets.control_plane_jwt_token, + compute_hook_url: args.compute_hook_url, }; let json_path = args.path; diff --git a/control_plane/attachment_service/src/reconciler.rs b/control_plane/attachment_service/src/reconciler.rs index d7f4c0406a..776e1f9d1e 100644 --- a/control_plane/attachment_service/src/reconciler.rs +++ b/control_plane/attachment_service/src/reconciler.rs @@ -14,7 +14,7 @@ use utils::generation::Generation; use utils::id::{NodeId, TimelineId}; use utils::lsn::Lsn; -use crate::compute_hook::ComputeHook; +use crate::compute_hook::{ComputeHook, NotifyError}; use crate::node::Node; use crate::tenant_state::{IntentState, ObservedState, ObservedStateLocation}; @@ -37,9 +37,15 @@ pub(super) struct Reconciler { pub(crate) pageservers: Arc>, /// A hook to notify the running postgres instances when we change the location - /// of a tenant + /// of a tenant. Use this via [`Self::compute_notify`] to update our failure flag + /// and guarantee eventual retries. pub(crate) compute_hook: Arc, + /// To avoid stalling if the cloud control plane is unavailable, we may proceed + /// past failures in [`ComputeHook::notify`], but we _must_ remember that we failed + /// so that we can set [`crate::tenant_state::TenantState::pending_compute_notification`] to ensure a later retry. + pub(crate) compute_notify_failure: bool, + /// A means to abort background reconciliation: it is essential to /// call this when something changes in the original TenantState that /// will make this reconciliation impossible or unnecessary, for @@ -52,7 +58,9 @@ pub(super) struct Reconciler { } #[derive(thiserror::Error, Debug)] -pub enum ReconcileError { +pub(crate) enum ReconcileError { + #[error(transparent)] + Notify(#[from] NotifyError), #[error(transparent)] Other(#[from] anyhow::Error), } @@ -317,9 +325,19 @@ impl Reconciler { } tracing::info!("🔁 Notifying compute to use pageserver {}", dest_ps_id); - self.compute_hook - .notify(self.tenant_shard_id, dest_ps_id) - .await?; + + // During a live migration it is unhelpful to proceed if we couldn't notify compute: if we detach + // the origin without notifying compute, we will render the tenant unavailable. + while let Err(e) = self.compute_notify().await { + match e { + NotifyError::Fatal(_) => return Err(anyhow::anyhow!(e)), + _ => { + tracing::warn!( + "Live migration blocked by compute notification error, retrying: {e}" + ); + } + } + } // Downgrade the origin to secondary. If the tenant's policy is PlacementPolicy::Single, then // this location will be deleted in the general case reconciliation that runs after this. 
@@ -400,15 +418,7 @@ impl Reconciler { wanted_conf.generation = self.generation.into(); tracing::info!("Observed configuration requires update."); self.location_config(node_id, wanted_conf, None).await?; - if let Err(e) = self - .compute_hook - .notify(self.tenant_shard_id, node_id) - .await - { - tracing::warn!( - "Failed to notify compute of newly attached pageserver {node_id}: {e}" - ); - } + self.compute_notify().await?; } } } @@ -461,6 +471,29 @@ impl Reconciler { Ok(()) } + + pub(crate) async fn compute_notify(&mut self) -> Result<(), NotifyError> { + // Whenever a particular Reconciler emits a notification, it is always notifying for the intended + // destination. + if let Some(node_id) = self.intent.attached { + let result = self + .compute_hook + .notify(self.tenant_shard_id, node_id, &self.cancel) + .await; + if let Err(e) = &result { + // It is up to the caller whether they want to drop out on this error, but they don't have to: + // in general we should avoid letting unavailability of the cloud control plane stop us from + // making progress. + tracing::warn!("Failed to notify compute of attached pageserver {node_id}: {e}"); + // Set this flag so that in our ReconcileResult we will set the flag on the shard that it + // needs to retry at some point. + self.compute_notify_failure = true; + } + result + } else { + Ok(()) + } + } } pub(crate) fn attached_location_conf( diff --git a/control_plane/attachment_service/src/service.rs b/control_plane/attachment_service/src/service.rs index 8c6a348515..6f0e3ebb74 100644 --- a/control_plane/attachment_service/src/service.rs +++ b/control_plane/attachment_service/src/service.rs @@ -12,6 +12,7 @@ use control_plane::attachment_service::{ TenantShardMigrateRequest, TenantShardMigrateResponse, }; use diesel::result::DatabaseErrorKind; +use futures::StreamExt; use hyper::StatusCode; use pageserver_api::{ control_api::{ @@ -27,6 +28,7 @@ use pageserver_api::{ shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId}, }; use pageserver_client::mgmt_api; +use tokio_util::sync::CancellationToken; use utils::{ completion::Barrier, generation::Generation, @@ -36,7 +38,7 @@ use utils::{ }; use crate::{ - compute_hook::ComputeHook, + compute_hook::{self, ComputeHook}, node::Node, persistence::{DatabaseError, NodePersistence, Persistence, TenantShardPersistence}, scheduler::Scheduler, @@ -66,6 +68,7 @@ struct ServiceState { impl ServiceState { fn new( + config: Config, result_tx: tokio::sync::mpsc::UnboundedSender, nodes: HashMap, tenants: BTreeMap, @@ -73,7 +76,7 @@ impl ServiceState { Self { tenants, nodes: Arc::new(nodes), - compute_hook: Arc::new(ComputeHook::new()), + compute_hook: Arc::new(ComputeHook::new(config)), result_tx, } } @@ -82,8 +85,17 @@ impl ServiceState { #[derive(Clone)] pub struct Config { // All pageservers managed by one instance of this service must have - // the same public key. + // the same public key. This JWT token will be used to authenticate + // this service to the pageservers it manages. pub jwt_token: Option, + + // This JWT token will be used to authenticate this service to the control plane. + pub control_plane_jwt_token: Option, + + /// Where the compute hook should send notifications of pageserver attachment locations + /// (this URL points to the control plane in prod). If this is None, the compute hook will + /// assume it is running in a test environment and try to update neon_local. 
+ pub compute_hook_url: Option, } impl From for ApiError { @@ -163,6 +175,8 @@ impl Service { let mut cleanup = Vec::new(); + let mut compute_notifications = Vec::new(); + // Populate intent and observed states for all tenants, based on reported state on pageservers let shard_count = { let mut locked = self.inner.write().unwrap(); @@ -187,6 +201,13 @@ impl Service { // not enough pageservers are available. The tenant may well still be available // to clients. tracing::error!("Failed to schedule tenant {tenant_shard_id} at startup: {e}"); + } else { + // If we're both intending and observed to be attached at a particular node, we will + // emit a compute notification for this. In the case where our observed state does not + // yet match our intent, we will eventually reconcile, and that will emit a compute notification. + if let Some(attached_at) = tenant_state.stably_attached() { + compute_notifications.push((*tenant_shard_id, attached_at)); + } } } @@ -235,10 +256,57 @@ impl Service { } } + // Emit compute hook notifications for all tenants which are already stably attached. Other tenants + // will emit compute hook notifications when they reconcile. + // + // Ordering: we must complete these notification attempts before doing any other reconciliation for the + // tenants named here, because otherwise our calls to notify() might race with more recent values + // generated by reconciliation. + + // Compute notify is fallible. If it fails here, do not delay overall startup: set the + // flag on these shards that they have a pending notification. + let compute_hook = self.inner.read().unwrap().compute_hook.clone(); + + // Construct an async stream of futures to invoke the compute notify function: we do this + // in order to subsequently use .buffered() on the stream to execute with bounded parallelism. + let stream = futures::stream::iter(compute_notifications.into_iter()) + .map(|(tenant_shard_id, node_id)| { + let compute_hook = compute_hook.clone(); + async move { + // TODO: give Service a cancellation token for clean shutdown + let cancel = CancellationToken::new(); + if let Err(e) = compute_hook.notify(tenant_shard_id, node_id, &cancel).await { + tracing::error!( + tenant_shard_id=%tenant_shard_id, + node_id=%node_id, + "Failed to notify compute on startup for shard: {e}" + ); + Some(tenant_shard_id) + } else { + None + } + } + }) + .buffered(compute_hook::API_CONCURRENCY); + let notify_results = stream.collect::>().await; + + // Update tenant state for any that failed to do their initial compute notify, so that they'll retry later. + { + let mut locked = self.inner.write().unwrap(); + for tenant_shard_id in notify_results.into_iter().flatten() { + if let Some(shard) = locked.tenants.get_mut(&tenant_shard_id) { + shard.pending_compute_notification = true; + } + } + } + // Finally, now that the service is up and running, launch reconcile operations for any tenants // which require it: under normal circumstances this should only include tenants that were in some - // transient state before we restarted. + // transient state before we restarted, or any tenants whose compute hooks failed above. let reconcile_tasks = self.reconcile_all(); + // We will not wait for these reconciliation tasks to run here: we're now done with startup and + // normal operations may proceed. 
+ tracing::info!("Startup complete, spawned {reconcile_tasks} reconciliation tasks ({shard_count} shards total)"); } @@ -295,6 +363,7 @@ impl Service { waiter: Arc::new(SeqWait::new(Sequence::initial())), error_waiter: Arc::new(SeqWait::new(Sequence::initial())), last_error: Arc::default(), + pending_compute_notification: false, }; tenants.insert(tenant_shard_id, new_tenant); @@ -304,7 +373,10 @@ impl Service { let this = Arc::new(Self { inner: Arc::new(std::sync::RwLock::new(ServiceState::new( - result_tx, nodes, tenants, + config.clone(), + result_tx, + nodes, + tenants, ))), config, persistence, @@ -330,6 +402,10 @@ impl Service { // needed, but it is used to handle out-of-band updates via. e.g. test hook. tenant.generation = std::cmp::max(tenant.generation, result.generation); + // If the reconciler signals that it failed to notify compute, set this state on + // the shard so that a future [`TenantState::maybe_reconcile`] will try again. + tenant.pending_compute_notification = result.pending_compute_notification; + match result.result { Ok(()) => { for (node_id, loc) in &result.observed.locations { diff --git a/control_plane/attachment_service/src/tenant_state.rs b/control_plane/attachment_service/src/tenant_state.rs index 5290197d84..a358e1ff7b 100644 --- a/control_plane/attachment_service/src/tenant_state.rs +++ b/control_plane/attachment_service/src/tenant_state.rs @@ -71,6 +71,12 @@ pub(crate) struct TenantState { /// TODO: generalize to an array of recent events /// TOOD: use a ArcSwap instead of mutex for faster reads? pub(crate) last_error: std::sync::Arc>, + + /// If we have a pending compute notification that for some reason we weren't able to send, + /// set this to true. If this is set, calls to [`Self::maybe_reconcile`] will run a task to retry + /// sending it. This is the mechanism by which compute notifications are included in the scope + /// of state that we publish externally in an eventually consistent way. + pub(crate) pending_compute_notification: bool, } #[derive(Default, Clone, Debug)] @@ -164,6 +170,9 @@ pub(crate) struct ReconcileResult { pub(crate) tenant_shard_id: TenantShardId, pub(crate) generation: Generation, pub(crate) observed: ObservedState, + + /// Set [`TenantState::pending_compute_notification`] from this flag + pub(crate) pending_compute_notification: bool, } impl IntentState { @@ -226,6 +235,7 @@ impl TenantState { waiter: Arc::new(SeqWait::new(Sequence(0))), error_waiter: Arc::new(SeqWait::new(Sequence(0))), last_error: Arc::default(), + pending_compute_notification: false, } } @@ -333,6 +343,38 @@ impl TenantState { Ok(()) } + /// Query whether the tenant's observed state for attached node matches its intent state, and if so, + /// yield the node ID. This is appropriate for emitting compute hook notifications: we are checking that + /// the node in question is not only where we intend to attach, but that the tenant is indeed already attached there. + /// + /// Reconciliation may still be needed for other aspects of state such as secondaries (see [`Self::dirty`]): this + /// funciton should not be used to decide whether to reconcile. 
+ pub(crate) fn stably_attached(&self) -> Option { + if let Some(attach_intent) = self.intent.attached { + match self.observed.locations.get(&attach_intent) { + Some(loc) => match &loc.conf { + Some(conf) => match conf.mode { + LocationConfigMode::AttachedMulti + | LocationConfigMode::AttachedSingle + | LocationConfigMode::AttachedStale => { + // Our intent and observed state agree that this node is in an attached state. + Some(attach_intent) + } + // Our observed config is not an attached state + _ => None, + }, + // Our observed state is None, i.e. in flux + None => None, + }, + // We have no observed state for this node + None => None, + } + } else { + // Our intent is not to attach + None + } + } + fn dirty(&self) -> bool { if let Some(node_id) = self.intent.attached { let wanted_conf = attached_location_conf(self.generation, &self.shard, &self.config); @@ -354,6 +396,12 @@ impl TenantState { } } + // Even if there is no pageserver work to be done, if we have a pending notification to computes, + // wake up a reconciler to send it. + if self.pending_compute_notification { + return true; + } + false } @@ -415,11 +463,13 @@ impl TenantState { service_config: service_config.clone(), cancel: cancel.clone(), persistence: persistence.clone(), + compute_notify_failure: false, }; let reconcile_seq = self.sequence; tracing::info!("Spawning Reconciler for sequence {}", self.sequence); + let must_notify = self.pending_compute_notification; let join_handle = tokio::task::spawn(async move { // Wait for any previous reconcile task to complete before we start if let Some(old_handle) = old_handle { @@ -438,7 +488,16 @@ impl TenantState { return; } + // Attempt to make observed state match intent state let result = reconciler.reconcile().await; + + // If we know we had a pending compute notification from some previous action, send a notification irrespective + // of whether the above reconcile() did any work + if result.is_ok() && must_notify { + // If this fails we will send the need to retry in [`ReconcileResult::pending_compute_notification`] + reconciler.compute_notify().await.ok(); + } + result_tx .send(ReconcileResult { sequence: reconcile_seq, @@ -446,6 +505,7 @@ impl TenantState { tenant_shard_id: reconciler.tenant_shard_id, generation: reconciler.generation, observed: reconciler.observed, + pending_compute_notification: reconciler.compute_notify_failure, }) .ok(); }); diff --git a/control_plane/src/attachment_service.rs b/control_plane/src/attachment_service.rs index 7816d0953b..140e5c4e34 100644 --- a/control_plane/src/attachment_service.rs +++ b/control_plane/src/attachment_service.rs @@ -457,6 +457,12 @@ impl AttachmentService { args.push(format!("--public-key={public_key_path}")); } + if let Some(control_plane_compute_hook_api) = &self.env.control_plane_compute_hook_api { + args.push(format!( + "--compute-hook-url={control_plane_compute_hook_api}" + )); + } + background_process::start_process( COMMAND, &self.env.base_data_dir, diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index d5abda729f..e56007dd20 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -795,7 +795,7 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re &endpoint.timeline_id.to_string(), branch_name, lsn_str.as_str(), - endpoint.status(), + &format!("{}", endpoint.status()), ]); } diff --git a/control_plane/src/endpoint.rs b/control_plane/src/endpoint.rs index dcad22b992..b19a6a1a18 100644 --- 
a/control_plane/src/endpoint.rs +++ b/control_plane/src/endpoint.rs @@ -184,7 +184,7 @@ impl ComputeControlPlane { v.tenant_id == tenant_id && v.timeline_id == timeline_id && v.mode == mode - && v.status() != "stopped" + && v.status() != EndpointStatus::Stopped }); if let Some((key, _)) = duplicates.next() { @@ -223,6 +223,26 @@ pub struct Endpoint { features: Vec, } +#[derive(PartialEq, Eq)] +pub enum EndpointStatus { + Running, + Stopped, + Crashed, + RunningNoPidfile, +} + +impl std::fmt::Display for EndpointStatus { + fn fmt(&self, writer: &mut std::fmt::Formatter) -> std::fmt::Result { + let s = match self { + Self::Running => "running", + Self::Stopped => "stopped", + Self::Crashed => "crashed", + Self::RunningNoPidfile => "running, no pidfile", + }; + write!(writer, "{}", s) + } +} + impl Endpoint { fn from_dir_entry(entry: std::fs::DirEntry, env: &LocalEnv) -> Result { if !entry.file_type()?.is_dir() { @@ -380,16 +400,16 @@ impl Endpoint { self.endpoint_path().join("pgdata") } - pub fn status(&self) -> &str { + pub fn status(&self) -> EndpointStatus { let timeout = Duration::from_millis(300); let has_pidfile = self.pgdata().join("postmaster.pid").exists(); let can_connect = TcpStream::connect_timeout(&self.pg_address, timeout).is_ok(); match (has_pidfile, can_connect) { - (true, true) => "running", - (false, false) => "stopped", - (true, false) => "crashed", - (false, true) => "running, no pidfile", + (true, true) => EndpointStatus::Running, + (false, false) => EndpointStatus::Stopped, + (true, false) => EndpointStatus::Crashed, + (false, true) => EndpointStatus::RunningNoPidfile, } } @@ -481,7 +501,7 @@ impl Endpoint { remote_ext_config: Option<&String>, shard_stripe_size: usize, ) -> Result<()> { - if self.status() == "running" { + if self.status() == EndpointStatus::Running { anyhow::bail!("The endpoint is already running"); } diff --git a/control_plane/src/local_env.rs b/control_plane/src/local_env.rs index aefef47da7..786ea6d098 100644 --- a/control_plane/src/local_env.rs +++ b/control_plane/src/local_env.rs @@ -72,11 +72,16 @@ pub struct LocalEnv { #[serde(default)] pub safekeepers: Vec, - // Control plane location: if None, we will not run attachment_service. If set, this will + // Control plane upcall API for pageserver: if None, we will not run attachment_service. If set, this will // be propagated into each pageserver's configuration. #[serde(default)] pub control_plane_api: Option, + // Control plane upcall API for attachment service. If set, this will be propagated into the + // attachment service's configuration. + #[serde(default)] + pub control_plane_compute_hook_api: Option, + /// Keep human-readable aliases in memory (and persist them to config), to hide ZId hex strings from the user. 
#[serde(default)] // A `HashMap>` would be more appropriate here, diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index e2a2291dbc..1e15ebe5a0 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -482,6 +482,7 @@ class NeonEnvBuilder: self.overlay_mounts_created_by_us: List[Tuple[str, Path]] = [] self.config_init_force: Optional[str] = None self.top_output_dir = top_output_dir + self.control_plane_compute_hook_api: Optional[str] = None self.pageserver_virtual_file_io_engine: Optional[str] = pageserver_virtual_file_io_engine @@ -1007,6 +1008,9 @@ class NeonEnv: # The base URL of the attachment service self.attachment_service_api: str = f"http://127.0.0.1:{self.attachment_service_port}" + # For testing this with a fake HTTP server, enable passing through a URL from config + self.control_plane_compute_hook_api = config.control_plane_compute_hook_api + self.attachment_service: NeonAttachmentService = NeonAttachmentService( self, config.auth_enabled ) @@ -1026,6 +1030,9 @@ class NeonEnv: if self.control_plane_api is not None: cfg["control_plane_api"] = self.control_plane_api + if self.control_plane_compute_hook_api is not None: + cfg["control_plane_compute_hook_api"] = self.control_plane_compute_hook_api + # Create config for pageserver http_auth_type = "NeonJWT" if config.auth_enabled else "Trust" pg_auth_type = "NeonJWT" if config.auth_enabled else "Trust" @@ -1904,7 +1911,7 @@ class Pagectl(AbstractNeonCli): class NeonAttachmentService: - def __init__(self, env: NeonEnv, auth_enabled): + def __init__(self, env: NeonEnv, auth_enabled: bool): self.env = env self.running = False self.auth_enabled = auth_enabled diff --git a/test_runner/regress/test_sharding_service.py b/test_runner/regress/test_sharding_service.py index 3b2c9334db..346df708de 100644 --- a/test_runner/regress/test_sharding_service.py +++ b/test_runner/regress/test_sharding_service.py @@ -1,14 +1,24 @@ import time from collections import defaultdict -from fixtures.neon_fixtures import ( - NeonEnvBuilder, -) +from fixtures.log_helper import log +from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder from fixtures.pageserver.http import PageserverHttpClient from fixtures.pageserver.utils import tenant_delete_wait_completed, timeline_delete_wait_completed from fixtures.pg_version import PgVersion from fixtures.types import TenantId, TimelineId from fixtures.utils import wait_until +from pytest_httpserver import HTTPServer +from werkzeug.wrappers.request import Request +from werkzeug.wrappers.response import Response + + +def get_node_shard_counts(env: NeonEnv, tenant_ids): + counts: defaultdict[str, int] = defaultdict(int) + for tid in tenant_ids: + for shard in env.attachment_service.locate(tid): + counts[shard["node_id"]] += 1 + return counts def test_sharding_service_smoke( @@ -54,14 +64,7 @@ def test_sharding_service_smoke( for tid in tenant_ids: env.neon_cli.create_tenant(tid, shard_count=shards_per_tenant) - def get_node_shard_counts(): - counts: defaultdict[str, int] = defaultdict(int) - for tid in tenant_ids: - for shard in env.attachment_service.locate(tid): - counts[shard["node_id"]] += 1 - return counts - - for node_id, count in get_node_shard_counts().items(): + for node_id, count in get_node_shard_counts(env, tenant_ids).items(): # we used a multiple of pagservers for the total shard count, # so expect equal number on all pageservers assert count == tenant_shard_count / len( @@ -89,7 +92,7 @@ def test_sharding_service_smoke( 
env.attachment_service.node_configure(env.pageservers[0].id, {"availability": "Offline"}) def node_evacuated(node_id: int): - counts = get_node_shard_counts() + counts = get_node_shard_counts(env, tenant_ids) assert counts[node_id] == 0 wait_until(10, 1, lambda: node_evacuated(env.pageservers[0].id)) @@ -98,7 +101,7 @@ def test_sharding_service_smoke( # immediately env.attachment_service.node_configure(env.pageservers[0].id, {"availability": "Active"}) time.sleep(1) - assert get_node_shard_counts()[env.pageservers[0].id] == 0 + assert get_node_shard_counts(env, tenant_ids)[env.pageservers[0].id] == 0 # Delete all the tenants for tid in tenant_ids: @@ -113,7 +116,7 @@ def test_sharding_service_smoke( for tid in tenant_ids: env.neon_cli.create_tenant(tid, shard_count=shards_per_tenant) - counts = get_node_shard_counts() + counts = get_node_shard_counts(env, tenant_ids) # Nothing should have been scheduled on the node in Draining assert counts[env.pageservers[1].id] == 0 assert counts[env.pageservers[0].id] == tenant_shard_count // 2 @@ -270,3 +273,73 @@ def test_sharding_service_onboarding( # The onboarded tenant should surviev a restart of pageserver dest_ps.stop() dest_ps.start() + + +def test_sharding_service_compute_hook( + httpserver: HTTPServer, + neon_env_builder: NeonEnvBuilder, + httpserver_listen_address, +): + """ + Test that the sharding service calls out to the configured HTTP endpoint on attachment changes + """ + + # We will run two pageserver to migrate and check that the attachment service sends notifications + # when migrating. + neon_env_builder.num_pageservers = 2 + (host, port) = httpserver_listen_address + neon_env_builder.control_plane_compute_hook_api = f"http://{host}:{port}/notify" + + # Set up fake HTTP notify endpoint + notifications = [] + + def handler(request: Request): + log.info(f"Notify request: {request}") + notifications.append(request.json) + return Response(status=200) + + httpserver.expect_request("/notify", method="POST").respond_with_handler(handler) + + # Start running + env = neon_env_builder.init_start() + + # We will to an unclean migration, which will result in deletion queue warnings + env.pageservers[0].allowed_errors.append(".*Dropped remote consistent LSN updates for tenant.*") + + # Initial notification from tenant creation + assert len(notifications) == 1 + expect = { + "tenant_id": str(env.initial_tenant), + "shards": [{"node_id": int(env.pageservers[0].id), "shard_number": 0}], + } + + env.attachment_service.node_configure(env.pageservers[0].id, {"availability": "Offline"}) + + def node_evacuated(node_id: int): + counts = get_node_shard_counts(env, [env.initial_tenant]) + assert counts[node_id] == 0 + + wait_until(10, 1, lambda: node_evacuated(env.pageservers[0].id)) + + # Additional notification from migration + log.info(f"notifications: {notifications}") + expect = { + "tenant_id": str(env.initial_tenant), + "shards": [{"node_id": int(env.pageservers[1].id), "shard_number": 0}], + } + + def received_migration_notification(): + assert len(notifications) == 2 + assert notifications[1] == expect + + wait_until(20, 0.25, received_migration_notification) + + # When we restart, we should re-emit notifications for all tenants + env.attachment_service.stop() + env.attachment_service.start() + + def received_restart_notification(): + assert len(notifications) == 3 + assert notifications[1] == expect + + wait_until(10, 1, received_restart_notification) From c9876b099397c7b990a7d359dcc0fa3b9dade926 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas 
Date: Fri, 2 Feb 2024 21:49:11 +0200 Subject: [PATCH 03/34] Fix double-free bug in walredo process. (#6534) At the end of ApplyRecord(), we called pfree on the decoded record, if it was "oversized". However, we had alread linked it to the "decode queue" list in XLogReaderState. If we later called XLogBeginRead(), it called ResetDecoder and tried to free the same record again. The conditions to hit this are: - a large WAL record (larger than aboue 64 kB I think, per DEFAULT_DECODE_BUFFER_SIZE), and - another WAL record processed by the same WAL redo process after the large one. I think the reason we haven't seen this earlier is that you don't get WAL records that large that are sent to the WAL redo process, except when logical replication is enabled. Logical replication adds data to the WAL records, making them larger. To fix, allocate the buffer ourselves, and don't link it to the decode queue. Alternatively, we could perhaps have just removed the pfree(), but frankly I'm a bit scared about the whole queue thing. --- pgxn/neon_walredo/walredoproc.c | 48 +++++++--------- .../regress/test_logical_replication.py | 57 +++++++++++++++++++ 2 files changed, 78 insertions(+), 27 deletions(-) diff --git a/pgxn/neon_walredo/walredoproc.c b/pgxn/neon_walredo/walredoproc.c index 7ca4fe93df..6ca0b2a274 100644 --- a/pgxn/neon_walredo/walredoproc.c +++ b/pgxn/neon_walredo/walredoproc.c @@ -804,6 +804,9 @@ ApplyRecord(StringInfo input_message) ErrorContextCallback errcallback; #if PG_VERSION_NUM >= 150000 DecodedXLogRecord *decoded; +#define STATIC_DECODEBUF_SIZE (64 * 1024) + static char *static_decodebuf = NULL; + size_t required_space; #endif /* @@ -833,7 +836,19 @@ ApplyRecord(StringInfo input_message) XLogBeginRead(reader_state, lsn); #if PG_VERSION_NUM >= 150000 - decoded = (DecodedXLogRecord *) XLogReadRecordAlloc(reader_state, record->xl_tot_len, true); + /* + * For reasonably small records, reuse a fixed size buffer to reduce + * palloc overhead. + */ + required_space = DecodeXLogRecordRequiredSpace(record->xl_tot_len); + if (required_space <= STATIC_DECODEBUF_SIZE) + { + if (static_decodebuf == NULL) + static_decodebuf = MemoryContextAlloc(TopMemoryContext, STATIC_DECODEBUF_SIZE); + decoded = (DecodedXLogRecord *) static_decodebuf; + } + else + decoded = palloc(required_space); if (!DecodeXLogRecord(reader_state, decoded, record, lsn, &errormsg)) elog(ERROR, "failed to decode WAL record: %s", errormsg); @@ -842,37 +857,15 @@ ApplyRecord(StringInfo input_message) /* Record the location of the next record. */ decoded->next_lsn = reader_state->NextRecPtr; - /* - * If it's in the decode buffer, mark the decode buffer space as - * occupied. - */ - if (!decoded->oversized) - { - /* The new decode buffer head must be MAXALIGNed. */ - Assert(decoded->size == MAXALIGN(decoded->size)); - if ((char *) decoded == reader_state->decode_buffer) - reader_state->decode_buffer_tail = reader_state->decode_buffer + decoded->size; - else - reader_state->decode_buffer_tail += decoded->size; - } - - /* Insert it into the queue of decoded records. 
*/ - Assert(reader_state->decode_queue_tail != decoded); - if (reader_state->decode_queue_tail) - reader_state->decode_queue_tail->next = decoded; - reader_state->decode_queue_tail = decoded; - if (!reader_state->decode_queue_head) - reader_state->decode_queue_head = decoded; - /* * Update the pointers to the beginning and one-past-the-end of this * record, again for the benefit of historical code that expected the * decoder to track this rather than accessing these fields of the record * itself. */ - reader_state->record = reader_state->decode_queue_head; - reader_state->ReadRecPtr = reader_state->record->lsn; - reader_state->EndRecPtr = reader_state->record->next_lsn; + reader_state->record = decoded; + reader_state->ReadRecPtr = decoded->lsn; + reader_state->EndRecPtr = decoded->next_lsn; } #else /* @@ -912,8 +905,9 @@ ApplyRecord(StringInfo input_message) elog(TRACE, "applied WAL record with LSN %X/%X", (uint32) (lsn >> 32), (uint32) lsn); + #if PG_VERSION_NUM >= 150000 - if (decoded && decoded->oversized) + if ((char *) decoded != static_decodebuf) pfree(decoded); #endif } diff --git a/test_runner/regress/test_logical_replication.py b/test_runner/regress/test_logical_replication.py index 51e358e60d..059ddf79ec 100644 --- a/test_runner/regress/test_logical_replication.py +++ b/test_runner/regress/test_logical_replication.py @@ -1,4 +1,6 @@ import time +from random import choice +from string import ascii_lowercase import pytest from fixtures.log_helper import log @@ -11,6 +13,10 @@ from fixtures.types import Lsn from fixtures.utils import query_scalar +def random_string(n: int): + return "".join([choice(ascii_lowercase) for _ in range(n)]) + + def test_logical_replication(neon_simple_env: NeonEnv, vanilla_pg): env = neon_simple_env @@ -238,6 +244,57 @@ def test_wal_page_boundary_start(neon_simple_env: NeonEnv, vanilla_pg): ) == endpoint.safe_psql("select sum(somedata) from replication_example") +# Test that WAL redo works for fairly large records. +# +# See https://github.com/neondatabase/neon/pull/6534. That wasn't a +# logical replication bug as such, but without logical replication, +# records passed ot the WAL redo process are never large enough to hit +# the bug. +def test_large_records(neon_simple_env: NeonEnv, vanilla_pg): + env = neon_simple_env + + env.neon_cli.create_branch("init") + endpoint = env.endpoints.create_start("init") + + cur = endpoint.connect().cursor() + cur.execute("CREATE TABLE reptbl(id int, largeval text);") + cur.execute("alter table reptbl replica identity full") + cur.execute("create publication pub1 for table reptbl") + + # now start subscriber + vanilla_pg.start() + vanilla_pg.safe_psql("CREATE TABLE reptbl(id int, largeval text);") + + log.info(f"ep connstr is {endpoint.connstr()}, subscriber connstr {vanilla_pg.connstr()}") + connstr = endpoint.connstr().replace("'", "''") + vanilla_pg.safe_psql(f"create subscription sub1 connection '{connstr}' publication pub1") + + # Test simple insert, update, delete. 
But with very large values + value = random_string(10_000_000) + cur.execute(f"INSERT INTO reptbl VALUES (1, '{value}')") + logical_replication_sync(vanilla_pg, endpoint) + assert vanilla_pg.safe_psql("select id, largeval from reptbl") == [(1, value)] + + # Test delete, and reinsert another value + cur.execute("DELETE FROM reptbl WHERE id = 1") + cur.execute(f"INSERT INTO reptbl VALUES (2, '{value}')") + logical_replication_sync(vanilla_pg, endpoint) + assert vanilla_pg.safe_psql("select id, largeval from reptbl") == [(2, value)] + + value = random_string(10_000_000) + cur.execute(f"UPDATE reptbl SET largeval='{value}'") + logical_replication_sync(vanilla_pg, endpoint) + assert vanilla_pg.safe_psql("select id, largeval from reptbl") == [(2, value)] + + endpoint.stop() + endpoint.start() + cur = endpoint.connect().cursor() + value = random_string(10_000_000) + cur.execute(f"UPDATE reptbl SET largeval='{value}'") + logical_replication_sync(vanilla_pg, endpoint) + assert vanilla_pg.safe_psql("select id, largeval from reptbl") == [(2, value)] + + # # Check that slots are not inherited in brnach # From 2fd8e24c8ff300dc9e640c8765a0311307871e7d Mon Sep 17 00:00:00 2001 From: Sasha Krassovsky Date: Fri, 2 Feb 2024 12:32:40 -0900 Subject: [PATCH 04/34] Switch sleeps to wait_until (#6575) ## Problem I didn't know about `wait_until` and was relying on `sleep` to wait for stuff. This caused some tests to be flaky. https://github.com/neondatabase/neon/issues/6561 ## Summary of changes Switch to `wait_until`, this should make it tests less flaky --- test_runner/fixtures/neon_fixtures.py | 14 ++++++++++++++ test_runner/regress/test_migrations.py | 12 ++++++++---- test_runner/regress/test_neon_superuser.py | 19 ++++++++++--------- 3 files changed, 32 insertions(+), 13 deletions(-) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 1e15ebe5a0..5ce2fca820 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3130,6 +3130,20 @@ class Endpoint(PgProtocol): log.info(json.dumps(dict(data_dict, **kwargs))) json.dump(dict(data_dict, **kwargs), file, indent=4) + # Please note: if you didn't respec this endpoint to have the `migrations` + # feature, this function will probably fail because neon_migration.migration_id + # won't exist. This is temporary - soon we'll get rid of the feature flag and + # migrations will be enabled for everyone. 
+ def wait_for_migrations(self): + with self.cursor() as cur: + + def check_migrations_done(): + cur.execute("SELECT id FROM neon_migration.migration_id") + migration_id = cur.fetchall()[0][0] + assert migration_id != 0 + + wait_until(20, 0.5, check_migrations_done) + # Mock the extension part of spec passed from control plane for local testing # endpooint.rs adds content of this file as a part of the spec.json def create_remote_extension_spec(self, spec: dict[str, Any]): diff --git a/test_runner/regress/test_migrations.py b/test_runner/regress/test_migrations.py index dee22f9b48..30dd54a8c1 100644 --- a/test_runner/regress/test_migrations.py +++ b/test_runner/regress/test_migrations.py @@ -13,12 +13,14 @@ def test_migrations(neon_simple_env: NeonEnv): endpoint.respec(skip_pg_catalog_updates=False, features=["migrations"]) endpoint.start() - time.sleep(1) # Sleep to let migrations run + endpoint.wait_for_migrations() + + num_migrations = 3 with endpoint.cursor() as cur: cur.execute("SELECT id FROM neon_migration.migration_id") migration_id = cur.fetchall() - assert migration_id[0][0] == 3 + assert migration_id[0][0] == num_migrations with open(log_path, "r") as log_file: logs = log_file.read() @@ -26,11 +28,13 @@ def test_migrations(neon_simple_env: NeonEnv): endpoint.stop() endpoint.start() - time.sleep(1) # Sleep to let migrations run + # We don't have a good way of knowing that the migrations code path finished executing + # in compute_ctl in the case that no migrations are being run + time.sleep(1) with endpoint.cursor() as cur: cur.execute("SELECT id FROM neon_migration.migration_id") migration_id = cur.fetchall() - assert migration_id[0][0] == 3 + assert migration_id[0][0] == num_migrations with open(log_path, "r") as log_file: logs = log_file.read() diff --git a/test_runner/regress/test_neon_superuser.py b/test_runner/regress/test_neon_superuser.py index 8b9eb1d9c4..eff2cadabf 100644 --- a/test_runner/regress/test_neon_superuser.py +++ b/test_runner/regress/test_neon_superuser.py @@ -1,8 +1,7 @@ -import time - from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnv from fixtures.pg_version import PgVersion +from fixtures.utils import wait_until def test_neon_superuser(neon_simple_env: NeonEnv, pg_version: PgVersion): @@ -19,7 +18,8 @@ def test_neon_superuser(neon_simple_env: NeonEnv, pg_version: PgVersion): sub.respec(skip_pg_catalog_updates=False, features=["migrations"]) sub.start() - time.sleep(1) # Sleep to let migrations run + pub.wait_for_migrations() + sub.wait_for_migrations() with pub.cursor() as cur: cur.execute( @@ -68,10 +68,11 @@ def test_neon_superuser(neon_simple_env: NeonEnv, pg_version: PgVersion): with pub.cursor(dbname="neondb", user="mr_whiskers", password="cat") as pcur: pcur.execute("INSERT INTO t VALUES (30), (40)") - time.sleep(1) # Give the change time to propagate + def check_that_changes_propagated(): + cur.execute("SELECT * FROM t") + res = cur.fetchall() + log.info(res) + assert len(res) == 4 + assert [r[0] for r in res] == [10, 20, 30, 40] - cur.execute("SELECT * FROM t") - res = cur.fetchall() - log.info(res) - assert len(res) == 4 - assert [r[0] for r in res] == [10, 20, 30, 40] + wait_until(10, 0.5, check_that_changes_propagated) From f2aa96f003e4ea59acc5161d7ee708f233dc13db Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Fri, 2 Feb 2024 21:41:55 +0000 Subject: [PATCH 05/34] Console split RFC (#1997) [Rendered](https://github.com/neondatabase/neon/blob/rfc-console-split/docs/rfcs/017-console-split.md) Co-authored-by: 
Stas Kelvich --- docs/rfcs/017-console-split.md | 420 +++++++++++++++++++++++++++++++++ 1 file changed, 420 insertions(+) create mode 100644 docs/rfcs/017-console-split.md diff --git a/docs/rfcs/017-console-split.md b/docs/rfcs/017-console-split.md new file mode 100644 index 0000000000..8036920610 --- /dev/null +++ b/docs/rfcs/017-console-split.md @@ -0,0 +1,420 @@ +# Splitting cloud console + +Created on 17.06.2022 + +## Summary + +Currently we have `cloud` repository that contains code implementing public API for our clients as well as code for managing storage and internal infrastructure services. We can split everything user-related from everything storage-related to make it easier to test and maintain. + +This RFC proposes to introduce a new control-plane service with HTTP API. The overall architecture will look like this: + +```markup +. x + external area x internal area + (our clients) x (our services) + x + x ┌───────────────────────┐ + x ┌───────────────┐ > ┌─────────────────────┐ │ Storage (EC2) │ + x │ console db │ > │ control-plane db │ │ │ + x └───────────────┘ > └─────────────────────┘ │ - safekeepers │ + x ▲ > ▲ │ - pageservers │ + x │ > │ │ │ +┌──────────────────┐ x ┌───────┴───────┐ > │ │ Dependencies │ +│ browser UI ├──►│ │ > ┌──────────┴──────────┐ │ │ +└──────────────────┘ x │ │ > │ │ │ - etcd │ + x │ console ├───────►│ control-plane ├────►│ - S3 │ +┌──────────────────┐ x │ │ > │ (deployed in k8s) │ │ - more? │ +│public API clients├──►│ │ > │ │ │ │ +└──────────────────┘ x └───────┬───────┘ > └──────────┬──────────┘ └───────────────────────┘ + x │ > ▲ │ ▲ + x │ > │ │ │ + x ┌───────┴───────┐ > │ │ ┌───────────┴───────────┐ + x │ dependencies │ > │ │ │ │ + x │- analytics │ > │ └───────────────►│ computes │ + x │- auth │ > │ │ (deployed in k8s) │ + x │- billing │ > │ │ │ + x └───────────────┘ > │ └───────────────────────┘ + x > │ ▲ + x > ┌─────┴───────────────┐ │ +┌──────────────────┐ x > │ │ │ +│ │ x > │ proxy ├─────────────────┘ +│ postgres ├───────────────────────────►│ (deployed in k8s) │ +│ users │ x > │ │ +│ │ x > └─────────────────────┘ +└──────────────────┘ x > + > + > + closed-source > open-source + > + > +``` + +Notes: + +- diagram is simplified in the less-important places +- directed arrows are strict and mean that connections in the reverse direction are forbidden + +This split is quite complex and this RFC proposes several smaller steps to achieve the larger goal: + +1. Start by refactoring the console code, the goal is to have console and control-plane code in the different directories without dependencies on each other. +2. Do similar refactoring for tables in the console database, remove queries selecting data from both console and control-plane; move control-plane tables to a separate database. +3. Implement control-plane HTTP API serving on a separate TCP port; make all console→control-plane calls to go through that HTTP API. +4. Move control-plane source code to the neon repo; start control-plane as a separate service. + +## Motivation + +These are the two most important problems we want to solve: + +- Publish open-source implementation of all our cloud/storage features +- Make a unified control-plane that is used in all cloud (serverless) and local (tests) setups + +Right now we have some closed-source code in the cloud repo. That code contains implementation for running Neon computes in k8s and without that code it’s impossible to automatically scale PostgreSQL computes. That means that we don’t have an open-source serverless PostgreSQL at the moment. 
+ +After splitting and open-sourcing control-plane service we will have source code and Docker images for all storage services. That control-plane service should have HTTP API for creating and managing tenants (including all our storage features), while proxy will listen for incoming connections and create computes on-demand. + +Improving our test suite is an important task, but requires a lot of prerequisites and may require a separate RFC. Possible implementation of that is described in the section [Next steps](#next-steps). + +Another piece of motivation can be a better involvement of storage development team into a control-plane. By splitting control-plane from the console, it can be more convenient to test and develop control-plane with paying less attention to “business” features, such as user management, billing and analytics. + +For example, console currently requires authentication providers such as GitHub OAuth to work at all, as well as nodejs to be able to build it locally. It will be more convenient to build and run it locally without these requirements. + +## Proposed implementation + +### Current state of things + +Let’s start with defining the current state of things at the moment of this proposal. We have three repositories containing source code: + +- open-source `postgres` — our fork of postgres +- open-source `neon` — our main repository for storage source code +- closed-source `cloud` — mostly console backend and UI frontend + +This proposal aims not to change anything at the existing code in `neon` and `postgres` repositories, but to create control-plane service and move it’s source code from `cloud` to the `neon` repository. That means that we need to split code in `cloud` repo only, and will consider only this repository for exploring its source code. + +Let’s look at the miscellaneous things in the `cloud` repo which are NOT part of the console application, i.e. NOT the Go source code that is compiled to the `./console` binary. 
There we have: + +- command-line tools, such as cloudbench, neonadmin +- markdown documentation +- cloud operations scripts (helm, terraform, ansible) +- configs and other things +- e2e python tests +- incidents playbooks +- UI frontend +- Make build scripts, code generation scripts +- database migrations +- swagger definitions + +And also let’s take a look at what we have in the console source code, which is the service we’d like to split: + +- API Servers + - Public API v2 + - Management API v2 + - Public API v1 + - Admin API v1 (same port as Public API v1) + - Management API v1 +- Workers + - Monitor Compute Activity + - Watch Failed Operations + - Availability Checker + - Business Metrics Collector +- Internal Services + - Auth Middleware, UserIsAdmin, Cookies + - Cable Websocket Server + - Admin Services + - Global Settings, Operations, Pageservers, Platforms, Projects, Safekeepers, Users + - Authenticate Proxy + - API Keys + - App Controller, serving UI HTML + - Auth Controller + - Branches + - Projects + - Psql Connect + Passwordless login + - Users + - Cloud Metrics + - User Metrics + - Invites + - Pageserver/Safekeeper management + - Operations, k8s/docker/common logic + - Platforms, Regions + - Project State + - Projects Roles, SCRAM + - Global Settings +- Other things + - segment analytics integration + - sentry integration + - other common utilities packages + +### Drawing the splitting line + +The most challenging and the most important thing is to define the line that will split new control-plane service from the existing cloud service. If we don’t get it right, then we can end up with having a lot more issues without many benefits. + +We propose to define that line as follows: + +- everything user-related stays in the console service +- everything storage-related should be in the control-plane service +- something that falls in between should be decided where to go, but most likely should stay in the console service +- some similar parts should be in both services, such as admin/management/db_migrations + +We call user-related all requests that can be connected to some user. The general idea is don’t have any user_id in the control-plane service and operate exclusively on tenant_id+timeline_id, the same way as existing storage services work now (compute, safekeeper, pageserver). + +Storage-related things can be defined as doing any of the following: + +- using k8s API +- doing requests to any of the storage services (proxy, compute, safekeeper, pageserver, etc..) 
+- tracking current status of tenants/timelines, managing lifetime of computes + +Based on that idea, we can say that new control-plane service should have the following components: + +- single HTTP API for everything + - Create and manage tenants and timelines + - Manage global settings and storage configuration (regions, platforms, safekeepers, pageservers) + - Admin API for storage health inspection and debugging +- Workers + - Monitor Compute Activity + - Watch Failed Operations + - Availability Checker +- Internal Services + - Admin Services + - Global Settings, Operations, Pageservers, Platforms, Tenants, Safekeepers + - Authenticate Proxy + - Branches + - Psql Connect + - Cloud Metrics + - Pageserver/Safekeeper management + - Operations, k8s/docker/common logic + - Platforms, Regions + - Tenant State + - Compute Roles, SCRAM + - Global Settings + +--- + +And other components should probably stay in the console service: + +- API Servers (no changes here) + - Public API v2 + - Management API v2 + - Public API v1 + - Admin API v1 (same port as Public API v1) + - Management API v1 +- Workers + - Business Metrics Collector +- Internal Services + - Auth Middleware, UserIsAdmin, Cookies + - Cable Websocket Server + - Admin Services + - Users admin stays the same + - Other admin services can redirect requests to the control-plane + - API Keys + - App Controller, serving UI HTML + - Auth Controller + - Projects + - User Metrics + - Invites + - Users + - Passwordless login +- Other things + - segment analytics integration + - sentry integration + - other common utilities packages + +There are also miscellaneous things that are useful for all kinds of services. So we can say that these things can be in both services: + +- markdown documentation +- e2e python tests +- make build scripts, code generation scripts +- database migrations +- swagger definitions + +The single entrypoint to the storage should be control-plane API. After we define that API, we can have code-generated implementation for the client and for the server. The general idea is to move code implementing storage components from the console to the API implementation inside the new control-plane service. + +After the code is moved to the new service, we can fill the created void by making API calls to the new service: + +- authorization of the client +- mapping user_id + project_id to the tenant_id +- calling the control-plane API + +### control-plane API + +Currently we have the following projects API in the console: + +``` +GET /projects/{project_id} +PATCH /projects/{project_id} +POST /projects/{project_id}/branches +GET /projects/{project_id}/databases +POST /projects/{project_id}/databases +GET /projects/{project_id}/databases/{database_id} +PUT /projects/{project_id}/databases/{database_id} +DELETE /projects/{project_id}/databases/{database_id} +POST /projects/{project_id}/delete +GET /projects/{project_id}/issue_token +GET /projects/{project_id}/operations +GET /projects/{project_id}/operations/{operation_id} +POST /projects/{project_id}/query +GET /projects/{project_id}/roles +POST /projects/{project_id}/roles +GET /projects/{project_id}/roles/{role_name} +DELETE /projects/{project_id}/roles/{role_name} +POST /projects/{project_id}/roles/{role_name}/reset_password +POST /projects/{project_id}/start +POST /projects/{project_id}/stop +POST /psql_session/{psql_session_id} +``` + +It looks fine and we probably already have clients relying on it. So we should not change it, at least for now. 
But most of these endpoints (if not all) are related to storage, and it can suggest us what control-plane API should look like: + +``` +GET /tenants/{tenant_id} +PATCH /tenants/{tenant_id} +POST /tenants/{tenant_id}/branches +GET /tenants/{tenant_id}/databases +POST /tenants/{tenant_id}/databases +GET /tenants/{tenant_id}/databases/{database_id} +PUT /tenants/{tenant_id}/databases/{database_id} +DELETE /tenants/{tenant_id}/databases/{database_id} +POST /tenants/{tenant_id}/delete +GET /tenants/{tenant_id}/issue_token +GET /tenants/{tenant_id}/operations +GET /tenants/{tenant_id}/operations/{operation_id} +POST /tenants/{tenant_id}/query +GET /tenants/{tenant_id}/roles +POST /tenants/{tenant_id}/roles +GET /tenants/{tenant_id}/roles/{role_name} +DELETE /tenants/{tenant_id}/roles/{role_name} +POST /tenants/{tenant_id}/roles/{role_name}/reset_password +POST /tenants/{tenant_id}/start +POST /tenants/{tenant_id}/stop +POST /psql_session/{psql_session_id} +``` + +One of the options here is to use gRPC instead of the HTTP, which has some useful features, but there are some strong points towards using plain HTTP: + +- HTTP API is easier to use for the clients +- we already have HTTP API in pageserver/safekeeper/console +- we probably want control-plane API to be similar to the console API, available in the cloud + +### Getting updates from the storage + +There can be some valid cases, when we would like to know what is changed in the storage. For example, console might want to know when user has queried and started compute and when compute was scaled to zero after that, to know how much user should pay for the service. Another example is to get info about reaching the disk space limits. Yet another example is to do analytics, such as how many users had at least one active project in a month. + +All of the above cases can happen without using the console, just by accessing compute through the proxy. + +To solve this, we can have a log of events occurring in the storage (event logs). That is very similar to operations table we have right now, the only difference is that events are immutable and we cannot change them after saving to the database. For example, we might want to have events for the following activities: + +- We finished processing some HTTP API query, such as resetting the password +- We changed some state, such as started or stopped a compute +- Operation is created +- Operation is started for the first time +- Operation is failed for the first time +- Operation is finished + +Once we save these events to the database, we can create HTTP API to subscribe to these events. That API can look like this: + +``` +GET /events/ + +{ + "events": [...], + "next_cursor": 123 +} +``` + +It should be possible to replay event logs from some point of time, to get a state of almost anything from the storage services. That means that if we maintain some state in the control-plane database and we have a reason to have the same state in the console database, it is possible by polling events from the control-plane API and changing the state in the console database according to the events. + +### Next steps + +After implementing control-plane HTTP API and starting control-plane as a separate service, we might want to think of exploiting benefits of the new architecture, such as reorganizing test infrastructure. Possible options are listed in the [Next steps](#next-steps-1). 
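To make the event-log polling described above more concrete, here is a minimal consumer sketch. It is illustrative only and not part of the patch: the `/events/` endpoint shape, the `next_cursor` field, and the `reqwest`/`serde` dependencies are assumptions based on the JSON response sketched earlier, not a finalized control-plane API.

```rust
// Hypothetical console-side poller for the proposed control-plane event log.
// Assumes a cursor-based GET /events/ endpoint returning {"events": [...], "next_cursor": N}.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct EventsPage {
    events: Vec<serde_json::Value>,
    next_cursor: u64,
}

fn poll_events(base_url: &str, mut cursor: u64) -> Result<(), Box<dyn std::error::Error>> {
    loop {
        let url = format!("{base_url}/events/?cursor={cursor}");
        let page: EventsPage = reqwest::blocking::get(url.as_str())?.json()?;
        for event in &page.events {
            // Here the console would apply the event to its own database
            // (e.g. record compute start/stop for billing).
            println!("event: {event}");
        }
        if page.events.is_empty() {
            // Caught up with the control-plane log; wait before polling again.
            std::thread::sleep(std::time::Duration::from_secs(1));
        }
        cursor = page.next_cursor;
    }
}
```

Replaying from cursor 0 would rebuild any console-side state derived from storage events, which is what makes the event log a sufficient interface between the two databases.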
+ +## Non Goals + +RFC doesn’t cover the actual cloud deployment scripts and schemas, such as terraform, ansible, k8s yaml’s and so on. + +## Impacted components + +Mostly console, but can also affect some storage service. + +## Scalability + +We should support starting several instances of the new control-plane service at the same time. + +At the same time, it should be possible to use only single instance of control-plane, which can be useful for local tests. + +## Security implications + +New control-plane service is an internal service, so no external requests can reach it. But at the same time, it contains API to do absolutely anything with any of the tenants. That means that bad internal actor can potentially read and write all of the tenants. To make this safer, we can have one of these: + +- Simple option is to protect all requests with a single private key, so that no one can make requests without having that one key. +- Another option is to have a separate token for every tenant and store these tokens in another secure place. This way it’s harder to access all tenants at once, because they have the different tokens. + +## Alternative implementation + +There was an idea to create a k8s operator for managing storage services and computes, but author of this RFC is not really familiar with it. + +Regarding less alternative ideas, there are another options for the name of the new control-plane service: + +- storage-ctl +- cloud +- cloud-ctl + +## Pros/cons of proposed approaches (TODO) + +Pros: + +- All storage features are completely open-source +- Better tests coverage, less difference between cloud and local setups +- Easier to develop storage and cloud features, because there is no need to setup console for that +- Easier to deploy storage-only services to the any cloud + +Cons: + +- All storage features are completely open-source +- Distributed services mean more code to connect different services and potential network issues +- Console needs to have a dependency on storage API, there can be complications with developing new feature in a branch +- More code to JOIN data from different services (console and control-plane) + +## Definition of Done + +We have a new control-plane service running in the k8s. Source code for that control-plane service is located in the open-source neon repo. + +## Next steps + +After we’ve reached DoD, we can make further improvements. + +First thing that can benefit from the split is local testing. The same control-plane service can implement starting computes as a local processes instead of k8s deployments. If it will also support starting pageservers/safekeepers/proxy for the local setup, then it can completely replace `./neon_local` binary, which is currently used for testing. The local testing environment can look like this: + +``` +┌─────────────────────┐ ┌───────────────────────┐ +│ │ │ Storage (local) │ +│ control-plane db │ │ │ +│ (local process) │ │ - safekeepers │ +│ │ │ - pageservers │ +└──────────▲──────────┘ │ │ + │ │ Dependencies │ +┌──────────┴──────────┐ │ │ +│ │ │ - etcd │ +│ control-plane ├────►│ - S3 │ +│ (local process) │ │ - more? 
│ +│ │ │ │ +└──────────┬──────────┘ └───────────────────────┘ + ▲ │ ▲ + │ │ │ + │ │ ┌───────────┴───────────┐ + │ │ │ │ + │ └───────────────►│ computes │ + │ │ (local processes) │ + │ │ │ +┌──────┴──────────────┐ └───────────────────────┘ +│ │ ▲ +│ proxy │ │ +│ (local process) ├─────────────────┘ +│ │ +└─────────────────────┘ +``` + +The key thing here is that control-plane local service have the same API and almost the same implementation as the one deployed in the k8s. This allows to run the same e2e tests against both cloud and local setups. + +For the python test_runner tests everything can stay mostly the same. To do that, we just need to replace `./neon_local` cli commands with API calls to the control-plane. + +The benefit here will be in having fast local tests that are really close to our cloud setup. Bugs in k8s queries are still cannot be found when running computes as a local processes, but it should be really easy to start k8s locally (for example in k3s) and run the same tests with control-plane connected to the local k8s. + +Talking about console and UI tests, after the split there should be a way to test these without spinning up all the storage locally. New control-plane service has a well-defined API, allowing us to mock it. This way we can create UI tests to verify the right calls are issued after specific UI interactions and verify that we render correct messages when API returns errors. \ No newline at end of file From d820d64e382f052ba92a736557da47728be8aa90 Mon Sep 17 00:00:00 2001 From: Em Sharnoff Date: Fri, 2 Feb 2024 14:39:20 -0800 Subject: [PATCH 06/34] Bump vm-builder v0.21.0 -> v0.23.2 (#6480) Relevant changes were all from v0.23.0: - neondatabase/autoscaling#724 - neondatabase/autoscaling#726 - neondatabase/autoscaling#732 Co-authored-by: Alexander Bayandin --- .github/workflows/build_and_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 201c77f138..2d7edf2e22 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -872,7 +872,7 @@ jobs: run: shell: sh -eu {0} env: - VM_BUILDER_VERSION: v0.21.0 + VM_BUILDER_VERSION: v0.23.2 steps: - name: Checkout From 0ac2606c8ac0b09859ce6b6a32e9e97066de0130 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Fri, 2 Feb 2024 23:45:57 +0100 Subject: [PATCH 07/34] S3 restore test: Use a workaround to enable moto's self-copy support (#6594) While working on https://github.com/getmoto/moto/pull/7303 I discovered that if you enable bucket encryption, moto allows self-copies. So we can un-ignore the test. I tried it out locally, it works great. 
Followup of #6533, part of https://github.com/neondatabase/cloud/issues/8233 --- test_runner/fixtures/pageserver/utils.py | 18 +++++++++++++++++- test_runner/regress/test_s3_restore.py | 2 -- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/test_runner/fixtures/pageserver/utils.py b/test_runner/fixtures/pageserver/utils.py index 4cfdee6e01..c2281ae25a 100644 --- a/test_runner/fixtures/pageserver/utils.py +++ b/test_runner/fixtures/pageserver/utils.py @@ -356,10 +356,26 @@ def enable_remote_storage_versioning( """ Enable S3 versioning for the remote storage """ - # local_fs has no + # local_fs has no support for versioning assert isinstance(remote, S3Storage), "localfs is currently not supported" assert remote.client is not None + # The SDK supports enabling versioning on normal S3 as well but we don't want to change + # these settings from a test in a live bucket (also, our access isn't enough nor should it be) + assert not remote.real, "Enabling storage versioning only supported on Mock S3" + + # Workaround to enable self-copy until upstream bug is fixed: https://github.com/getmoto/moto/issues/7300 + remote.client.put_bucket_encryption( + Bucket=remote.bucket_name, + ServerSideEncryptionConfiguration={ + "Rules": [ + { + "ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}, + "BucketKeyEnabled": False, + }, + ] + }, + ) # Note that this doesnt use pagination, so list is not guaranteed to be exhaustive. response = remote.client.put_bucket_versioning( Bucket=remote.bucket_name, diff --git a/test_runner/regress/test_s3_restore.py b/test_runner/regress/test_s3_restore.py index 188d8a3b33..aaa33f0bcb 100644 --- a/test_runner/regress/test_s3_restore.py +++ b/test_runner/regress/test_s3_restore.py @@ -1,7 +1,6 @@ import time from datetime import datetime, timezone -import pytest from fixtures.neon_fixtures import ( NeonEnvBuilder, PgBin, @@ -32,7 +31,6 @@ def test_tenant_s3_restore( remote_storage = neon_env_builder.pageserver_remote_storage assert remote_storage, "remote storage not configured" enable_remote_storage_versioning(remote_storage) - pytest.skip("moto doesn't support self-copy: https://github.com/getmoto/moto/issues/7300") env = neon_env_builder.init_start(initial_tenant_conf=MANY_SMALL_LAYERS_TENANT_CONFIG) env.pageserver.allowed_errors.extend( From 3d1b08496a066a1784b179bfee6cb41b6ac56aeb Mon Sep 17 00:00:00 2001 From: Clarence Date: Sat, 3 Feb 2024 01:59:39 +0100 Subject: [PATCH 08/34] Update words in docs for better readability (#6600) ## Problem Found typos while reading the docs ## Summary of changes Fixed the typos found --- docs/rfcs/018-storage-messaging-2.md | 6 +++--- docs/rfcs/019-tenant-timeline-lifecycles.md | 4 ++-- docs/rfcs/020-pageserver-s3-coordination.md | 12 ++++++------ docs/rfcs/022-pageserver-delete-from-s3.md | 18 +++++++++--------- ...he-state-of-pageserver-tenant-relocation.md | 4 ++-- docs/rfcs/024-extension-loading.md | 2 +- docs/rfcs/025-generation-numbers.md | 8 ++++---- docs/rfcs/026-pageserver-s3-mvcc.md | 12 ++++++------ ...-consistent-layer-map-through-index-part.md | 16 ++++++++-------- docs/rfcs/028-pageserver-migration.md | 2 +- .../029-pageserver-wal-disaster-recovery.md | 4 ++-- docs/rfcs/030-vectored-timeline-get.md | 2 +- 12 files changed, 45 insertions(+), 45 deletions(-) diff --git a/docs/rfcs/018-storage-messaging-2.md b/docs/rfcs/018-storage-messaging-2.md index 364f62dd2e..2419dd5fc5 100644 --- a/docs/rfcs/018-storage-messaging-2.md +++ b/docs/rfcs/018-storage-messaging-2.md @@ -78,7 +78,7 @@ with grpc streams 
and tokio mpsc channels. The implementation description is at It is just 500 lines of code and core functionality is complete. 1-1 pub sub gives about 120k received messages per second; having multiple subscribers in -different connecitons quickly scales to 1 million received messages per second. +different connections quickly scales to 1 million received messages per second. I had concerns about many concurrent streams in singe connection, but 2^20 subscribers still work (though eat memory, with 10 publishers 20GB are consumed; in this implementation each publisher holds full copy of all subscribers). There @@ -95,12 +95,12 @@ other members, with best-effort this is simple. ### Security implications Communication happens in a private network that is not exposed to users; -additionaly we can add auth to the broker. +additionally we can add auth to the broker. ## Alternative: get existing pub-sub We could take some existing pub sub solution, e.g. RabbitMQ, Redis. But in this -case IMV simplicity of our own outweights external dependency costs (RabbitMQ is +case IMV simplicity of our own outweighs external dependency costs (RabbitMQ is much more complicated and needs VM; Redis Rust client maintenance is not ideal...). Also note that projects like CockroachDB and TiDB are based on gRPC as well. diff --git a/docs/rfcs/019-tenant-timeline-lifecycles.md b/docs/rfcs/019-tenant-timeline-lifecycles.md index 2734bf17b9..558b5335e7 100644 --- a/docs/rfcs/019-tenant-timeline-lifecycles.md +++ b/docs/rfcs/019-tenant-timeline-lifecycles.md @@ -74,7 +74,7 @@ TenantMaintenanceGuard: Like ActiveTenantGuard, but can be held even when the tenant is not in Active state. Used for operations like attach/detach. Perhaps allow only one such guard on a Tenant at a time. -Similarly for Timelines. We don't currentl have a "state" on Timeline, but I think +Similarly for Timelines. We don't currently have a "state" on Timeline, but I think we need at least two states: Active and Stopping. The Stopping state is used at deletion, to prevent new TimelineActiveGuards from appearing, while you wait for existing TimelineActiveGuards to die out. @@ -85,7 +85,7 @@ have a TenantActiveGuard, and the tenant's state changes from Active to Stopping, the is_shutdown_requested() function should return true, and shutdown_watcher() future should return. -This signaling doesn't neessarily need to cover all cases. For example, if you +This signaling doesn't necessarily need to cover all cases. For example, if you have a block of code in spawn_blocking(), it might be acceptable if is_shutdown_requested() doesn't return true even though the tenant is in Stopping state, as long as the code finishes reasonably fast. diff --git a/docs/rfcs/020-pageserver-s3-coordination.md b/docs/rfcs/020-pageserver-s3-coordination.md index 5e2912ba99..90ba3a6f4d 100644 --- a/docs/rfcs/020-pageserver-s3-coordination.md +++ b/docs/rfcs/020-pageserver-s3-coordination.md @@ -37,7 +37,7 @@ sequenceDiagram ``` At this point it is not possible to restore from index, it contains L2 which -is no longer available in s3 and doesnt contain L3 added by compaction by the +is no longer available in s3 and doesn't contain L3 added by compaction by the first pageserver. So if any of the pageservers restart initial sync will fail (or in on-demand world it will fail a bit later during page request from missing layer) @@ -74,7 +74,7 @@ One possible solution for relocation case is to orchestrate background jobs from outside. 
The oracle who runs migration can turn off background jobs on PS1 before migration and then run migration -> enable them on PS2. The problem comes if migration fails. In this case in order to resume background jobs -oracle needs to guarantee that PS2 doesnt run background jobs and if it doesnt +oracle needs to guarantee that PS2 doesn't run background jobs and if it doesn't respond then PS1 is stuck unable to run compaction/gc. This cannot be solved without human ensuring that no upload from PS2 can happen. In order to be able to resolve this automatically CAS is required on S3 side so pageserver can @@ -128,7 +128,7 @@ During discussion it seems that we converged on the approach consisting of: whether we need to apply change to the index state or not. - Responsibility for running background jobs is assigned externally. Pageserver keeps locally persistent flag for each tenant that indicates whether this - pageserver is considered as primary one or not. TODO what happends if we + pageserver is considered as primary one or not. TODO what happens if we crash and cannot start for some extended period of time? Control plane can assign ownership to some other pageserver. Pageserver needs some way to check if its still the blessed one. Maybe by explicit request to control plane on @@ -138,7 +138,7 @@ Requirement for deterministic layer generation was considered overly strict because of two reasons: - It can limit possible optimizations e g when pageserver wants to reshuffle - some data locally and doesnt want to coordinate this + some data locally and doesn't want to coordinate this - The deterministic algorithm itself can change so during deployments for some time there will be two different version running at the same time which can cause non determinism @@ -164,7 +164,7 @@ sequenceDiagram CP->>PS1: Yes deactivate CP PS1->>S3: Fetch PS1 index. - note over PS1: Continue operations, start backround jobs + note over PS1: Continue operations, start background jobs note over PS1,PS2: PS1 starts up and still and is not a leader anymore PS1->>CP: Am I still the leader for Tenant X? CP->>PS1: No @@ -203,7 +203,7 @@ sequenceDiagram ### Eviction When two pageservers operate on a tenant for extended period of time follower -doesnt perform write operations in s3. When layer is evicted follower relies +doesn't perform write operations in s3. When layer is evicted follower relies on updates from primary to get info about layers it needs to cover range for evicted layer. diff --git a/docs/rfcs/022-pageserver-delete-from-s3.md b/docs/rfcs/022-pageserver-delete-from-s3.md index 260e549670..c237a3edb8 100644 --- a/docs/rfcs/022-pageserver-delete-from-s3.md +++ b/docs/rfcs/022-pageserver-delete-from-s3.md @@ -4,7 +4,7 @@ Created on 08.03.23 ## Motivation -Currently we dont delete pageserver part of the data from s3 when project is deleted. (The same is true for safekeepers, but this outside of the scope of this RFC). +Currently we don't delete pageserver part of the data from s3 when project is deleted. (The same is true for safekeepers, but this outside of the scope of this RFC). This RFC aims to spin a discussion to come to a robust deletion solution that wont put us in into a corner for features like postponed deletion (when we keep data for user to be able to restore a project if it was deleted by accident) @@ -75,9 +75,9 @@ Remote one is needed for cases when pageserver is lost during deletion so other Why local mark file is needed? 
-If we dont have one, we have two choices, delete local data before deleting the remote part or do that after. +If we don't have one, we have two choices, delete local data before deleting the remote part or do that after. -If we delete local data before remote then during restart pageserver wont pick up remote tenant at all because nothing is available locally (pageserver looks for remote conuterparts of locally available tenants). +If we delete local data before remote then during restart pageserver wont pick up remote tenant at all because nothing is available locally (pageserver looks for remote counterparts of locally available tenants). If we delete local data after remote then at the end of the sequence when remote mark file is deleted if pageserver restart happens then the state is the same to situation when pageserver just missing data on remote without knowing the fact that this data is intended to be deleted. In this case the current behavior is upload everything local-only to remote. @@ -145,7 +145,7 @@ sequenceDiagram CP->>PS: Retry delete tenant PS->>CP: Not modified else Mark is missing - note over PS: Continue to operate the tenant as if deletion didnt happen + note over PS: Continue to operate the tenant as if deletion didn't happen note over CP: Eventually console should
retry delete request @@ -168,7 +168,7 @@ sequenceDiagram PS->>CP: True ``` -Similar sequence applies when both local and remote marks were persisted but Control Plane still didnt receive a response. +Similar sequence applies when both local and remote marks were persisted but Control Plane still didn't receive a response. If pageserver crashes after both mark files were deleted then it will reply to control plane status poll request with 404 which should be treated by control plane as success. @@ -187,7 +187,7 @@ If pageseserver is lost then the deleted tenant should be attached to different ##### Restrictions for tenant that is in progress of being deleted -I propose to add another state to tenant/timeline - PendingDelete. This state shouldnt allow executing any operations aside from polling the deletion status. +I propose to add another state to tenant/timeline - PendingDelete. This state shouldn't allow executing any operations aside from polling the deletion status. #### Summary @@ -237,7 +237,7 @@ New branch gets created PS1 starts up (is it possible or we just recycle it?) PS1 is unaware of the new branch. It can either fall back to s3 ls, or ask control plane. -So here comes the dependency of storage on control plane. During restart storage needs to know which timelines are valid for operation. If there is nothing on s3 that can answer that question storage neeeds to ask control plane. +So here comes the dependency of storage on control plane. During restart storage needs to know which timelines are valid for operation. If there is nothing on s3 that can answer that question storage needs to ask control plane. ### Summary @@ -250,7 +250,7 @@ Cons: Pros: -- Easier to reason about if you dont have to account for pageserver restarts +- Easier to reason about if you don't have to account for pageserver restarts ### Extra notes @@ -262,7 +262,7 @@ Delayed deletion can be done with both approaches. As discussed with Anna (@step After discussion in comments I see that we settled on two options (though a bit different from ones described in rfc). First one is the same - pageserver owns as much as possible. The second option is that pageserver owns markers thing, but actual deletion happens in control plane by repeatedly calling ls + delete. -To my mind the only benefit of the latter approach is possible code reuse between safekeepers and pageservers. Otherwise poking around integrating s3 library into control plane, configuring shared knowledge abouth paths in s3 - are the downsides. Another downside of relying on control plane is the testing process. Control plane resides in different repository so it is quite hard to test pageserver related changes there. e2e test suite there doesnt support shutting down pageservers, which are separate docker containers there instead of just processes. +To my mind the only benefit of the latter approach is possible code reuse between safekeepers and pageservers. Otherwise poking around integrating s3 library into control plane, configuring shared knowledge about paths in s3 - are the downsides. Another downside of relying on control plane is the testing process. Control plane resides in different repository so it is quite hard to test pageserver related changes there. e2e test suite there doesn't support shutting down pageservers, which are separate docker containers there instead of just processes. With pageserver owning everything we still give the retry logic to control plane but its easier to duplicate if needed compared to sharing inner s3 workings. 
We will have needed tests for retry logic in neon repo. diff --git a/docs/rfcs/023-the-state-of-pageserver-tenant-relocation.md b/docs/rfcs/023-the-state-of-pageserver-tenant-relocation.md index 836c91fb25..97e62bf8c6 100644 --- a/docs/rfcs/023-the-state-of-pageserver-tenant-relocation.md +++ b/docs/rfcs/023-the-state-of-pageserver-tenant-relocation.md @@ -75,7 +75,7 @@ sequenceDiagram ``` At this point it is not possible to restore the state from index, it contains L2 which -is no longer available in s3 and doesnt contain L3 added by compaction by the +is no longer available in s3 and doesn't contain L3 added by compaction by the first pageserver. So if any of the pageservers restart, initial sync will fail (or in on-demand world it will fail a bit later during page request from missing layer) @@ -171,7 +171,7 @@ sequenceDiagram Another problem is a possibility of concurrent branch creation calls. -I e during migration create_branch can be called on old pageserver and newly created branch wont be seen on new pageserver. Prior art includes prototyping an approach of trying to mirror such branches, but currently it lost its importance, because now attach is fast because we dont need to download all data, and additionally to the best of my knowledge of control plane internals (cc @ololobus to confirm) operations on one project are executed sequentially, so it is not possible to have such case. So branch create operation will be executed only when relocation is completed. As a safety measure we can forbid branch creation for tenants that are in readonly remote state. +I e during migration create_branch can be called on old pageserver and newly created branch wont be seen on new pageserver. Prior art includes prototyping an approach of trying to mirror such branches, but currently it lost its importance, because now attach is fast because we don't need to download all data, and additionally to the best of my knowledge of control plane internals (cc @ololobus to confirm) operations on one project are executed sequentially, so it is not possible to have such case. So branch create operation will be executed only when relocation is completed. As a safety measure we can forbid branch creation for tenants that are in readonly remote state. ## Simplistic approach diff --git a/docs/rfcs/024-extension-loading.md b/docs/rfcs/024-extension-loading.md index 26ba4f7927..7e243b23e3 100644 --- a/docs/rfcs/024-extension-loading.md +++ b/docs/rfcs/024-extension-loading.md @@ -55,7 +55,7 @@ When PostgreSQL requests a file, `compute_ctl` downloads it. PostgreSQL requests files in the following cases: - When loading a preload library set in `local_preload_libraries` - When explicitly loading a library with `LOAD` -- Wnen creating extension with `CREATE EXTENSION` (download sql scripts, (optional) extension data files and (optional) library files))) +- When creating extension with `CREATE EXTENSION` (download sql scripts, (optional) extension data files and (optional) library files))) #### Summary diff --git a/docs/rfcs/025-generation-numbers.md b/docs/rfcs/025-generation-numbers.md index 6a0131c66a..dfc8529d2d 100644 --- a/docs/rfcs/025-generation-numbers.md +++ b/docs/rfcs/025-generation-numbers.md @@ -26,7 +26,7 @@ plane guarantee prevents robust response to failures, as if a pageserver is unre we may not detach from it. The mechanism in this RFC fixes this, by making it safe to attach to a new, different pageserver even if an unresponsive pageserver may be running. 
-Futher, lack of safety during split-brain conditions blocks two important features where occasional +Further lack of safety during split-brain conditions blocks two important features where occasional split-brain conditions are part of the design assumptions: - seamless tenant migration ([RFC PR](https://github.com/neondatabase/neon/pull/5029)) @@ -490,11 +490,11 @@ The above makes it safe for control plane to change the assignment of tenant to pageserver in control plane while a timeline creation is ongoing. The reason is that the creation request against the new assigned pageserver uses a new generation number. However, care must be taken by control plane -to ensure that a "timeline creation successul" response from some pageserver +to ensure that a "timeline creation successful" response from some pageserver is checked for the pageserver's generation for that timeline's tenant still being the latest. If it is not the latest, the response does not constitute a successful timeline creation. It is acceptable to discard such responses, the scrubber will clean up the S3 state. -It is better to issue a timelien deletion request to the stale attachment. +It is better to issue a timeline deletion request to the stale attachment. #### Timeline Deletion @@ -633,7 +633,7 @@ As outlined in the Part 1 on correctness, it is critical that deletions are only executed once the key is not referenced anywhere in S3. This property is obviously upheld by the scheme above. -#### We Accept Object Leakage In Acceptable Circumcstances +#### We Accept Object Leakage In Acceptable Circumstances If we crash in the flow above between (2) and (3), we lose track of unreferenced object. Further, enqueuing a single to the persistent queue may not be durable immediately to amortize cost of flush to disk. diff --git a/docs/rfcs/026-pageserver-s3-mvcc.md b/docs/rfcs/026-pageserver-s3-mvcc.md index 2a8c925781..473d5a2bd0 100644 --- a/docs/rfcs/026-pageserver-s3-mvcc.md +++ b/docs/rfcs/026-pageserver-s3-mvcc.md @@ -162,7 +162,7 @@ struct Tenant { ... txns: HashMap, - // the most recently started txn's id; only most recently sarted can win + // the most recently started txn's id; only most recently started can win next_winner_txn: Option, } struct Transaction { @@ -186,7 +186,7 @@ A transaction T in state Committed has subsequent transactions that may or may n So, for garbage collection, we need to assess transactions in state Committed and RejectAcknowledged: -- Commited: delete objects on the deadlist. +- Committed: delete objects on the deadlist. - We don’t need a LIST request here, the deadlist is sufficient. So, it’s really cheap. - This is **not true MVCC garbage collection**; by deleting the objects on Committed transaction T ’s deadlist, we might delete data referenced by other transactions that were concurrent with T, i.e., they started while T was still open. However, the fact that T is committed means that the other transactions are RejectPending or RejectAcknowledged, so, they don’t matter. Pageservers executing these doomed RejectPending transactions must handle 404 for GETs gracefully, e.g., by trying to commit txn so they observe the rejection they’re destined to get anyways. 404’s for RejectAcknowledged is handled below. - RejectAcknowledged: delete all objects created in that txn, and discard deadlists. @@ -242,15 +242,15 @@ If a pageserver is unresponsive from Control Plane’s / Compute’s perspective At this point, availability is restored and user pain relieved. 
-What’s left is to somehow close the doomed transaction of the unresponsive pageserver, so that it beomes RejectAcknowledged, and GC can make progress. Since S3 is cheap, we can afford to wait a really long time here, especially if we put a soft bound on the amount of data a transaction may produce before it must commit. Procedure: +What’s left is to somehow close the doomed transaction of the unresponsive pageserver, so that it becomes RejectAcknowledged, and GC can make progress. Since S3 is cheap, we can afford to wait a really long time here, especially if we put a soft bound on the amount of data a transaction may produce before it must commit. Procedure: 1. Ensure the unresponsive pageserver is taken out of rotation for new attachments. That probably should happen as part of the routine above. 2. Make a human operator investigate decide what to do (next morning, NO ONCALL ALERT): 1. Inspect the instance, investigate logs, understand root cause. 2. Try to re-establish connectivity between pageserver and Control Plane so that pageserver can retry commits, get rejected, ack rejection ⇒ enable GC. - 3. Use below procedure to decomission pageserver. + 3. Use below procedure to decommission pageserver. -### Decomissioning A Pageserver (Dead or Alive-but-Unrespsonive) +### Decommissioning A Pageserver (Dead or Alive-but-Unresponsive) The solution, enabled by this proposal: @@ -310,7 +310,7 @@ Issues that we discussed: 1. In abstract terms, this proposal provides a linearized history for a given S3 prefix. 2. In concrete terms, this proposal provides a linearized history per tenant. 3. There can be multiple writers at a given time, but only one of them will win to become part of the linearized history. -4. ************************************************************************************Alternative ideas mentioned during meetings that should be turned into a written prospoal like this one:************************************************************************************ +4. ************************************************************************************Alternative ideas mentioned during meetings that should be turned into a written proposal like this one:************************************************************************************ 1. @Dmitry Rodionov : having linearized storage of index_part.json in some database that allows serializable transactions / atomic compare-and-swap PUT 2. @Dmitry Rodionov : 3. @Stas : something like this scheme, but somehow find a way to equate attachment duration with transaction duration, without losing work if pageserver dies months after attachment. diff --git a/docs/rfcs/027-crash-consistent-layer-map-through-index-part.md b/docs/rfcs/027-crash-consistent-layer-map-through-index-part.md index 2c6b46eabe..e18b7c16c9 100644 --- a/docs/rfcs/027-crash-consistent-layer-map-through-index-part.md +++ b/docs/rfcs/027-crash-consistent-layer-map-through-index-part.md @@ -54,7 +54,7 @@ If the compaction algorithm doesn't change between the two compaction runs, is d *However*: 1. the file size of the overwritten L1s may not be identical, and 2. the bit pattern of the overwritten L1s may not be identical, and, -3. in the future, we may want to make the compaction code non-determinstic, influenced by past access patterns, or otherwise change it, resulting in L1 overwrites with a different set of delta records than before the overwrite +3. 
in the future, we may want to make the compaction code non-deterministic, influenced by past access patterns, or otherwise change it, resulting in L1 overwrites with a different set of delta records than before the overwrite The items above are a problem for the [split-brain protection RFC](https://github.com/neondatabase/neon/pull/4919) because it assumes that layer files in S3 are only ever deleted, but never replaced (overPUTted). @@ -63,7 +63,7 @@ But node B based its world view on the version of node A's `index_part.json` fro That earlier `index_part.json`` contained the file size of the pre-overwrite L1. If the overwritten L1 has a different file size, node B will refuse to read data from the overwritten L1. Effectively, the data in the L1 has become inaccessible to node B. -If node B already uploaded an index part itself, all subsequent attachments will use node B's index part, and run into the same probem. +If node B already uploaded an index part itself, all subsequent attachments will use node B's index part, and run into the same problem. If we ever introduce checksums instead of checking just the file size, then a mismatching bit pattern (2) will cause similar problems. @@ -121,7 +121,7 @@ Multi-object changes that previously created and removed files in timeline dir a * atomic `index_part.json` update in S3, as per guarantee that S3 PUT is atomic * local timeline dir state: * irrelevant for layer map content => irrelevant for atomic updates / crash consistency - * if we crash after index part PUT, local layer files will be used, so, no on-demand downloads neede for them + * if we crash after index part PUT, local layer files will be used, so, no on-demand downloads needed for them * if we crash before index part PUT, local layer files will be deleted ## Trade-Offs @@ -140,7 +140,7 @@ Assuming upload queue allows for unlimited queue depth (that's what it does toda * wal ingest: currently unbounded * L0 => L1 compaction: CPU time proportional to `O(sum(L0 size))` and upload work proportional to `O()` * Compaction threshold is 10 L0s and each L0 can be up to 256M in size. Target size for L1 is 128M. - * In practive, most L0s are tiny due to 10minute `DEFAULT_CHECKPOINT_TIMEOUT`. + * In practice, most L0s are tiny due to 10minute `DEFAULT_CHECKPOINT_TIMEOUT`. * image layer generation: CPU time `O(sum(input data))` + upload work `O(sum(new image layer size))` * I have no intuition how expensive / long-running it is in reality. * gc: `update_gc_info`` work (not substantial, AFAIK) @@ -158,7 +158,7 @@ Pageserver crashes are very rare ; it would likely be acceptable to re-do the lo However, regular pageserver restart happen frequently, e.g., during weekly deploys. In general, pageserver restart faces the problem of tenants that "take too long" to shut down. -They are a problem because other tenants that shut down quickly are unavailble while we wait for the slow tenants to shut down. +They are a problem because other tenants that shut down quickly are unavailable while we wait for the slow tenants to shut down. We currently allot 10 seconds for graceful shutdown until we SIGKILL the pageserver process (as per `pageserver.service` unit file). A longer budget would expose tenants that are done early to a longer downtime. A short budget would risk throwing away more work that'd have to be re-done after restart. 
@@ -236,7 +236,7 @@ tenants/$tenant/timelines/$timeline/$key_and_lsn_range tenants/$tenant/timelines/$timeline/$layer_file_id-$key_and_lsn_range ``` -To guarantee uniqueness, the unqiue number is a sequence number, stored in `index_part.json`. +To guarantee uniqueness, the unique number is a sequence number, stored in `index_part.json`. This alternative does not solve atomic layer map updates. In our crash-during-compaction scenario above, the compaction run after the crash will not overwrite the L1s, but write/PUT new files with new sequence numbers. @@ -246,11 +246,11 @@ We'd need to write a deduplication pass that checks if perfectly overlapping lay However, this alternative is appealing because it systematically prevents overwrites at a lower level than this RFC. So, this alternative is sufficient for the needs of the split-brain safety RFC (immutable layer files locally and in S3). -But it doesn't solve the problems with crash-during-compaction outlined earlier in this RFC, and in fact, makes it much more accute. +But it doesn't solve the problems with crash-during-compaction outlined earlier in this RFC, and in fact, makes it much more acute. The proposed design in this RFC addresses both. So, if this alternative sounds appealing, we should implement the proposal in this RFC first, then implement this alternative on top. -That way, we avoid a phase where the crash-during-compaction problem is accute. +That way, we avoid a phase where the crash-during-compaction problem is acute. ## Related issues diff --git a/docs/rfcs/028-pageserver-migration.md b/docs/rfcs/028-pageserver-migration.md index f708f641aa..17ef9aef52 100644 --- a/docs/rfcs/028-pageserver-migration.md +++ b/docs/rfcs/028-pageserver-migration.md @@ -596,4 +596,4 @@ pageservers are updated to be aware of it. As well as simplifying implementation, putting heatmaps in S3 will be useful for future analytics purposes -- gathering aggregated statistics on activity -pattersn across many tenants may be done directly from data in S3. +patterns across many tenants may be done directly from data in S3. diff --git a/docs/rfcs/029-pageserver-wal-disaster-recovery.md b/docs/rfcs/029-pageserver-wal-disaster-recovery.md index 15ebd72bfe..229e40100e 100644 --- a/docs/rfcs/029-pageserver-wal-disaster-recovery.md +++ b/docs/rfcs/029-pageserver-wal-disaster-recovery.md @@ -147,7 +147,7 @@ Separating corrupt writes from non-corrupt ones is a hard problem in general, and if the application was involved in making the corrupt write, a recovery would also involve the application. Therefore, corruption that has made it into the WAL is outside of the scope of this feature. However, the WAL replay can be -issued to right before the point in time where the corruption occured. Then the +issued to right before the point in time where the corruption occurred. Then the data loss is isolated to post-corruption writes only. ## Impacted components (e.g. pageserver, safekeeper, console, etc) @@ -161,7 +161,7 @@ limits and billing we apply to existing timelines. ## Proposed implementation -The first problem to keep in mind is the reproducability of `initdb`. +The first problem to keep in mind is the reproducibility of `initdb`. So an initial step would be to upload `initdb` snapshots to S3. 
After that, we'd have the endpoint spawn a background process which diff --git a/docs/rfcs/030-vectored-timeline-get.md b/docs/rfcs/030-vectored-timeline-get.md index d4017471b7..093a964f38 100644 --- a/docs/rfcs/030-vectored-timeline-get.md +++ b/docs/rfcs/030-vectored-timeline-get.md @@ -69,7 +69,7 @@ However, unlike above, an ideal solution will * This means, read each `DiskBtree` page at most once. * Facilitate merging of the reads we issue to the OS and eventually NVMe. -Each of these items above represents a signficant amount of work. +Each of these items above represents a significant amount of work. ## Performance From aac8eb2c364e4386674b9d9e99a09e3f38fe31a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Sat, 3 Feb 2024 02:16:20 +0100 Subject: [PATCH 09/34] Minor logging improvements (#6593) * log when `lsn_by_timestamp` finished together with its result * add back logging of the layer name as suggested in https://github.com/neondatabase/neon/pull/6549#discussion_r1475756808 --- pageserver/src/http/routes.rs | 11 +++++++++-- pageserver/src/tenant/timeline.rs | 1 + 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 57ee746726..5735489742 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -682,7 +682,7 @@ async fn get_lsn_by_timestamp_handler( let result = timeline .find_lsn_for_timestamp(timestamp_pg, &cancel, &ctx) .await?; - #[derive(serde::Serialize)] + #[derive(serde::Serialize, Debug)] struct Result { lsn: Lsn, kind: &'static str, @@ -693,7 +693,14 @@ async fn get_lsn_by_timestamp_handler( LsnForTimestamp::Past(lsn) => (lsn, "past"), LsnForTimestamp::NoData(lsn) => (lsn, "nodata"), }; - json_response(StatusCode::OK, Result { lsn, kind }) + let result = Result { lsn, kind }; + tracing::info!( + lsn=?result.lsn, + kind=%result.kind, + timestamp=%timestamp_raw, + "lsn_by_timestamp finished" + ); + json_response(StatusCode::OK, result) } async fn get_timestamp_of_lsn_handler( diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 0ffe0b6418..0ba3fe728a 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -2843,6 +2843,7 @@ impl Timeline { } /// Flush one frozen in-memory layer to disk, as a new delta layer. 
+ #[instrument(skip_all, fields(layer=%frozen_layer))] async fn flush_frozen_layer( self: &Arc, frozen_layer: Arc, From c96aead5029a7d4d2cc026f2d05c0c6286af612a Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Fri, 2 Feb 2024 22:37:43 +0200 Subject: [PATCH 10/34] Reorganize .dockerignore Author: Alexander Bayandin --- .dockerignore | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.dockerignore b/.dockerignore index ae0ad8fd77..8b378b5dab 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,27 +1,27 @@ * -!rust-toolchain.toml -!Cargo.toml +# Files !Cargo.lock +!Cargo.toml !Makefile +!rust-toolchain.toml +!scripts/combine_control_files.py +!scripts/ninstall.sh +!vm-cgconfig.conf +# Directories !.cargo/ !.config/ -!control_plane/ !compute_tools/ +!control_plane/ !libs/ +!neon_local/ !pageserver/ !pgxn/ !proxy/ -!safekeeper/ !s3_scrubber/ +!safekeeper/ !storage_broker/ !trace/ -!vendor/postgres-v14/ -!vendor/postgres-v15/ -!vendor/postgres-v16/ +!vendor/postgres-*/ !workspace_hack/ -!neon_local/ -!scripts/ninstall.sh -!scripts/combine_control_files.py -!vm-cgconfig.conf From 647b85fc15a31861608dfe767b625ce889471359 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Fri, 2 Feb 2024 22:28:45 +0200 Subject: [PATCH 11/34] Update pgvector to v0.6.0, third attempt This includes a compatibility patch that is needed because pgvector now skips WAL-logging during the index build, and WAL-logs the index only in one go at the end. That's how GIN, GiST and SP-GIST index builds work in core PostgreSQL too, but we need some Neon-specific calls to mark the beginning and end of those build phases. pgvector is the first index AM that does that with parallel workers, so I had to modify those functions in the Neon extension to be aware of parallel workers. Only the leader needs to create the underlying file and perform the WAL-logging. (In principle, the parallel workers could participate in the WAL-logging too, but pgvector doesn't do that. This will need some further work if that changes). The previous attempt at this (#6592) missed that parallel workers needed those changes, and segfaulted in parallel build that spilled to disk. Testing ------- We don't have a place for regression tests of extensions at the moment. I tested this manually with the following script: ``` CREATE EXTENSION IF NOT EXISTS vector; DROP TABLE IF EXISTS tst; CREATE TABLE tst (i serial, v vector(3)); INSERT INTO tst (v) SELECT ARRAY[random(), random(), random()] FROM generate_series(1, 15000) g; -- Serial build, in memory ALTER TABLE tst SET (parallel_workers=0); SET maintenance_work_mem='50 MB'; CREATE INDEX idx ON tst USING hnsw (v vector_l2_ops); -- Test that the index works. (The table contents are random, and the -- search is approximate anyway, so we cannot check the exact values. 
-- For now, just eyeball that they look reasonable) set enable_seqscan=off; explain SELECT * FROM tst ORDER BY v <-> ARRAY[0, 0, 0]::vector LIMIT 5; SELECT * FROM tst ORDER BY v <-> ARRAY[0, 0, 0]::vector LIMIT 5; DROP INDEX idx; -- Serial build, spills to on disk ALTER TABLE tst SET (parallel_workers=0); SET maintenance_work_mem='5 MB'; CREATE INDEX idx ON tst USING hnsw (v vector_l2_ops); SELECT * FROM tst ORDER BY v <-> ARRAY[0, 0, 0]::vector LIMIT 5; DROP INDEX idx; -- Parallel build, in memory ALTER TABLE tst SET (parallel_workers=4); SET maintenance_work_mem='50 MB'; CREATE INDEX idx ON tst USING hnsw (v vector_l2_ops); SELECT * FROM tst ORDER BY v <-> ARRAY[0, 0, 0]::vector LIMIT 5; DROP INDEX idx; -- Parallel build, spills to disk ALTER TABLE tst SET (parallel_workers=4); SET maintenance_work_mem='5 MB'; CREATE INDEX idx ON tst USING hnsw (v vector_l2_ops); SELECT * FROM tst ORDER BY v <-> ARRAY[0, 0, 0]::vector LIMIT 5; DROP INDEX idx; ``` --- .dockerignore | 1 + Dockerfile.compute-node | 7 +++- patches/pgvector.patch | 78 ++++++++++++++++++++++++++++++++++++++ pgxn/neon/pagestore_smgr.c | 19 +++++++++- 4 files changed, 101 insertions(+), 4 deletions(-) create mode 100644 patches/pgvector.patch diff --git a/.dockerignore b/.dockerignore index 8b378b5dab..29abdc37aa 100644 --- a/.dockerignore +++ b/.dockerignore @@ -17,6 +17,7 @@ !libs/ !neon_local/ !pageserver/ +!patches/ !pgxn/ !proxy/ !s3_scrubber/ diff --git a/Dockerfile.compute-node b/Dockerfile.compute-node index d91c7cfd72..b13225172d 100644 --- a/Dockerfile.compute-node +++ b/Dockerfile.compute-node @@ -241,9 +241,12 @@ RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz - FROM build-deps AS vector-pg-build COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ -RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.5.1.tar.gz -O pgvector.tar.gz && \ - echo "cc7a8e034a96e30a819911ac79d32f6bc47bdd1aa2de4d7d4904e26b83209dc8 pgvector.tar.gz" | sha256sum --check && \ +COPY patches/pgvector.patch /pgvector.patch + +RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.6.0.tar.gz -O pgvector.tar.gz && \ + echo "b0cf4ba1ab016335ac8fb1cada0d2106235889a194fffeece217c5bda90b2f19 pgvector.tar.gz" | sha256sum --check && \ mkdir pgvector-src && cd pgvector-src && tar xvzf ../pgvector.tar.gz --strip-components=1 -C . && \ + patch -p1 < /pgvector.patch && \ make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/vector.control diff --git a/patches/pgvector.patch b/patches/pgvector.patch new file mode 100644 index 0000000000..84ac6644c5 --- /dev/null +++ b/patches/pgvector.patch @@ -0,0 +1,78 @@ +From 0b0194a57bd0f3598bd57dbedd0df3932330169d Mon Sep 17 00:00:00 2001 +From: Heikki Linnakangas +Date: Fri, 2 Feb 2024 22:26:45 +0200 +Subject: [PATCH 1/1] Make v0.6.0 work with Neon + +Now that the WAL-logging happens as a separate step at the end of the +build, we need a few neon-specific hints to make it work. 
+--- + src/hnswbuild.c | 36 ++++++++++++++++++++++++++++++++++++ + 1 file changed, 36 insertions(+) + +diff --git a/src/hnswbuild.c b/src/hnswbuild.c +index 680789b..ec54dea 100644 +--- a/src/hnswbuild.c ++++ b/src/hnswbuild.c +@@ -840,9 +840,17 @@ HnswParallelBuildMain(dsm_segment *seg, shm_toc *toc) + + hnswarea = shm_toc_lookup(toc, PARALLEL_KEY_HNSW_AREA, false); + ++#ifdef NEON_SMGR ++ smgr_start_unlogged_build(RelationGetSmgr(indexRel)); ++#endif ++ + /* Perform inserts */ + HnswParallelScanAndInsert(heapRel, indexRel, hnswshared, hnswarea, false); + ++#ifdef NEON_SMGR ++ smgr_finish_unlogged_build_phase_1(RelationGetSmgr(indexRel)); ++#endif ++ + /* Close relations within worker */ + index_close(indexRel, indexLockmode); + table_close(heapRel, heapLockmode); +@@ -1089,13 +1097,41 @@ BuildIndex(Relation heap, Relation index, IndexInfo *indexInfo, + SeedRandom(42); + #endif + ++#ifdef NEON_SMGR ++ smgr_start_unlogged_build(RelationGetSmgr(index)); ++#endif ++ + InitBuildState(buildstate, heap, index, indexInfo, forkNum); + + BuildGraph(buildstate, forkNum); + ++#ifdef NEON_SMGR ++ smgr_finish_unlogged_build_phase_1(RelationGetSmgr(index)); ++#endif ++ + if (RelationNeedsWAL(index)) ++ { + log_newpage_range(index, forkNum, 0, RelationGetNumberOfBlocks(index), true); + ++#ifdef NEON_SMGR ++ { ++#if PG_VERSION_NUM >= 160000 ++ RelFileLocator rlocator = RelationGetSmgr(index)->smgr_rlocator.locator; ++#else ++ RelFileNode rlocator = RelationGetSmgr(index)->smgr_rnode.node; ++#endif ++ ++ SetLastWrittenLSNForBlockRange(XactLastRecEnd, rlocator, ++ MAIN_FORKNUM, 0, RelationGetNumberOfBlocks(index)); ++ SetLastWrittenLSNForRelation(XactLastRecEnd, rlocator, MAIN_FORKNUM); ++ } ++#endif ++ } ++ ++#ifdef NEON_SMGR ++ smgr_end_unlogged_build(RelationGetSmgr(index)); ++#endif ++ + FreeBuildState(buildstate); + } + +-- +2.39.2 + diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c index 63e8b8dc1f..f54c86702f 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -45,6 +45,7 @@ */ #include "postgres.h" +#include "access/parallel.h" #include "access/xact.h" #include "access/xlog.h" #include "access/xlogdefs.h" @@ -2712,10 +2713,14 @@ neon_start_unlogged_build(SMgrRelation reln) reln->smgr_relpersistence = RELPERSISTENCE_UNLOGGED; /* + * Create the local file. In a parallel build, the leader is expected to + * call this first and do it. + * * FIXME: should we pass isRedo true to create the tablespace dir if it * doesn't exist? Is it needed? */ - mdcreate(reln, MAIN_FORKNUM, false); + if (!IsParallelWorker()) + mdcreate(reln, MAIN_FORKNUM, false); } /* @@ -2739,7 +2744,17 @@ neon_finish_unlogged_build_phase_1(SMgrRelation reln) Assert(unlogged_build_phase == UNLOGGED_BUILD_PHASE_1); Assert(reln->smgr_relpersistence == RELPERSISTENCE_UNLOGGED); - unlogged_build_phase = UNLOGGED_BUILD_PHASE_2; + /* + * In a parallel build, (only) the leader process performs the 2nd + * phase. + */ + if (IsParallelWorker()) + { + unlogged_build_rel = NULL; + unlogged_build_phase = UNLOGGED_BUILD_NOT_IN_PROGRESS; + } + else + unlogged_build_phase = UNLOGGED_BUILD_PHASE_2; } /* From 9dd69194d48b46e3f32b2cb9ce688a35669d48ec Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Sun, 4 Feb 2024 00:15:59 +0200 Subject: [PATCH 12/34] refactor(proxy): std::io::Write for BytesMut exists (#6606) Replace TODO with an existing implementation via `BufMut::writer``. 
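For reference, a minimal standalone sketch of the `bytes` API this change leans on (illustrative only, not the proxy code): `BufMut::writer()` adapts a `BytesMut` into a `std::io::Write`, and `into_inner()` hands the buffer back for splitting/freezing.

```rust
use bytes::{BufMut, BytesMut};
use std::io::Write;

fn main() -> std::io::Result<()> {
    // BytesMut implements BufMut, and BufMut::writer() wraps it in an adapter
    // that implements std::io::Write, so no hand-rolled wrapper struct is needed.
    let mut w = BytesMut::new().writer();
    w.write_all(b"row group bytes")?;

    // Recover the underlying BytesMut, e.g. to split()/freeze() it for upload.
    let buf = w.into_inner();
    assert_eq!(&buf[..], &b"row group bytes"[..]);
    Ok(())
}
```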
--- proxy/src/context/parquet.rs | 48 ++++++++++++++---------------------- 1 file changed, 18 insertions(+), 30 deletions(-) diff --git a/proxy/src/context/parquet.rs b/proxy/src/context/parquet.rs index 1e9e723938..e920d7be01 100644 --- a/proxy/src/context/parquet.rs +++ b/proxy/src/context/parquet.rs @@ -1,7 +1,7 @@ use std::{sync::Arc, time::SystemTime}; use anyhow::Context; -use bytes::BytesMut; +use bytes::{buf::Writer, BufMut, BytesMut}; use chrono::{Datelike, Timelike}; use futures::{Stream, StreamExt}; use parquet::{ @@ -192,8 +192,9 @@ async fn worker_inner( let mut rows = Vec::with_capacity(config.rows_per_group); let schema = rows.as_slice().schema()?; - let file = BytesWriter::default(); - let mut w = SerializedFileWriter::new(file, schema.clone(), config.propeties.clone())?; + let buffer = BytesMut::new(); + let w = buffer.writer(); + let mut w = SerializedFileWriter::new(w, schema.clone(), config.propeties.clone())?; let mut last_upload = time::Instant::now(); @@ -221,20 +222,23 @@ async fn worker_inner( } if !w.flushed_row_groups().is_empty() { - let _: BytesWriter = upload_parquet(w, len, &storage).await?; + let _: Writer = upload_parquet(w, len, &storage).await?; } Ok(()) } -async fn flush_rows( +async fn flush_rows( rows: Vec, - mut w: SerializedFileWriter, + mut w: SerializedFileWriter, ) -> anyhow::Result<( Vec, - SerializedFileWriter, + SerializedFileWriter, RowGroupMetaDataPtr, -)> { +)> +where + W: std::io::Write + Send + 'static, +{ let span = Span::current(); let (mut rows, w, rg_meta) = tokio::task::spawn_blocking(move || { let _enter = span.enter(); @@ -258,10 +262,10 @@ async fn flush_rows( } async fn upload_parquet( - w: SerializedFileWriter, + w: SerializedFileWriter>, len: i64, storage: &GenericRemoteStorage, -) -> anyhow::Result { +) -> anyhow::Result> { let len_uncompressed = w .flushed_row_groups() .iter() @@ -270,11 +274,12 @@ async fn upload_parquet( // I don't know how compute intensive this is, although it probably isn't much... better be safe than sorry. // finish method only available on the fork: https://github.com/apache/arrow-rs/issues/5253 - let (mut file, metadata) = tokio::task::spawn_blocking(move || w.finish()) + let (writer, metadata) = tokio::task::spawn_blocking(move || w.finish()) .await .unwrap()?; - let data = file.buf.split().freeze(); + let mut buffer = writer.into_inner(); + let data = buffer.split().freeze(); let compression = len as f64 / len_uncompressed as f64; let size = data.len(); @@ -315,24 +320,7 @@ async fn upload_parquet( .await .context("request_data_upload")?; - Ok(file) -} - -// why doesn't BytesMut impl io::Write? 
-#[derive(Default)] -struct BytesWriter { - buf: BytesMut, -} - -impl std::io::Write for BytesWriter { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - self.buf.extend_from_slice(buf); - Ok(buf.len()) - } - - fn flush(&mut self) -> std::io::Result<()> { - Ok(()) - } + Ok(buffer.writer()) } #[cfg(test)] From 09519c1773724fbceec1257d4e495aa20f901afc Mon Sep 17 00:00:00 2001 From: Clarence Date: Sun, 4 Feb 2024 20:33:38 +0100 Subject: [PATCH 13/34] chore: update wording in docs to improve readability (#6607) ## Problem Found typos while reading the docs ## Summary of changes Fixed the typos found --- docs/docker.md | 4 ++-- docs/pageserver-storage.md | 2 +- docs/pageserver-thread-mgmt.md | 2 +- docs/pageserver-walredo.md | 2 +- docs/synthetic-size.md | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/docker.md b/docs/docker.md index 9761cc4346..cbf68be3a7 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -21,7 +21,7 @@ We build all images after a successful `release` tests run and push automaticall ## Docker Compose example -You can see a [docker compose](https://docs.docker.com/compose/) example to create a neon cluster in [/docker-compose/docker-compose.yml](/docker-compose/docker-compose.yml). It creates the following conatainers. +You can see a [docker compose](https://docs.docker.com/compose/) example to create a neon cluster in [/docker-compose/docker-compose.yml](/docker-compose/docker-compose.yml). It creates the following containers. - pageserver x 1 - safekeeper x 3 @@ -38,7 +38,7 @@ You can specify version of neon cluster using following environment values. - TAG: the tag version of [docker image](https://registry.hub.docker.com/r/neondatabase/neon/tags) (default is latest), which is tagged in [CI test](/.github/workflows/build_and_test.yml) ``` $ cd docker-compose/ -$ docker-compose down # remove the conainers if exists +$ docker-compose down # remove the containers if exists $ PG_VERSION=15 TAG=2937 docker-compose up --build -d # You can specify the postgres and image version Creating network "dockercompose_default" with the default driver Creating docker-compose_storage_broker_1 ... done diff --git a/docs/pageserver-storage.md b/docs/pageserver-storage.md index 77e7ff35bc..9902f6b930 100644 --- a/docs/pageserver-storage.md +++ b/docs/pageserver-storage.md @@ -64,7 +64,7 @@ Storage. The LayerMap tracks what layers exist in a timeline. -Currently, the layer map is just a resizeable array (Vec). On a GetPage@LSN or +Currently, the layer map is just a resizable array (Vec). On a GetPage@LSN or other read request, the layer map scans through the array to find the right layer that contains the data for the requested page. The read-code in LayeredTimeline is aware of the ancestor, and returns data from the ancestor timeline if it's diff --git a/docs/pageserver-thread-mgmt.md b/docs/pageserver-thread-mgmt.md index c911d2c53d..5d862415eb 100644 --- a/docs/pageserver-thread-mgmt.md +++ b/docs/pageserver-thread-mgmt.md @@ -22,7 +22,7 @@ timeline to shutdown. It will also wait for them to finish. A task registered in the task registry can check if it has been requested to shut down, by calling `is_shutdown_requested()`. There's -also a `shudown_watcher()` Future that can be used with `tokio::select!` +also a `shutdown_watcher()` Future that can be used with `tokio::select!` or similar, to wake up on shutdown. 
diff --git a/docs/pageserver-walredo.md b/docs/pageserver-walredo.md index 1de9c177cc..7b366ff616 100644 --- a/docs/pageserver-walredo.md +++ b/docs/pageserver-walredo.md @@ -74,4 +74,4 @@ somewhat wasteful, but because most WAL records only affect one page, the overhead is acceptable. The WAL redo always happens for one particular page. If the WAL record -coantains changes to other pages, they are ignored. +contains changes to other pages, they are ignored. diff --git a/docs/synthetic-size.md b/docs/synthetic-size.md index 407d7b525a..3acb4e18cb 100644 --- a/docs/synthetic-size.md +++ b/docs/synthetic-size.md @@ -21,7 +21,7 @@ implementation where we keep more data than we would need to, do not change the synthetic size or incur any costs to the user. The synthetic size is calculated for the whole project. It is not -straighforward to attribute size to individual branches. See "What is +straightforward to attribute size to individual branches. See "What is the size of an individual branch?" for discussion on those difficulties. @@ -248,7 +248,7 @@ and truncate the WAL. Synthetic size is calculated for the whole project, and includes all branches. There is no such thing as the size of a branch, because it -is not straighforward to attribute the parts of size to individual +is not straightforward to attribute the parts of size to individual branches. ## Example: attributing size to branches From 7e8529bec127aa13f5f4a819a24495c0a8e18aea Mon Sep 17 00:00:00 2001 From: Vadim Kharitonov Date: Sun, 4 Feb 2024 23:27:07 +0100 Subject: [PATCH 14/34] Revert "Update pgvector to v0.6.0, third attempt" (#6610) The issue is still unsolved because of shmem size in VMs. Need to figure it out before applying this patch. For more details: ``` ERROR: could not resize shared memory segment "/PostgreSQL.2892504480" to 16774205952 bytes: No space left on device ``` As an example, the same issue in community pgvector/pgvector#453. --- .dockerignore | 1 - Dockerfile.compute-node | 7 +--- patches/pgvector.patch | 78 -------------------------------------- pgxn/neon/pagestore_smgr.c | 19 +--------- 4 files changed, 4 insertions(+), 101 deletions(-) delete mode 100644 patches/pgvector.patch diff --git a/.dockerignore b/.dockerignore index 29abdc37aa..8b378b5dab 100644 --- a/.dockerignore +++ b/.dockerignore @@ -17,7 +17,6 @@ !libs/ !neon_local/ !pageserver/ -!patches/ !pgxn/ !proxy/ !s3_scrubber/ diff --git a/Dockerfile.compute-node b/Dockerfile.compute-node index b13225172d..d91c7cfd72 100644 --- a/Dockerfile.compute-node +++ b/Dockerfile.compute-node @@ -241,12 +241,9 @@ RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz - FROM build-deps AS vector-pg-build COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ -COPY patches/pgvector.patch /pgvector.patch - -RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.6.0.tar.gz -O pgvector.tar.gz && \ - echo "b0cf4ba1ab016335ac8fb1cada0d2106235889a194fffeece217c5bda90b2f19 pgvector.tar.gz" | sha256sum --check && \ +RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.5.1.tar.gz -O pgvector.tar.gz && \ + echo "cc7a8e034a96e30a819911ac79d32f6bc47bdd1aa2de4d7d4904e26b83209dc8 pgvector.tar.gz" | sha256sum --check && \ mkdir pgvector-src && cd pgvector-src && tar xvzf ../pgvector.tar.gz --strip-components=1 -C . 
&& \ - patch -p1 < /pgvector.patch && \ make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/vector.control diff --git a/patches/pgvector.patch b/patches/pgvector.patch deleted file mode 100644 index 84ac6644c5..0000000000 --- a/patches/pgvector.patch +++ /dev/null @@ -1,78 +0,0 @@ -From 0b0194a57bd0f3598bd57dbedd0df3932330169d Mon Sep 17 00:00:00 2001 -From: Heikki Linnakangas -Date: Fri, 2 Feb 2024 22:26:45 +0200 -Subject: [PATCH 1/1] Make v0.6.0 work with Neon - -Now that the WAL-logging happens as a separate step at the end of the -build, we need a few neon-specific hints to make it work. ---- - src/hnswbuild.c | 36 ++++++++++++++++++++++++++++++++++++ - 1 file changed, 36 insertions(+) - -diff --git a/src/hnswbuild.c b/src/hnswbuild.c -index 680789b..ec54dea 100644 ---- a/src/hnswbuild.c -+++ b/src/hnswbuild.c -@@ -840,9 +840,17 @@ HnswParallelBuildMain(dsm_segment *seg, shm_toc *toc) - - hnswarea = shm_toc_lookup(toc, PARALLEL_KEY_HNSW_AREA, false); - -+#ifdef NEON_SMGR -+ smgr_start_unlogged_build(RelationGetSmgr(indexRel)); -+#endif -+ - /* Perform inserts */ - HnswParallelScanAndInsert(heapRel, indexRel, hnswshared, hnswarea, false); - -+#ifdef NEON_SMGR -+ smgr_finish_unlogged_build_phase_1(RelationGetSmgr(indexRel)); -+#endif -+ - /* Close relations within worker */ - index_close(indexRel, indexLockmode); - table_close(heapRel, heapLockmode); -@@ -1089,13 +1097,41 @@ BuildIndex(Relation heap, Relation index, IndexInfo *indexInfo, - SeedRandom(42); - #endif - -+#ifdef NEON_SMGR -+ smgr_start_unlogged_build(RelationGetSmgr(index)); -+#endif -+ - InitBuildState(buildstate, heap, index, indexInfo, forkNum); - - BuildGraph(buildstate, forkNum); - -+#ifdef NEON_SMGR -+ smgr_finish_unlogged_build_phase_1(RelationGetSmgr(index)); -+#endif -+ - if (RelationNeedsWAL(index)) -+ { - log_newpage_range(index, forkNum, 0, RelationGetNumberOfBlocks(index), true); - -+#ifdef NEON_SMGR -+ { -+#if PG_VERSION_NUM >= 160000 -+ RelFileLocator rlocator = RelationGetSmgr(index)->smgr_rlocator.locator; -+#else -+ RelFileNode rlocator = RelationGetSmgr(index)->smgr_rnode.node; -+#endif -+ -+ SetLastWrittenLSNForBlockRange(XactLastRecEnd, rlocator, -+ MAIN_FORKNUM, 0, RelationGetNumberOfBlocks(index)); -+ SetLastWrittenLSNForRelation(XactLastRecEnd, rlocator, MAIN_FORKNUM); -+ } -+#endif -+ } -+ -+#ifdef NEON_SMGR -+ smgr_end_unlogged_build(RelationGetSmgr(index)); -+#endif -+ - FreeBuildState(buildstate); - } - --- -2.39.2 - diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c index f54c86702f..63e8b8dc1f 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -45,7 +45,6 @@ */ #include "postgres.h" -#include "access/parallel.h" #include "access/xact.h" #include "access/xlog.h" #include "access/xlogdefs.h" @@ -2713,14 +2712,10 @@ neon_start_unlogged_build(SMgrRelation reln) reln->smgr_relpersistence = RELPERSISTENCE_UNLOGGED; /* - * Create the local file. In a parallel build, the leader is expected to - * call this first and do it. - * * FIXME: should we pass isRedo true to create the tablespace dir if it * doesn't exist? Is it needed? 
*/ - if (!IsParallelWorker()) - mdcreate(reln, MAIN_FORKNUM, false); + mdcreate(reln, MAIN_FORKNUM, false); } /* @@ -2744,17 +2739,7 @@ neon_finish_unlogged_build_phase_1(SMgrRelation reln) Assert(unlogged_build_phase == UNLOGGED_BUILD_PHASE_1); Assert(reln->smgr_relpersistence == RELPERSISTENCE_UNLOGGED); - /* - * In a parallel build, (only) the leader process performs the 2nd - * phase. - */ - if (IsParallelWorker()) - { - unlogged_build_rel = NULL; - unlogged_build_phase = UNLOGGED_BUILD_NOT_IN_PROGRESS; - } - else - unlogged_build_phase = UNLOGGED_BUILD_PHASE_2; + unlogged_build_phase = UNLOGGED_BUILD_PHASE_2; } /* From 70f646ffe2fe9829316f1ed02a5a1529bc296fd6 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Mon, 5 Feb 2024 09:34:03 +0200 Subject: [PATCH 15/34] More logging fixes (#6584) I was on-call this week, these would had made me understand more/faster of the system: - move stray attaching start logging inside the span it starts, add generation - log ancestor timeline_id or bootstrapping in the beginning of timeline creation --- pageserver/src/http/routes.rs | 6 +++++ pageserver/src/tenant.rs | 28 ++++++++++------------- pageserver/src/tenant/config.rs | 4 ++-- pageserver/src/tenant/mgr.rs | 7 ------ test_runner/regress/test_timeline_size.py | 2 +- 5 files changed, 21 insertions(+), 26 deletions(-) diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 5735489742..b97e272c86 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -489,6 +489,12 @@ async fn timeline_create_handler( tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?; + if let Some(ancestor_id) = request_data.ancestor_timeline_id.as_ref() { + tracing::info!(%ancestor_id, "starting to branch"); + } else { + tracing::info!("bootstrapping"); + } + match tenant.create_timeline( new_timeline_id, request_data.ancestor_timeline_id.map(TimelineId::from), diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 58af80238d..dd4f9107f9 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -205,7 +205,7 @@ impl AttachedTenantConf { match &location_conf.mode { LocationMode::Attached(attach_conf) => Ok(Self { tenant_conf: location_conf.tenant_conf, - location: attach_conf.clone(), + location: *attach_conf, }), LocationMode::Secondary(_) => { anyhow::bail!("Attempted to construct AttachedTenantConf from a LocationConf in secondary mode") @@ -625,6 +625,9 @@ impl Tenant { deletion_queue_client, } = resources; + let attach_mode = attached_conf.location.attach_mode; + let generation = attached_conf.location.generation; + let tenant = Arc::new(Tenant::new( TenantState::Attaching, conf, @@ -654,6 +657,12 @@ impl Tenant { "attach tenant", false, async move { + + info!( + ?attach_mode, + "Attaching tenant" + ); + let _gate_guard = attach_gate_guard; // Is this tenant being spawned as part of process startup? 
@@ -865,7 +874,7 @@ impl Tenant { Ok(()) } .instrument({ - let span = tracing::info_span!(parent: None, "attach", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()); + let span = tracing::info_span!(parent: None, "attach", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), gen=?generation); span.follows_from(Span::current()); span }), @@ -2354,12 +2363,7 @@ impl Tenant { } pub(crate) fn get_attach_mode(&self) -> AttachmentMode { - self.tenant_conf - .read() - .unwrap() - .location - .attach_mode - .clone() + self.tenant_conf.read().unwrap().location.attach_mode } /// For API access: generate a LocationConfig equivalent to the one that would be used to @@ -3225,8 +3229,6 @@ impl Tenant { .context("branch initial metadata upload")?; } - info!("branched timeline {dst_id} from {src_id} at {start_lsn}"); - Ok(new_timeline) } @@ -3444,12 +3446,6 @@ impl Tenant { // All done! let timeline = raw_timeline.finish_creation()?; - info!( - "created root timeline {} timeline.lsn {}", - timeline_id, - timeline.get_last_record_lsn() - ); - Ok(timeline) } diff --git a/pageserver/src/tenant/config.rs b/pageserver/src/tenant/config.rs index 63bd56cf5f..563887088d 100644 --- a/pageserver/src/tenant/config.rs +++ b/pageserver/src/tenant/config.rs @@ -51,7 +51,7 @@ pub mod defaults { pub const DEFAULT_INGEST_BATCH_SIZE: u64 = 100; } -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)] pub(crate) enum AttachmentMode { /// Our generation is current as far as we know, and as far as we know we are the only attached /// pageserver. This is the "normal" attachment mode. @@ -66,7 +66,7 @@ pub(crate) enum AttachmentMode { Stale, } -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)] pub(crate) struct AttachedLocationConfig { pub(crate) generation: Generation, pub(crate) attach_mode: AttachmentMode, diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index 64fd709386..de0b636d47 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -607,13 +607,6 @@ pub(crate) fn tenant_spawn( "Cannot load tenant, ignore mark found at {tenant_ignore_mark:?}" ); - info!( - tenant_id = %tenant_shard_id.tenant_id, - shard_id = %tenant_shard_id.shard_slug(), - generation = ?location_conf.location.generation, - attach_mode = ?location_conf.location.attach_mode, - "Attaching tenant" - ); let tenant = match Tenant::spawn( conf, tenant_shard_id, diff --git a/test_runner/regress/test_timeline_size.py b/test_runner/regress/test_timeline_size.py index 303aabb58d..cd7203bba6 100644 --- a/test_runner/regress/test_timeline_size.py +++ b/test_runner/regress/test_timeline_size.py @@ -883,7 +883,7 @@ def test_ondemand_activation(neon_env_builder: NeonEnvBuilder): # Deletion itself won't complete due to our failpoint: Tenant::shutdown can't complete while calculating # logical size is paused in a failpoint. 
So instead we will use a log observation to check that # on-demand activation was triggered by the tenant deletion - log_match = f".*attach{{tenant_id={delete_tenant_id} shard_id=0000}}: Activating tenant \\(on-demand\\).*" + log_match = f".*attach{{tenant_id={delete_tenant_id} shard_id=0000 gen=[0-9a-f]+}}: Activating tenant \\(on-demand\\).*" def activated_on_demand(): assert env.pageserver.log_contains(log_match) is not None From df7bee7cfaba8f2129fd9ea88976da5d079684a5 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Sat, 3 Feb 2024 00:02:33 +0200 Subject: [PATCH 16/34] Fix compilation with recent glibc headers with close_range(2). MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I was getting an error: /home/heikki/git-sandbox/neon//pgxn/neon_walredo/walredoproc.c:161:5: error: conflicting types for ‘close_range’; have ‘int(unsigned int, unsigned int, unsigned int)’ 161 | int close_range(unsigned int start_fd, unsigned int count, unsigned int flags) { | ^~~~~~~~~~~ In file included from /usr/include/x86_64-linux-gnu/bits/sigstksz.h:24, from /usr/include/signal.h:328, from /home/heikki/git-sandbox/neon//pgxn/neon_walredo/walredoproc.c:50: /usr/include/unistd.h:1208:12: note: previous declaration of ‘close_range’ with type ‘int(unsigned int, unsigned int, int)’ 1208 | extern int close_range (unsigned int __fd, unsigned int __max_fd, | ^~~~~~~~~~~ The discrepancy is in the 3rd argument. Apparently in the glibc wrapper it's signed. As a quick fix, rename our close_range() function, the one that calls syscall() directly, to avoid the clash with the glibc wrapper. In the long term, an autoconf test would be nice, and some equivalent on macOS, see issue #6580. --- pgxn/neon_walredo/walredoproc.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pgxn/neon_walredo/walredoproc.c b/pgxn/neon_walredo/walredoproc.c index 6ca0b2a274..1fdd3801c6 100644 --- a/pgxn/neon_walredo/walredoproc.c +++ b/pgxn/neon_walredo/walredoproc.c @@ -158,7 +158,10 @@ static XLogReaderState *reader_state; #include #include #include -int close_range(unsigned int start_fd, unsigned int count, unsigned int flags) { + +static int +close_range_syscall(unsigned int start_fd, unsigned int count, unsigned int flags) +{ return syscall(__NR_close_range, start_fd, count, flags); } @@ -172,7 +175,7 @@ enter_seccomp_mode(void) * wal records. See the comment in the Rust code that launches this process. */ int err; - if (err = close_range(3, ~0U, 0)) { + if (err = close_range_syscall(3, ~0U, 0)) { ereport(FATAL, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("seccomp: could not close files >= fd 3"))); } From 56cf3604395125b9283ba643cfbb98efd926ff49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Mon, 5 Feb 2024 10:53:37 +0100 Subject: [PATCH 17/34] Don't preserve temp files on creation errors of delta layers (#6612) There is currently no cleanup done after a delta layer creation error, so delta layers can accumulate. The problem gets worse as the operation gets retried and delta layers accumulate on the disk. Therefore, delete them from disk (if something has been written to disk). 
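The shape of the fix, as a minimal sketch (the helper name and signature below are illustrative, not the pageserver API): run the finishing step, and if it fails, remove the partially written temp file before returning the original error.

```rust
use std::path::Path;

// Illustrative helper only: if finishing the layer fails, delete the partially
// written temp file so retries don't accumulate large leftover files on disk,
// then propagate the original error.
fn finish_or_cleanup<T, E: std::fmt::Debug>(
    temp_path: &Path,
    finish: impl FnOnce() -> Result<T, E>,
) -> Result<T, E> {
    let result = finish();
    if result.is_err() {
        if let Err(e) = std::fs::remove_file(temp_path) {
            eprintln!(
                "error cleaning up temporary delta layer file {}: {e:?}",
                temp_path.display()
            );
        }
    }
    result
}
```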
--- pageserver/src/tenant/storage_layer/delta_layer.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index ec031d6089..2a51884c0b 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -609,7 +609,19 @@ impl DeltaLayerWriter { key_end: Key, timeline: &Arc, ) -> anyhow::Result { - self.inner.take().unwrap().finish(key_end, timeline).await + let inner = self.inner.take().unwrap(); + let temp_path = inner.path.clone(); + let result = inner.finish(key_end, timeline).await; + // The delta layer files can sometimes be really large. Clean them up. + if result.is_err() { + tracing::warn!( + "Cleaning up temporary delta file {temp_path} after error during writing" + ); + if let Err(e) = std::fs::remove_file(&temp_path) { + tracing::warn!("Error cleaning up temporary delta layer file {temp_path}: {e:?}") + } + } + result } } From 01c57ec547cb701f2253c8c445931644cc9f60b9 Mon Sep 17 00:00:00 2001 From: Abhijeet Patil Date: Mon, 5 Feb 2024 10:08:20 +0000 Subject: [PATCH 18/34] Removed Uploading of perf result to git repo 'zenith-perf-data' (#6590) ## Problem We were archiving the pref benchmarks to - neon DB - git repo `zenith-perf-data` As the pref batch ran in parallel when the uploading of results to zenith-perf-data` git repo resulted in merge conflicts. Which made the run flaky and as a side effect the build started failing . The problem is been expressed in https://github.com/neondatabase/neon/issues/5160 ## Summary of changes As the results were not used from the git repo it was redundant hence in this PR cleaning up the results uploading of of perf results to git repo The shell script `generate_and_push_perf_report.sh` was using a py script [git-upload](https://github.com/neondatabase/neon/compare/remove-perf-benchmark-git-upload?expand=1#diff-c6d938e7f060e487367d9dc8055245c82b51a73c1f97956111a495a8a86e9a33) and [scripts/generate_perf_report_page.py](https://github.com/neondatabase/neon/pull/6590/files#diff-81af2147e72d07e4cf8ee4395632596d805d6168ba75c71cab58db2659956ef8) which are not used anywhere else in repo hence also cleaning that up ## Checklist before requesting a review - [ ] I have performed a self-review of my code. - [ ] If it is a core feature, I have added thorough tests. - [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard? - [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section. 
## Checklist before merging - [ ] Do not forget to reformat the commit message to not include the above checklist --- scripts/generate_and_push_perf_report.sh | 14 -- scripts/generate_perf_report_page.py | 219 ----------------------- scripts/git-upload | 170 ------------------ 3 files changed, 403 deletions(-) delete mode 100755 scripts/generate_perf_report_page.py delete mode 100755 scripts/git-upload diff --git a/scripts/generate_and_push_perf_report.sh b/scripts/generate_and_push_perf_report.sh index 9e03302b0f..178c570b13 100755 --- a/scripts/generate_and_push_perf_report.sh +++ b/scripts/generate_and_push_perf_report.sh @@ -8,17 +8,3 @@ SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) echo "Uploading perf report to neon pg" # ingest per test results data into neon backed postgres running in staging to build grafana reports on that data DATABASE_URL="$PERF_TEST_RESULT_CONNSTR" poetry run python "$SCRIPT_DIR"/ingest_perf_test_result.py --ingest "$REPORT_FROM" - -# Activate poetry's venv. Needed because git upload does not run in a project dir (it uses tmp to store the repository) -# so the problem occurs because poetry cannot find pyproject.toml in temp dir created by git upload -# shellcheck source=/dev/null -. "$(poetry env info --path)"/bin/activate - -echo "Uploading perf result to zenith-perf-data" -scripts/git-upload \ - --repo=https://"$VIP_VAP_ACCESS_TOKEN"@github.com/neondatabase/zenith-perf-data.git \ - --message="add performance test result for $GITHUB_SHA neon revision" \ - --branch=master \ - copy "$REPORT_FROM" "data/$REPORT_TO" `# COPY FROM TO_RELATIVE`\ - --merge \ - --run-cmd "python $SCRIPT_DIR/generate_perf_report_page.py --input-dir data/$REPORT_TO --out reports/$REPORT_TO.html" diff --git a/scripts/generate_perf_report_page.py b/scripts/generate_perf_report_page.py deleted file mode 100755 index b5b49bb600..0000000000 --- a/scripts/generate_perf_report_page.py +++ /dev/null @@ -1,219 +0,0 @@ -#!/usr/bin/env python3 -import argparse -import json -from dataclasses import dataclass -from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, cast - -from jinja2 import Template - -# skip 'input' columns. 
They are included in the header and just blow the table -EXCLUDE_COLUMNS = frozenset( - { - "scale", - "duration", - "number_of_clients", - "number_of_threads", - "init_start_timestamp", - "init_end_timestamp", - "run_start_timestamp", - "run_end_timestamp", - } -) - -KEY_EXCLUDE_FIELDS = frozenset( - { - "init_start_timestamp", - "init_end_timestamp", - "run_start_timestamp", - "run_end_timestamp", - } -) -NEGATIVE_COLOR = "negative" -POSITIVE_COLOR = "positive" -EPS = 1e-6 - - -@dataclass -class SuitRun: - revision: str - values: Dict[str, Any] - - -@dataclass -class SuitRuns: - platform: str - suit: str - common_columns: List[Tuple[str, str]] - value_columns: List[str] - runs: List[SuitRun] - - -@dataclass -class RowValue: - value: str - color: str - ratio: str - - -def get_columns(values: List[Dict[Any, Any]]) -> Tuple[List[Tuple[str, str]], List[str]]: - value_columns = [] - common_columns = [] - for item in values: - if item["name"] in KEY_EXCLUDE_FIELDS: - continue - if item["report"] != "test_param": - value_columns.append(cast(str, item["name"])) - else: - common_columns.append((cast(str, item["name"]), cast(str, item["value"]))) - value_columns.sort() - common_columns.sort(key=lambda x: x[0]) # sort by name - return common_columns, value_columns - - -def format_ratio(ratio: float, report: str) -> Tuple[str, str]: - color = "" - sign = "+" if ratio > 0 else "" - if abs(ratio) < 0.05: - return f" ({sign}{ratio:.2f})", color - - if report not in {"test_param", "higher_is_better", "lower_is_better"}: - raise ValueError(f"Unknown report type: {report}") - - if report == "test_param": - return f"{ratio:.2f}", color - - if ratio > 0: - if report == "higher_is_better": - color = POSITIVE_COLOR - elif report == "lower_is_better": - color = NEGATIVE_COLOR - elif ratio < 0: - if report == "higher_is_better": - color = NEGATIVE_COLOR - elif report == "lower_is_better": - color = POSITIVE_COLOR - - return f" ({sign}{ratio:.2f})", color - - -def extract_value(name: str, suit_run: SuitRun) -> Optional[Dict[str, Any]]: - for item in suit_run.values["data"]: - if item["name"] == name: - return cast(Dict[str, Any], item) - return None - - -def get_row_values( - columns: List[str], run_result: SuitRun, prev_result: Optional[SuitRun] -) -> List[RowValue]: - row_values = [] - for column in columns: - current_value = extract_value(column, run_result) - if current_value is None: - # should never happen - raise ValueError(f"{column} not found in {run_result.values}") - - value = current_value["value"] - if isinstance(value, float): - value = f"{value:.2f}" - - if prev_result is None: - row_values.append(RowValue(value, "", "")) - continue - - prev_value = extract_value(column, prev_result) - if prev_value is None: - # this might happen when new metric is added and there is no value for it in previous run - # let this be here, TODO add proper handling when this actually happens - raise ValueError(f"{column} not found in previous result") - # adding `EPS` to each term to avoid ZeroDivisionError when the denominator is zero - ratio = (float(value) + EPS) / (float(prev_value["value"]) + EPS) - 1 - ratio_display, color = format_ratio(ratio, current_value["report"]) - row_values.append(RowValue(value, color, ratio_display)) - return row_values - - -@dataclass -class SuiteRunTableRow: - revision: str - values: List[RowValue] - - -def prepare_rows_from_runs(value_columns: List[str], runs: List[SuitRun]) -> List[SuiteRunTableRow]: - rows = [] - prev_run = None - for run in runs: - rows.append( - 
SuiteRunTableRow( - revision=run.revision, values=get_row_values(value_columns, run, prev_run) - ) - ) - prev_run = run - - return rows - - -def main(args: argparse.Namespace) -> None: - input_dir = Path(args.input_dir) - grouped_runs: Dict[str, SuitRuns] = {} - # we have files in form: _.json - # fill them in the hashmap so we have grouped items for the - # same run configuration (scale, duration etc.) ordered by counter. - for item in sorted(input_dir.iterdir(), key=lambda x: int(x.name.split("_")[0])): - run_data = json.loads(item.read_text()) - revision = run_data["revision"] - - for suit_result in run_data["result"]: - key = "{}{}".format(run_data["platform"], suit_result["suit"]) - # pack total duration as a synthetic value - total_duration = suit_result["total_duration"] - suit_result["data"].append( - { - "name": "total_duration", - "value": total_duration, - "unit": "s", - "report": "lower_is_better", - } - ) - common_columns, value_columns = get_columns(suit_result["data"]) - - grouped_runs.setdefault( - key, - SuitRuns( - platform=run_data["platform"], - suit=suit_result["suit"], - common_columns=common_columns, - value_columns=value_columns, - runs=[], - ), - ) - - grouped_runs[key].runs.append(SuitRun(revision=revision, values=suit_result)) - context = {} - for result in grouped_runs.values(): - suit = result.suit - context[suit] = { - "common_columns": result.common_columns, - "value_columns": result.value_columns, - "platform": result.platform, - # reverse the order so newest results are on top of the table - "rows": reversed(prepare_rows_from_runs(result.value_columns, result.runs)), - } - - template = Template((Path(__file__).parent / "perf_report_template.html").read_text()) - - Path(args.out).write_text(template.render(context=context)) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--input-dir", - dest="input_dir", - required=True, - help="Directory with jsons generated by the test suite", - ) - parser.add_argument("--out", required=True, help="Output html file path") - args = parser.parse_args() - main(args) diff --git a/scripts/git-upload b/scripts/git-upload deleted file mode 100755 index d56c0f8e94..0000000000 --- a/scripts/git-upload +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import os -import shlex -import shutil -import subprocess -import sys -import textwrap -from contextlib import contextmanager -from distutils.dir_util import copy_tree -from pathlib import Path -from tempfile import TemporaryDirectory -from typing import Optional - - -def absolute_path(path): - return Path(path).resolve() - - -def relative_path(path): - path = Path(path) - if path.is_absolute(): - raise Exception(f'path `{path}` must be relative!') - return path - - -@contextmanager -def chdir(cwd: Path): - old = os.getcwd() - os.chdir(cwd) - try: - yield cwd - finally: - os.chdir(old) - - -def run(cmd, *args, **kwargs): - print('$', ' '.join(cmd)) - subprocess.check_call(cmd, *args, **kwargs) - - -class GitRepo: - def __init__(self, url, branch: Optional[str] = None): - self.url = url - self.cwd = TemporaryDirectory() - self.branch = branch - - args = [ - 'git', - 'clone', - '--single-branch', - ] - if self.branch: - args.extend(['--branch', self.branch]) - - subprocess.check_call([ - *args, - str(url), - self.cwd.name, - ]) - - def is_dirty(self): - res = subprocess.check_output(['git', 'status', '--porcelain'], text=True).strip() - return bool(res) - - def update(self, message, action, branch=None): - with 
chdir(self.cwd.name): - if not branch: - cmd = ['git', 'branch', '--show-current'] - branch = subprocess.check_output(cmd, text=True).strip() - - # Run action in repo's directory - action() - - run(['git', 'add', '.']) - - if not self.is_dirty(): - print('No changes detected, quitting') - return - - git_with_user = [ - 'git', - '-c', - 'user.name=vipvap', - '-c', - 'user.email=vipvap@zenith.tech', - ] - run(git_with_user + [ - 'commit', - '--author="vipvap "', - f'--message={message}', - ]) - - for _ in range(5): - try: - run(['git', 'fetch', 'origin', branch]) - run(git_with_user + ['rebase', f'origin/{branch}']) - run(['git', 'push', 'origin', branch]) - return - - except subprocess.CalledProcessError as e: - print(f'failed to update branch `{branch}`: {e}', file=sys.stderr) - - raise Exception(f'failed to update branch `{branch}`') - - -def do_copy(args): - src = args.src - dst = args.dst - - if args.forbid_overwrite and dst.exists(): - raise FileExistsError(f"File exists: '{dst}'") - - if src.is_dir(): - if not args.merge: - shutil.rmtree(dst, ignore_errors=True) - # distutils is deprecated, but this is a temporary workaround before python version bump - # here we need dir_exists_ok=True from shutil.copytree which is available in python 3.8+ - copy_tree(str(src), str(dst)) - else: - shutil.copy(src, dst) - - if args.run_cmd: - run(shlex.split(args.run_cmd)) - - -def main(): - parser = argparse.ArgumentParser(description='Git upload tool') - parser.add_argument('--repo', type=str, metavar='URL', required=True, help='git repo url') - parser.add_argument('--message', type=str, metavar='TEXT', help='commit message') - parser.add_argument('--branch', type=str, metavar='TEXT', help='target git repo branch') - - commands = parser.add_subparsers(title='commands', dest='subparser_name') - - p_copy = commands.add_parser( - 'copy', - help='copy file into the repo', - formatter_class=argparse.RawTextHelpFormatter, - ) - p_copy.add_argument('src', type=absolute_path, help='source path') - p_copy.add_argument('dst', type=relative_path, help='relative dest path') - p_copy.add_argument('--forbid-overwrite', action='store_true', help='do not allow overwrites') - p_copy.add_argument( - '--merge', - action='store_true', - help='when copying a directory do not delete existing data, but add new files') - p_copy.add_argument('--run-cmd', - help=textwrap.dedent('''\ - run arbitrary cmd on top of copied files, - example usage is static content generation - based on current repository state\ - ''')) - - args = parser.parse_args() - - commands = { - 'copy': do_copy, - } - - action = commands.get(args.subparser_name) - if action: - message = args.message or 'update' - GitRepo(args.repo, args.branch).update(message, lambda: action(args)) - else: - parser.print_usage() - - -if __name__ == '__main__': - main() From db89b13aaa45266227b89884490c11e10abb8054 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Mon, 5 Feb 2024 14:10:08 +0200 Subject: [PATCH 19/34] fix: use the shared constant download buffer size (#6620) Noticed that we had forgotten to use `remote_timeline_client.rs::BUFFER_SIZE` in one instance. 
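As a hedged illustration of the pattern (the constant value below is assumed; only the name `BUFFER_SIZE` comes from `remote_timeline_client.rs`): routing every buffered writer through one shared constant keeps the download paths consistent instead of scattering ad-hoc literals like `8 * 1024`.

```rust
use tokio::{fs::File, io::BufWriter};

// Assumed value for illustration; the real constant lives in
// remote_timeline_client.rs as BUFFER_SIZE.
const BUFFER_SIZE: usize = 32 * 1024;

async fn open_buffered(path: &str) -> std::io::Result<BufWriter<File>> {
    let file = File::create(path).await?;
    // All download writers share the same buffer capacity.
    Ok(BufWriter::with_capacity(BUFFER_SIZE, file))
}
```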
--- pageserver/src/tenant/remote_timeline_client/download.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pageserver/src/tenant/remote_timeline_client/download.rs b/pageserver/src/tenant/remote_timeline_client/download.rs index 4309c683e2..b84b5ca33b 100644 --- a/pageserver/src/tenant/remote_timeline_client/download.rs +++ b/pageserver/src/tenant/remote_timeline_client/download.rs @@ -471,7 +471,7 @@ pub(crate) async fn download_initdb_tar_zst( Err(other) => Err(other)?, }; let mut download = tokio_util::io::StreamReader::new(download.download_stream); - let mut writer = tokio::io::BufWriter::with_capacity(8 * 1024, file); + let mut writer = tokio::io::BufWriter::with_capacity(super::BUFFER_SIZE, file); // TODO: this consumption of the response body should be subject to timeout + cancellation, but // not without thinking carefully about how to recover safely from cancelling a write to From 5e8deca26862f190e6f38b31ccea5f0a22c36c69 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Mon, 5 Feb 2024 14:49:35 +0200 Subject: [PATCH 20/34] metrics: remove broken tenants (#6586) Before tenant migration it made sense to leak broken tenants in the metrics until restart. Nowdays it makes less sense because on cancellations we set the tenant broken. The set metric still allows filterable alerting. Fixes: #6507 --- pageserver/src/tenant.rs | 45 +++++++++++------------ test_runner/fixtures/metrics.py | 2 +- test_runner/regress/test_tenant_detach.py | 39 +++++--------------- 3 files changed, 32 insertions(+), 54 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index dd4f9107f9..b801347c06 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -67,7 +67,9 @@ use crate::deletion_queue::DeletionQueueError; use crate::import_datadir; use crate::is_uninit_mark; use crate::metrics::TENANT; -use crate::metrics::{remove_tenant_metrics, TENANT_STATE_METRIC, TENANT_SYNTHETIC_SIZE_METRIC}; +use crate::metrics::{ + remove_tenant_metrics, BROKEN_TENANTS_SET, TENANT_STATE_METRIC, TENANT_SYNTHETIC_SIZE_METRIC, +}; use crate::repository::GcResult; use crate::task_mgr; use crate::task_mgr::TaskKind; @@ -2637,9 +2639,16 @@ impl Tenant { let (state, mut rx) = watch::channel(state); tokio::spawn(async move { - // Strings for metric labels + // reflect tenant state in metrics: + // - global per tenant state: TENANT_STATE_METRIC + // - "set" of broken tenants: BROKEN_TENANTS_SET + // + // set of broken tenants should not have zero counts so that it remains accessible for + // alerting. + let tid = tenant_shard_id.to_string(); - let shard_id_str = format!("{}", tenant_shard_id.shard_slug()); + let shard_id = tenant_shard_id.shard_slug().to_string(); + let set_key = &[tid.as_str(), shard_id.as_str()][..]; fn inspect_state(state: &TenantState) -> ([&'static str; 1], bool) { ([state.into()], matches!(state, TenantState::Broken { .. })) @@ -2648,21 +2657,13 @@ impl Tenant { let mut tuple = inspect_state(&rx.borrow_and_update()); let is_broken = tuple.1; - let mut counted_broken = if !is_broken { - // the tenant might be ignored and reloaded, so first remove any previous set - // element. it most likely has already been scraped, as these are manual operations - // right now. most likely we will add it back very soon. 
- drop( - crate::metrics::BROKEN_TENANTS_SET.remove_label_values(&[&tid, &shard_id_str]), - ); - false - } else { + let mut counted_broken = if is_broken { // add the id to the set right away, there should not be any updates on the channel - // after - crate::metrics::BROKEN_TENANTS_SET - .with_label_values(&[&tid, &shard_id_str]) - .set(1); + // after before tenant is removed, if ever + BROKEN_TENANTS_SET.with_label_values(set_key).set(1); true + } else { + false }; loop { @@ -2671,10 +2672,9 @@ impl Tenant { current.inc(); if rx.changed().await.is_err() { - // tenant has been dropped; decrement the counter because a tenant with that - // state is no longer in tenant map, but allow any broken set item to exist - // still. + // tenant has been dropped current.dec(); + drop(BROKEN_TENANTS_SET.remove_label_values(set_key)); break; } @@ -2684,10 +2684,9 @@ impl Tenant { let is_broken = tuple.1; if is_broken && !counted_broken { counted_broken = true; - // insert the tenant_id (back) into the set - crate::metrics::BROKEN_TENANTS_SET - .with_label_values(&[&tid, &shard_id_str]) - .inc(); + // insert the tenant_id (back) into the set while avoiding needless counter + // access + BROKEN_TENANTS_SET.with_label_values(set_key).set(1); } } }); diff --git a/test_runner/fixtures/metrics.py b/test_runner/fixtures/metrics.py index 7c489bda67..ef41774289 100644 --- a/test_runner/fixtures/metrics.py +++ b/test_runner/fixtures/metrics.py @@ -96,5 +96,5 @@ PAGESERVER_PER_TENANT_METRICS: Tuple[str, ...] = ( "pageserver_evictions_total", "pageserver_evictions_with_low_residence_duration_total", *PAGESERVER_PER_TENANT_REMOTE_TIMELINE_CLIENT_METRICS, - # pageserver_broken_tenants_count is a leaked "metric" which is "cleared" on restart or reload + # "pageserver_broken_tenants_count" -- used only for broken ) diff --git a/test_runner/regress/test_tenant_detach.py b/test_runner/regress/test_tenant_detach.py index 8d5ef4e3c4..4752699abb 100644 --- a/test_runner/regress/test_tenant_detach.py +++ b/test_runner/regress/test_tenant_detach.py @@ -742,8 +742,6 @@ def ensure_test_data(data_id: int, data: str, endpoint: Endpoint): def test_metrics_while_ignoring_broken_tenant_and_reloading( neon_env_builder: NeonEnvBuilder, ): - neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS) - env = neon_env_builder.init_start() client = env.pageserver.http_client() @@ -761,56 +759,37 @@ def test_metrics_while_ignoring_broken_tenant_and_reloading( client.tenant_break(env.initial_tenant) - found_broken = False - active, broken, broken_set = ([], [], []) - for _ in range(10): + def found_broken(): m = client.get_metrics() active = m.query_all("pageserver_tenant_states_count", {"state": "Active"}) broken = m.query_all("pageserver_tenant_states_count", {"state": "Broken"}) broken_set = m.query_all( "pageserver_broken_tenants_count", {"tenant_id": str(env.initial_tenant)} ) - found_broken = only_int(active) == 0 and only_int(broken) == 1 and only_int(broken_set) == 1 + assert only_int(active) == 0 and only_int(broken) == 1 and only_int(broken_set) == 1 - if found_broken: - break - log.info(f"active: {active}, broken: {broken}, broken_set: {broken_set}") - time.sleep(0.5) - assert ( - found_broken - ), f"tenant shows up as broken; active={active}, broken={broken}, broken_set={broken_set}" + wait_until(10, 0.5, found_broken) client.tenant_ignore(env.initial_tenant) - found_broken = False - broken, broken_set = ([], []) - for _ in range(10): + def found_cleaned_up(): m = client.get_metrics() broken = 
m.query_all("pageserver_tenant_states_count", {"state": "Broken"}) broken_set = m.query_all( "pageserver_broken_tenants_count", {"tenant_id": str(env.initial_tenant)} ) - found_broken = only_int(broken) == 0 and only_int(broken_set) == 1 + assert only_int(broken) == 0 and len(broken_set) == 0 - if found_broken: - break - time.sleep(0.5) - assert found_broken, f"broken should still be in set, but it is not in the tenant state count: broken={broken}, broken_set={broken_set}" + wait_until(10, 0.5, found_cleaned_up) env.pageserver.tenant_load(env.initial_tenant) - found_active = False - active, broken_set = ([], []) - for _ in range(10): + def found_active(): m = client.get_metrics() active = m.query_all("pageserver_tenant_states_count", {"state": "Active"}) broken_set = m.query_all( "pageserver_broken_tenants_count", {"tenant_id": str(env.initial_tenant)} ) - found_active = only_int(active) == 1 and len(broken_set) == 0 + assert only_int(active) == 1 and len(broken_set) == 0 - if found_active: - break - time.sleep(0.5) - - assert found_active, f"reloaded tenant should be active, and broken tenant set item removed: active={active}, broken_set={broken_set}" + wait_until(10, 0.5, found_active) From 74c5e3d9b877ae006c0c5c4b4ea176ed36f647c1 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Mon, 5 Feb 2024 14:27:25 +0000 Subject: [PATCH 21/34] use string interner for project cache (#6578) ## Problem Running some memory profiling with high concurrent request rate shows seemingly some memory fragmentation. ## Summary of changes Eventually, we will want to separate global memory (caches) from local memory (per connection handshake and per passthrough). Using a string interner for project info cache helps reduce some of the fragmentation of the global cache by having a single heap dedicated to project strings, and not scattering them throughout all a requests. At the same time, the interned key is 4 bytes vs the 24 bytes that `SmolStr` offers. Important: we should only store verified strings in the interner because there's no way to remove them afterwards. Good for caching responses from console. 
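A rough sketch of the interning idea, using `lasso`'s thread-safe interner directly (with the `multi-threaded` feature enabled); the proxy's typed wrappers such as `ProjectIdInt` are not reproduced here, and the endpoint string is made up:

```rust
use lasso::{Spur, ThreadedRodeo};

fn main() {
    // One shared, thread-safe interner holds each distinct string exactly once.
    let interner = ThreadedRodeo::new();

    // Interning returns a 4-byte `Spur` key; repeated strings map to the same key.
    let a: Spur = interner.get_or_intern("ep-shiny-project-123456");
    let b: Spur = interner.get_or_intern("ep-shiny-project-123456");
    assert_eq!(a, b);
    assert_eq!(std::mem::size_of::<Spur>(), 4);

    // Keys resolve back to the original &str. Entries are never evicted, which is
    // why only verified strings should ever be interned.
    assert_eq!(interner.resolve(&a), "ep-shiny-project-123456");
}
```

The 4-byte key is what shrinks the per-request structures compared to carrying a 24-byte `SmolStr` everywhere.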
--- Cargo.lock | 13 ++ Cargo.toml | 1 + proxy/Cargo.toml | 2 + proxy/src/cache/project_info.rs | 84 ++++++----- proxy/src/intern.rs | 237 +++++++++++++++++++++++++++++++ proxy/src/lib.rs | 1 + proxy/src/redis/notifications.rs | 31 ++-- workspace_hack/Cargo.toml | 5 +- 8 files changed, 321 insertions(+), 53 deletions(-) create mode 100644 proxy/src/intern.rs diff --git a/Cargo.lock b/Cargo.lock index 02450709d1..c16331636a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2718,6 +2718,16 @@ dependencies = [ "libc", ] +[[package]] +name = "lasso" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4644821e1c3d7a560fe13d842d13f587c07348a1a05d3a797152d41c90c56df2" +dependencies = [ + "dashmap", + "hashbrown 0.13.2", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -4075,6 +4085,7 @@ dependencies = [ "hyper-tungstenite", "ipnet", "itertools", + "lasso", "md5", "metrics", "native-tls", @@ -4091,6 +4102,7 @@ dependencies = [ "pq_proto", "prometheus", "rand 0.8.5", + "rand_distr", "rcgen", "redis", "regex", @@ -6803,6 +6815,7 @@ dependencies = [ "futures-sink", "futures-util", "getrandom 0.2.11", + "hashbrown 0.13.2", "hashbrown 0.14.0", "hex", "hmac", diff --git a/Cargo.toml b/Cargo.toml index 0cfe522ff9..271edee742 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -95,6 +95,7 @@ inotify = "0.10.2" ipnet = "2.9.0" itertools = "0.10" jsonwebtoken = "9" +lasso = "0.7" libc = "0.2" md5 = "0.7.0" memoffset = "0.8" diff --git a/proxy/Cargo.toml b/proxy/Cargo.toml index 79abe639ed..1247f08ee6 100644 --- a/proxy/Cargo.toml +++ b/proxy/Cargo.toml @@ -31,6 +31,7 @@ hyper-tungstenite.workspace = true hyper.workspace = true ipnet.workspace = true itertools.workspace = true +lasso = { workspace = true, features = ["multi-threaded"] } md5.workspace = true metrics.workspace = true once_cell.workspace = true @@ -92,3 +93,4 @@ rcgen.workspace = true rstest.workspace = true tokio-postgres-rustls.workspace = true walkdir.workspace = true +rand_distr = "0.4" diff --git a/proxy/src/cache/project_info.rs b/proxy/src/cache/project_info.rs index 6f37868a8c..62015312a9 100644 --- a/proxy/src/cache/project_info.rs +++ b/proxy/src/cache/project_info.rs @@ -12,15 +12,18 @@ use tokio::time::Instant; use tracing::{debug, info}; use crate::{ - auth::IpPattern, config::ProjectInfoCacheOptions, console::AuthSecret, EndpointId, ProjectId, - RoleName, + auth::IpPattern, + config::ProjectInfoCacheOptions, + console::AuthSecret, + intern::{EndpointIdInt, ProjectIdInt, RoleNameInt}, + EndpointId, ProjectId, RoleName, }; use super::{Cache, Cached}; pub trait ProjectInfoCache { - fn invalidate_allowed_ips_for_project(&self, project_id: &ProjectId); - fn invalidate_role_secret_for_project(&self, project_id: &ProjectId, role_name: &RoleName); + fn invalidate_allowed_ips_for_project(&self, project_id: ProjectIdInt); + fn invalidate_role_secret_for_project(&self, project_id: ProjectIdInt, role_name: RoleNameInt); fn enable_ttl(&self); fn disable_ttl(&self); } @@ -47,7 +50,7 @@ impl From for Entry { #[derive(Default)] struct EndpointInfo { - secret: std::collections::HashMap>>, + secret: std::collections::HashMap>>, allowed_ips: Option>>>, } @@ -60,11 +63,11 @@ impl EndpointInfo { } pub fn get_role_secret( &self, - role_name: &RoleName, + role_name: RoleNameInt, valid_since: Instant, ignore_cache_since: Option, ) -> Option<(Option, bool)> { - if let Some(secret) = self.secret.get(role_name) { + if let Some(secret) = self.secret.get(&role_name) { if valid_since < secret.created_at { return Some(( 
secret.value.clone(), @@ -93,8 +96,8 @@ impl EndpointInfo { pub fn invalidate_allowed_ips(&mut self) { self.allowed_ips = None; } - pub fn invalidate_role_secret(&mut self, role_name: &RoleName) { - self.secret.remove(role_name); + pub fn invalidate_role_secret(&mut self, role_name: RoleNameInt) { + self.secret.remove(&role_name); } } @@ -106,9 +109,9 @@ impl EndpointInfo { /// One may ask, why the data is stored per project, when on the user request there is only data about the endpoint available? /// On the cplane side updates are done per project (or per branch), so it's easier to invalidate the whole project cache. pub struct ProjectInfoCacheImpl { - cache: DashMap, + cache: DashMap, - project2ep: DashMap>, + project2ep: DashMap>, config: ProjectInfoCacheOptions, start_time: Instant, @@ -116,11 +119,11 @@ pub struct ProjectInfoCacheImpl { } impl ProjectInfoCache for ProjectInfoCacheImpl { - fn invalidate_allowed_ips_for_project(&self, project_id: &ProjectId) { + fn invalidate_allowed_ips_for_project(&self, project_id: ProjectIdInt) { info!("invalidating allowed ips for project `{}`", project_id); let endpoints = self .project2ep - .get(project_id) + .get(&project_id) .map(|kv| kv.value().clone()) .unwrap_or_default(); for endpoint_id in endpoints { @@ -129,14 +132,14 @@ impl ProjectInfoCache for ProjectInfoCacheImpl { } } } - fn invalidate_role_secret_for_project(&self, project_id: &ProjectId, role_name: &RoleName) { + fn invalidate_role_secret_for_project(&self, project_id: ProjectIdInt, role_name: RoleNameInt) { info!( "invalidating role secret for project_id `{}` and role_name `{}`", - project_id, role_name + project_id, role_name, ); let endpoints = self .project2ep - .get(project_id) + .get(&project_id) .map(|kv| kv.value().clone()) .unwrap_or_default(); for endpoint_id in endpoints { @@ -173,15 +176,17 @@ impl ProjectInfoCacheImpl { endpoint_id: &EndpointId, role_name: &RoleName, ) -> Option>> { + let endpoint_id = EndpointIdInt::get(endpoint_id)?; + let role_name = RoleNameInt::get(role_name)?; let (valid_since, ignore_cache_since) = self.get_cache_times(); - let endpoint_info = self.cache.get(endpoint_id)?; + let endpoint_info = self.cache.get(&endpoint_id)?; let (value, ignore_cache) = endpoint_info.get_role_secret(role_name, valid_since, ignore_cache_since)?; if !ignore_cache { let cached = Cached { token: Some(( self, - CachedLookupInfo::new_role_secret(endpoint_id.clone(), role_name.clone()), + CachedLookupInfo::new_role_secret(endpoint_id, role_name), )), value, }; @@ -193,13 +198,14 @@ impl ProjectInfoCacheImpl { &self, endpoint_id: &EndpointId, ) -> Option>>> { + let endpoint_id = EndpointIdInt::get(endpoint_id)?; let (valid_since, ignore_cache_since) = self.get_cache_times(); - let endpoint_info = self.cache.get(endpoint_id)?; + let endpoint_info = self.cache.get(&endpoint_id)?; let value = endpoint_info.get_allowed_ips(valid_since, ignore_cache_since); let (value, ignore_cache) = value?; if !ignore_cache { let cached = Cached { - token: Some((self, CachedLookupInfo::new_allowed_ips(endpoint_id.clone()))), + token: Some((self, CachedLookupInfo::new_allowed_ips(endpoint_id))), value, }; return Some(cached); @@ -213,14 +219,17 @@ impl ProjectInfoCacheImpl { role_name: &RoleName, secret: Option, ) { + let project_id = ProjectIdInt::from(project_id); + let endpoint_id = EndpointIdInt::from(endpoint_id); + let role_name = RoleNameInt::from(role_name); if self.cache.len() >= self.config.size { // If there are too many entries, wait until the next gc cycle. 
return; } - self.inser_project2endpoint(project_id, endpoint_id); - let mut entry = self.cache.entry(endpoint_id.clone()).or_default(); + self.insert_project2endpoint(project_id, endpoint_id); + let mut entry = self.cache.entry(endpoint_id).or_default(); if entry.secret.len() < self.config.max_roles { - entry.secret.insert(role_name.clone(), secret.into()); + entry.secret.insert(role_name, secret.into()); } } pub fn insert_allowed_ips( @@ -229,22 +238,21 @@ impl ProjectInfoCacheImpl { endpoint_id: &EndpointId, allowed_ips: Arc>, ) { + let project_id = ProjectIdInt::from(project_id); + let endpoint_id = EndpointIdInt::from(endpoint_id); if self.cache.len() >= self.config.size { // If there are too many entries, wait until the next gc cycle. return; } - self.inser_project2endpoint(project_id, endpoint_id); - self.cache - .entry(endpoint_id.clone()) - .or_default() - .allowed_ips = Some(allowed_ips.into()); + self.insert_project2endpoint(project_id, endpoint_id); + self.cache.entry(endpoint_id).or_default().allowed_ips = Some(allowed_ips.into()); } - fn inser_project2endpoint(&self, project_id: &ProjectId, endpoint_id: &EndpointId) { - if let Some(mut endpoints) = self.project2ep.get_mut(project_id) { - endpoints.insert(endpoint_id.clone()); + fn insert_project2endpoint(&self, project_id: ProjectIdInt, endpoint_id: EndpointIdInt) { + if let Some(mut endpoints) = self.project2ep.get_mut(&project_id) { + endpoints.insert(endpoint_id); } else { self.project2ep - .insert(project_id.clone(), HashSet::from([endpoint_id.clone()])); + .insert(project_id, HashSet::from([endpoint_id])); } } fn get_cache_times(&self) -> (Instant, Option) { @@ -300,18 +308,18 @@ impl ProjectInfoCacheImpl { /// This is used to invalidate cache entries. pub struct CachedLookupInfo { /// Search by this key. - endpoint_id: EndpointId, + endpoint_id: EndpointIdInt, lookup_type: LookupType, } impl CachedLookupInfo { - pub(self) fn new_role_secret(endpoint_id: EndpointId, role_name: RoleName) -> Self { + pub(self) fn new_role_secret(endpoint_id: EndpointIdInt, role_name: RoleNameInt) -> Self { Self { endpoint_id, lookup_type: LookupType::RoleSecret(role_name), } } - pub(self) fn new_allowed_ips(endpoint_id: EndpointId) -> Self { + pub(self) fn new_allowed_ips(endpoint_id: EndpointIdInt) -> Self { Self { endpoint_id, lookup_type: LookupType::AllowedIps, @@ -320,7 +328,7 @@ impl CachedLookupInfo { } enum LookupType { - RoleSecret(RoleName), + RoleSecret(RoleNameInt), AllowedIps, } @@ -335,7 +343,7 @@ impl Cache for ProjectInfoCacheImpl { match &key.lookup_type { LookupType::RoleSecret(role_name) => { if let Some(mut endpoint_info) = self.cache.get_mut(&key.endpoint_id) { - endpoint_info.invalidate_role_secret(role_name); + endpoint_info.invalidate_role_secret(*role_name); } } LookupType::AllowedIps => { @@ -457,7 +465,7 @@ mod tests { assert_eq!(cached.value, secret2); // The only way to invalidate this value is to invalidate via the api. 
- cache.invalidate_role_secret_for_project(&project_id, &user2); + cache.invalidate_role_secret_for_project((&project_id).into(), (&user2).into()); assert!(cache.get_role_secret(&endpoint_id, &user2).is_none()); let cached = cache.get_allowed_ips(&endpoint_id).unwrap(); diff --git a/proxy/src/intern.rs b/proxy/src/intern.rs new file mode 100644 index 0000000000..a6519bdff9 --- /dev/null +++ b/proxy/src/intern.rs @@ -0,0 +1,237 @@ +use std::{ + hash::BuildHasherDefault, marker::PhantomData, num::NonZeroUsize, ops::Index, sync::OnceLock, +}; + +use lasso::{Capacity, MemoryLimits, Spur, ThreadedRodeo}; +use rustc_hash::FxHasher; + +use crate::{BranchId, EndpointId, ProjectId, RoleName}; + +pub trait InternId: Sized + 'static { + fn get_interner() -> &'static StringInterner; +} + +pub struct StringInterner { + inner: ThreadedRodeo>, + _id: PhantomData, +} + +#[derive(PartialEq, Debug, Clone, Copy, Eq, Hash)] +pub struct InternedString { + inner: Spur, + _id: PhantomData, +} + +impl std::fmt::Display for InternedString { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.as_str().fmt(f) + } +} + +impl InternedString { + pub fn as_str(&self) -> &'static str { + Id::get_interner().inner.resolve(&self.inner) + } + pub fn get(s: &str) -> Option { + Id::get_interner().get(s) + } +} + +impl AsRef for InternedString { + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl std::ops::Deref for InternedString { + type Target = str; + fn deref(&self) -> &str { + self.as_str() + } +} + +impl<'de, Id: InternId> serde::de::Deserialize<'de> for InternedString { + fn deserialize>(d: D) -> Result { + struct Visitor(PhantomData); + impl<'de, Id: InternId> serde::de::Visitor<'de> for Visitor { + type Value = InternedString; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("a string") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + Ok(Id::get_interner().get_or_intern(v)) + } + } + d.deserialize_str(Visitor::(PhantomData)) + } +} + +impl serde::Serialize for InternedString { + fn serialize(&self, s: S) -> Result { + self.as_str().serialize(s) + } +} + +impl StringInterner { + pub fn new() -> Self { + StringInterner { + inner: ThreadedRodeo::with_capacity_memory_limits_and_hasher( + Capacity::new(2500, NonZeroUsize::new(1 << 16).unwrap()), + // unbounded + MemoryLimits::for_memory_usage(usize::MAX), + BuildHasherDefault::::default(), + ), + _id: PhantomData, + } + } + + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn current_memory_usage(&self) -> usize { + self.inner.current_memory_usage() + } + + pub fn get_or_intern(&self, s: &str) -> InternedString { + InternedString { + inner: self.inner.get_or_intern(s), + _id: PhantomData, + } + } + + pub fn get(&self, s: &str) -> Option> { + Some(InternedString { + inner: self.inner.get(s)?, + _id: PhantomData, + }) + } +} + +impl Index> for StringInterner { + type Output = str; + + fn index(&self, index: InternedString) -> &Self::Output { + self.inner.resolve(&index.inner) + } +} + +impl Default for StringInterner { + fn default() -> Self { + Self::new() + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct RoleNameTag; +impl InternId for RoleNameTag { + fn get_interner() -> &'static StringInterner { + pub static ROLE_NAMES: OnceLock> = OnceLock::new(); + ROLE_NAMES.get_or_init(Default::default) + } +} +pub type RoleNameInt = InternedString; +impl 
From<&RoleName> for RoleNameInt { + fn from(value: &RoleName) -> Self { + RoleNameTag::get_interner().get_or_intern(value) + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct EndpointIdTag; +impl InternId for EndpointIdTag { + fn get_interner() -> &'static StringInterner { + pub static ROLE_NAMES: OnceLock> = OnceLock::new(); + ROLE_NAMES.get_or_init(Default::default) + } +} +pub type EndpointIdInt = InternedString; +impl From<&EndpointId> for EndpointIdInt { + fn from(value: &EndpointId) -> Self { + EndpointIdTag::get_interner().get_or_intern(value) + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct BranchIdTag; +impl InternId for BranchIdTag { + fn get_interner() -> &'static StringInterner { + pub static ROLE_NAMES: OnceLock> = OnceLock::new(); + ROLE_NAMES.get_or_init(Default::default) + } +} +pub type BranchIdInt = InternedString; +impl From<&BranchId> for BranchIdInt { + fn from(value: &BranchId) -> Self { + BranchIdTag::get_interner().get_or_intern(value) + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct ProjectIdTag; +impl InternId for ProjectIdTag { + fn get_interner() -> &'static StringInterner { + pub static ROLE_NAMES: OnceLock> = OnceLock::new(); + ROLE_NAMES.get_or_init(Default::default) + } +} +pub type ProjectIdInt = InternedString; +impl From<&ProjectId> for ProjectIdInt { + fn from(value: &ProjectId) -> Self { + ProjectIdTag::get_interner().get_or_intern(value) + } +} + +#[cfg(test)] +mod tests { + use std::sync::OnceLock; + + use crate::intern::StringInterner; + + use super::InternId; + + struct MyId; + impl InternId for MyId { + fn get_interner() -> &'static StringInterner { + pub static ROLE_NAMES: OnceLock> = OnceLock::new(); + ROLE_NAMES.get_or_init(Default::default) + } + } + + #[test] + fn push_many_strings() { + use rand::{rngs::StdRng, Rng, SeedableRng}; + use rand_distr::Zipf; + + let endpoint_dist = Zipf::new(500000, 0.8).unwrap(); + let endpoints = StdRng::seed_from_u64(272488357).sample_iter(endpoint_dist); + + let interner = MyId::get_interner(); + + const N: usize = 100_000; + let mut verify = Vec::with_capacity(N); + for endpoint in endpoints.take(N) { + let endpoint = format!("ep-string-interning-{endpoint}"); + let key = interner.get_or_intern(&endpoint); + verify.push((endpoint, key)); + } + + for (s, key) in verify { + assert_eq!(interner[key], s); + } + + // 2031616/59861 = 34 bytes per string + assert_eq!(interner.len(), 59_861); + // will have other overhead for the internal hashmaps that are not accounted for. 
+ assert_eq!(interner.current_memory_usage(), 2_031_616); + } +} diff --git a/proxy/src/lib.rs b/proxy/src/lib.rs index db6256d611..da7c7f3ed2 100644 --- a/proxy/src/lib.rs +++ b/proxy/src/lib.rs @@ -16,6 +16,7 @@ pub mod console; pub mod context; pub mod error; pub mod http; +pub mod intern; pub mod jemalloc; pub mod logging; pub mod metrics; diff --git a/proxy/src/redis/notifications.rs b/proxy/src/redis/notifications.rs index 9cd70b109b..158884aa17 100644 --- a/proxy/src/redis/notifications.rs +++ b/proxy/src/redis/notifications.rs @@ -4,7 +4,10 @@ use futures::StreamExt; use redis::aio::PubSub; use serde::Deserialize; -use crate::{cache::project_info::ProjectInfoCache, ProjectId, RoleName}; +use crate::{ + cache::project_info::ProjectInfoCache, + intern::{ProjectIdInt, RoleNameInt}, +}; const CHANNEL_NAME: &str = "neondb-proxy-ws-updates"; const RECONNECT_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(20); @@ -45,12 +48,12 @@ enum Notification { } #[derive(Clone, Debug, Deserialize, Eq, PartialEq)] struct AllowedIpsUpdate { - project_id: ProjectId, + project_id: ProjectIdInt, } #[derive(Clone, Debug, Deserialize, Eq, PartialEq)] struct PasswordUpdate { - project_id: ProjectId, - role_name: RoleName, + project_id: ProjectIdInt, + role_name: RoleNameInt, } fn deserialize_json_string<'de, D, T>(deserializer: D) -> Result where @@ -65,11 +68,11 @@ fn invalidate_cache(cache: Arc, msg: Notification) { use Notification::*; match msg { AllowedIpsUpdate { allowed_ips_update } => { - cache.invalidate_allowed_ips_for_project(&allowed_ips_update.project_id) + cache.invalidate_allowed_ips_for_project(allowed_ips_update.project_id) } PasswordUpdate { password_update } => cache.invalidate_role_secret_for_project( - &password_update.project_id, - &password_update.role_name, + password_update.project_id, + password_update.role_name, ), } } @@ -141,12 +144,14 @@ where #[cfg(test)] mod tests { + use crate::{ProjectId, RoleName}; + use super::*; use serde_json::json; #[test] fn parse_allowed_ips() -> anyhow::Result<()> { - let project_id = "new_project".to_string(); + let project_id: ProjectId = "new_project".into(); let data = format!("{{\"project_id\": \"{project_id}\"}}"); let text = json!({ "type": "message", @@ -161,7 +166,7 @@ mod tests { result, Notification::AllowedIpsUpdate { allowed_ips_update: AllowedIpsUpdate { - project_id: project_id.into() + project_id: (&project_id).into() } } ); @@ -171,8 +176,8 @@ mod tests { #[test] fn parse_password_updated() -> anyhow::Result<()> { - let project_id = "new_project".to_string(); - let role_name = "new_role".to_string(); + let project_id: ProjectId = "new_project".into(); + let role_name: RoleName = "new_role".into(); let data = format!("{{\"project_id\": \"{project_id}\", \"role_name\": \"{role_name}\"}}"); let text = json!({ "type": "message", @@ -187,8 +192,8 @@ mod tests { result, Notification::PasswordUpdate { password_update: PasswordUpdate { - project_id: project_id.into(), - role_name: role_name.into() + project_id: (&project_id).into(), + role_name: (&role_name).into(), } } ); diff --git a/workspace_hack/Cargo.toml b/workspace_hack/Cargo.toml index f58b912a77..74464dd4c8 100644 --- a/workspace_hack/Cargo.toml +++ b/workspace_hack/Cargo.toml @@ -39,7 +39,8 @@ futures-io = { version = "0.3" } futures-sink = { version = "0.3" } futures-util = { version = "0.3", features = ["channel", "io", "sink"] } getrandom = { version = "0.2", default-features = false, features = ["std"] } -hashbrown = { version = "0.14", default-features = false, 
features = ["raw"] } +hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14", default-features = false, features = ["raw"] } +hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13", features = ["raw"] } hex = { version = "0.4", features = ["serde"] } hmac = { version = "0.12", default-features = false, features = ["reset"] } hyper = { version = "0.14", features = ["full"] } @@ -91,7 +92,7 @@ cc = { version = "1", default-features = false, features = ["parallel"] } chrono = { version = "0.4", default-features = false, features = ["clock", "serde", "wasmbind"] } either = { version = "1" } getrandom = { version = "0.2", default-features = false, features = ["std"] } -hashbrown = { version = "0.14", default-features = false, features = ["raw"] } +hashbrown-582f2526e08bb6a0 = { package = "hashbrown", version = "0.14", default-features = false, features = ["raw"] } indexmap = { version = "1", default-features = false, features = ["std"] } itertools = { version = "0.10" } libc = { version = "0.2", features = ["extra_traits", "use_std"] } From cb7c89332f25c652fa7dd06a9be7d984f8cc3989 Mon Sep 17 00:00:00 2001 From: John Spray Date: Mon, 5 Feb 2024 14:29:05 +0000 Subject: [PATCH 22/34] control_plane: fix tenant GET, clean up endpoints (#6553) Cleanups from https://github.com/neondatabase/neon/pull/6394 - There was a rogue `*` breaking the `GET /tenant/:tenant_id`, which passes through to shard zero - There was a duplicate migrate endpoint - There are un-prefixed API endpoints that were only needed for compat tests and can now be removed. --- control_plane/attachment_service/src/http.rs | 10 +--------- test_runner/regress/test_sharding_service.py | 7 +++++++ 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/control_plane/attachment_service/src/http.rs b/control_plane/attachment_service/src/http.rs index aa8c73c493..049e66fddf 100644 --- a/control_plane/attachment_service/src/http.rs +++ b/control_plane/attachment_service/src/http.rs @@ -403,10 +403,6 @@ pub fn make_router( .put("/v1/tenant/:tenant_id/location_config", |r| { tenant_service_handler(r, handle_tenant_location_config) }) - // Tenant Shard operations (low level/maintenance) - .put("/tenant/:tenant_shard_id/migrate", |r| { - tenant_service_handler(r, handle_tenant_shard_migrate) - }) // Timeline operations .delete("/v1/tenant/:tenant_id/timeline/:timeline_id", |r| { tenant_service_handler(r, handle_tenant_timeline_delete) @@ -415,7 +411,7 @@ pub fn make_router( tenant_service_handler(r, handle_tenant_timeline_create) }) // Tenant detail GET passthrough to shard zero - .get("/v1/tenant/:tenant_id*", |r| { + .get("/v1/tenant/:tenant_id", |r| { tenant_service_handler(r, handle_tenant_timeline_passthrough) }) // Timeline GET passthrough to shard zero. 
Note that the `*` in the URL is a wildcard: any future @@ -423,8 +419,4 @@ pub fn make_router( .get("/v1/tenant/:tenant_id/timeline*", |r| { tenant_service_handler(r, handle_tenant_timeline_passthrough) }) - // Path aliases for tests_forward_compatibility - // TODO: remove these in future PR - .post("/re-attach", |r| request_span(r, handle_re_attach)) - .post("/validate", |r| request_span(r, handle_validate)) } diff --git a/test_runner/regress/test_sharding_service.py b/test_runner/regress/test_sharding_service.py index 346df708de..5c70378ab0 100644 --- a/test_runner/regress/test_sharding_service.py +++ b/test_runner/regress/test_sharding_service.py @@ -140,6 +140,13 @@ def test_sharding_service_passthrough( timelines = client.timeline_list(tenant_id=env.initial_tenant) assert len(timelines) == 1 + status = client.tenant_status(env.initial_tenant) + assert TenantId(status["id"]) == env.initial_tenant + assert set(TimelineId(t) for t in status["timelines"]) == { + env.initial_timeline, + } + assert status["state"]["slug"] == "Active" + def test_sharding_service_restart(neon_env_builder: NeonEnvBuilder): env = neon_env_builder.init_start() From 8e114bd6101dee117e1125ea68dfbdbbc59c965f Mon Sep 17 00:00:00 2001 From: John Spray Date: Mon, 5 Feb 2024 19:31:55 +0000 Subject: [PATCH 23/34] control_plane/attachment_service: make --database-url optional (#6636) ## Problem This change was left out of #6585 accidentally -- just forgot to push the very last version of my branch. Now that we can load database url from Secrets Manager, we don't always need it on the CLI any more. We should let the user omit it instead of passing `--database-url ""` ## Summary of changes - Make `--database-url` optional --- control_plane/attachment_service/src/main.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/control_plane/attachment_service/src/main.rs b/control_plane/attachment_service/src/main.rs index eda9c7aad6..37b06c4090 100644 --- a/control_plane/attachment_service/src/main.rs +++ b/control_plane/attachment_service/src/main.rs @@ -53,7 +53,7 @@ struct Cli { /// URL to connect to postgres, like postgresql://localhost:1234/attachment_service #[arg(long)] - database_url: String, + database_url: Option, } /// Secrets may either be provided on the command line (for testing), or loaded from AWS SecretManager: this @@ -74,10 +74,9 @@ impl Secrets { const PUBLIC_KEY_SECRET: &'static str = "neon-storage-controller-public-key"; async fn load(args: &Cli) -> anyhow::Result { - if args.database_url.is_empty() { - Self::load_aws_sm().await - } else { - Self::load_cli(args) + match &args.database_url { + Some(url) => Self::load_cli(url, args), + None => Self::load_aws_sm().await, } } @@ -153,13 +152,13 @@ impl Secrets { }) } - fn load_cli(args: &Cli) -> anyhow::Result { + fn load_cli(database_url: &str, args: &Cli) -> anyhow::Result { let public_key = match &args.public_key { None => None, Some(key_path) => Some(JwtAuth::from_key_path(key_path)?), }; Ok(Self { - database_url: args.database_url.clone(), + database_url: database_url.to_owned(), public_key, jwt_token: args.jwt_token.clone(), control_plane_jwt_token: args.control_plane_jwt_token.clone(), From 947165788dc2447b17b8cd163568d10b8c4ddeaa Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Tue, 6 Feb 2024 09:39:06 +0200 Subject: [PATCH 24/34] refactor: needless cancellation token cloning (#6618) The solution we ended up for `backoff::retry` requires always cloning of cancellation tokens even though there is just `.await`. 
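As a rough illustration, a call site previously had to build a `backoff::Cancel` from a cloned token plus an error constructor (a simplified sketch; the closure body, retry counts, and `NotifyError` are taken from the compute_hook hunk below and stand in for the real arguments):

    let res = backoff::retry(
        || async { notify().await },
        |_| false,
        3,
        10,
        "Send compute notification",
        backoff::Cancel::new(cancel.clone(), || NotifyError::ShuttingDown),
    )
    .await;

The hunks below convert such call sites to pass the token by reference and to surface cancellation as `None`, which callers map back to their own error type:

    let res = backoff::retry(
        || async { notify().await },
        |_| false,
        3,
        10,
        "Send compute notification",
        &cancel,
    )
    .await
    .ok_or_else(|| NotifyError::ShuttingDown)
    .and_then(|x| x);
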
Fix that, and also turn the return type into `Option>` avoiding the need for the `E::cancelled()` fn passed in. Cc: #6096 --- .../attachment_service/src/compute_hook.rs | 4 +- libs/remote_storage/src/azure_blob.rs | 2 +- libs/remote_storage/src/lib.rs | 4 +- libs/remote_storage/src/local_fs.rs | 2 +- libs/remote_storage/src/s3_bucket.rs | 14 ++- libs/remote_storage/src/simulate_failures.rs | 2 +- libs/remote_storage/tests/test_real_s3.rs | 11 ++- libs/utils/src/backoff.rs | 92 ++++++++----------- pageserver/src/consumption_metrics/upload.rs | 56 +++++------ pageserver/src/control_plane_client.rs | 35 ++----- pageserver/src/deletion_queue/deleter.rs | 4 +- pageserver/src/tenant.rs | 8 +- pageserver/src/tenant/delete.rs | 8 +- .../src/tenant/remote_timeline_client.rs | 16 +++- .../tenant/remote_timeline_client/download.rs | 19 ++-- .../tenant/remote_timeline_client/upload.rs | 8 +- pageserver/src/tenant/secondary/downloader.rs | 8 +- .../src/tenant/secondary/heatmap_uploader.rs | 4 +- proxy/src/context/parquet.rs | 4 +- safekeeper/src/wal_backup.rs | 9 +- 20 files changed, 156 insertions(+), 154 deletions(-) diff --git a/control_plane/attachment_service/src/compute_hook.rs b/control_plane/attachment_service/src/compute_hook.rs index 9c1185f259..4ca26431ca 100644 --- a/control_plane/attachment_service/src/compute_hook.rs +++ b/control_plane/attachment_service/src/compute_hook.rs @@ -244,9 +244,11 @@ impl ComputeHook { 3, 10, "Send compute notification", - backoff::Cancel::new(cancel.clone(), || NotifyError::ShuttingDown), + cancel, ) .await + .ok_or_else(|| NotifyError::ShuttingDown) + .and_then(|x| x) } /// Call this to notify the compute (postgres) tier of new pageservers to use diff --git a/libs/remote_storage/src/azure_blob.rs b/libs/remote_storage/src/azure_blob.rs index 57c57a2b70..c6d5224706 100644 --- a/libs/remote_storage/src/azure_blob.rs +++ b/libs/remote_storage/src/azure_blob.rs @@ -379,7 +379,7 @@ impl RemoteStorage for AzureBlobStorage { _prefix: Option<&RemotePath>, _timestamp: SystemTime, _done_if_after: SystemTime, - _cancel: CancellationToken, + _cancel: &CancellationToken, ) -> Result<(), TimeTravelError> { // TODO use Azure point in time recovery feature for this // https://learn.microsoft.com/en-us/azure/storage/blobs/point-in-time-restore-overview diff --git a/libs/remote_storage/src/lib.rs b/libs/remote_storage/src/lib.rs index 4aeaee70b1..e64b1de6f9 100644 --- a/libs/remote_storage/src/lib.rs +++ b/libs/remote_storage/src/lib.rs @@ -218,7 +218,7 @@ pub trait RemoteStorage: Send + Sync + 'static { prefix: Option<&RemotePath>, timestamp: SystemTime, done_if_after: SystemTime, - cancel: CancellationToken, + cancel: &CancellationToken, ) -> Result<(), TimeTravelError>; } @@ -442,7 +442,7 @@ impl GenericRemoteStorage> { prefix: Option<&RemotePath>, timestamp: SystemTime, done_if_after: SystemTime, - cancel: CancellationToken, + cancel: &CancellationToken, ) -> Result<(), TimeTravelError> { match self { Self::LocalFs(s) => { diff --git a/libs/remote_storage/src/local_fs.rs b/libs/remote_storage/src/local_fs.rs index d47fa75b37..36ec15e1b1 100644 --- a/libs/remote_storage/src/local_fs.rs +++ b/libs/remote_storage/src/local_fs.rs @@ -431,7 +431,7 @@ impl RemoteStorage for LocalFs { _prefix: Option<&RemotePath>, _timestamp: SystemTime, _done_if_after: SystemTime, - _cancel: CancellationToken, + _cancel: &CancellationToken, ) -> Result<(), TimeTravelError> { Err(TimeTravelError::Unimplemented) } diff --git a/libs/remote_storage/src/s3_bucket.rs 
b/libs/remote_storage/src/s3_bucket.rs index 4d6564cba6..c9ad9ef225 100644 --- a/libs/remote_storage/src/s3_bucket.rs +++ b/libs/remote_storage/src/s3_bucket.rs @@ -638,7 +638,7 @@ impl RemoteStorage for S3Bucket { prefix: Option<&RemotePath>, timestamp: SystemTime, done_if_after: SystemTime, - cancel: CancellationToken, + cancel: &CancellationToken, ) -> Result<(), TimeTravelError> { let kind = RequestKind::TimeTravel; let _guard = self.permit(kind).await; @@ -678,9 +678,11 @@ impl RemoteStorage for S3Bucket { warn_threshold, max_retries, "listing object versions for time_travel_recover", - backoff::Cancel::new(cancel.clone(), || TimeTravelError::Cancelled), + cancel, ) - .await?; + .await + .ok_or_else(|| TimeTravelError::Cancelled) + .and_then(|x| x)?; tracing::trace!( " Got List response version_id_marker={:?}, key_marker={:?}", @@ -805,9 +807,11 @@ impl RemoteStorage for S3Bucket { warn_threshold, max_retries, "copying object version for time_travel_recover", - backoff::Cancel::new(cancel.clone(), || TimeTravelError::Cancelled), + cancel, ) - .await?; + .await + .ok_or_else(|| TimeTravelError::Cancelled) + .and_then(|x| x)?; tracing::info!(%version_id, %key, "Copied old version in S3"); } VerOrDelete { diff --git a/libs/remote_storage/src/simulate_failures.rs b/libs/remote_storage/src/simulate_failures.rs index ee9792232a..82d5a61fda 100644 --- a/libs/remote_storage/src/simulate_failures.rs +++ b/libs/remote_storage/src/simulate_failures.rs @@ -190,7 +190,7 @@ impl RemoteStorage for UnreliableWrapper { prefix: Option<&RemotePath>, timestamp: SystemTime, done_if_after: SystemTime, - cancel: CancellationToken, + cancel: &CancellationToken, ) -> Result<(), TimeTravelError> { self.attempt(RemoteOp::TimeTravelRecover(prefix.map(|p| p.to_owned()))) .map_err(|e| TimeTravelError::Other(anyhow::Error::new(e)))?; diff --git a/libs/remote_storage/tests/test_real_s3.rs b/libs/remote_storage/tests/test_real_s3.rs index 679be66bf7..fc52dabc36 100644 --- a/libs/remote_storage/tests/test_real_s3.rs +++ b/libs/remote_storage/tests/test_real_s3.rs @@ -56,9 +56,10 @@ async fn s3_time_travel_recovery_works(ctx: &mut MaybeEnabledStorage) -> anyhow: warn_threshold, max_retries, "test retry", - backoff::Cancel::new(CancellationToken::new(), || unreachable!()), + &CancellationToken::new(), ) .await + .expect("never cancelled") } async fn time_point() -> SystemTime { @@ -76,6 +77,8 @@ async fn s3_time_travel_recovery_works(ctx: &mut MaybeEnabledStorage) -> anyhow: .collect::>()) } + let cancel = CancellationToken::new(); + let path1 = RemotePath::new(Utf8Path::new(format!("{}/path1", ctx.base_prefix).as_str())) .with_context(|| "RemotePath conversion")?; @@ -142,7 +145,7 @@ async fn s3_time_travel_recovery_works(ctx: &mut MaybeEnabledStorage) -> anyhow: // No changes after recovery to t2 (no-op) let t_final = time_point().await; ctx.client - .time_travel_recover(None, t2, t_final, CancellationToken::new()) + .time_travel_recover(None, t2, t_final, &cancel) .await?; let t2_files_recovered = list_files(&ctx.client).await?; println!("after recovery to t2: {t2_files_recovered:?}"); @@ -153,7 +156,7 @@ async fn s3_time_travel_recovery_works(ctx: &mut MaybeEnabledStorage) -> anyhow: // after recovery to t1: path1 is back, path2 has the old content let t_final = time_point().await; ctx.client - .time_travel_recover(None, t1, t_final, CancellationToken::new()) + .time_travel_recover(None, t1, t_final, &cancel) .await?; let t1_files_recovered = list_files(&ctx.client).await?; println!("after recovery to t1: 
{t1_files_recovered:?}"); @@ -164,7 +167,7 @@ async fn s3_time_travel_recovery_works(ctx: &mut MaybeEnabledStorage) -> anyhow: // after recovery to t0: everything is gone except for path1 let t_final = time_point().await; ctx.client - .time_travel_recover(None, t0, t_final, CancellationToken::new()) + .time_travel_recover(None, t0, t_final, &cancel) .await?; let t0_files_recovered = list_files(&ctx.client).await?; println!("after recovery to t0: {t0_files_recovered:?}"); diff --git a/libs/utils/src/backoff.rs b/libs/utils/src/backoff.rs index d50ad39585..096c7e5854 100644 --- a/libs/utils/src/backoff.rs +++ b/libs/utils/src/backoff.rs @@ -37,69 +37,53 @@ pub fn exponential_backoff_duration_seconds(n: u32, base_increment: f64, max_sec } } -/// Configure cancellation for a retried operation: when to cancel (the token), and -/// what kind of error to return on cancellation -pub struct Cancel -where - E: Display + Debug + 'static, - CF: Fn() -> E, -{ - token: CancellationToken, - on_cancel: CF, -} - -impl Cancel -where - E: Display + Debug + 'static, - CF: Fn() -> E, -{ - pub fn new(token: CancellationToken, on_cancel: CF) -> Self { - Self { token, on_cancel } - } -} - -/// retries passed operation until one of the following conditions are met: -/// Encountered error is considered as permanent (non-retryable) -/// Retries have been exhausted. -/// `is_permanent` closure should be used to provide distinction between permanent/non-permanent errors -/// When attempts cross `warn_threshold` function starts to emit log warnings. +/// Retries passed operation until one of the following conditions are met: +/// - encountered error is considered as permanent (non-retryable) +/// - retries have been exhausted +/// - cancellation token has been cancelled +/// +/// `is_permanent` closure should be used to provide distinction between permanent/non-permanent +/// errors. When attempts cross `warn_threshold` function starts to emit log warnings. /// `description` argument is added to log messages. Its value should identify the `op` is doing -/// `cancel` argument is required: any time we are looping on retry, we should be using a CancellationToken -/// to drop out promptly on shutdown. -pub async fn retry( +/// `cancel` cancels new attempts and the backoff sleep. +/// +/// If attempts fail, they are being logged with `{:#}` which works for anyhow, but does not work +/// for any other error type. Final failed attempt is logged with `{:?}`. +/// +/// Returns `None` if cancellation was noticed during backoff or the terminal result. +pub async fn retry( mut op: O, is_permanent: impl Fn(&E) -> bool, warn_threshold: u32, max_retries: u32, description: &str, - cancel: Cancel, -) -> Result + cancel: &CancellationToken, +) -> Option> where // Not std::error::Error because anyhow::Error doesnt implement it. // For context see https://github.com/dtolnay/anyhow/issues/63 E: Display + Debug + 'static, O: FnMut() -> F, F: Future>, - CF: Fn() -> E, { let mut attempts = 0; loop { - if cancel.token.is_cancelled() { - return Err((cancel.on_cancel)()); + if cancel.is_cancelled() { + return None; } let result = op().await; - match result { + match &result { Ok(_) => { if attempts > 0 { tracing::info!("{description} succeeded after {attempts} retries"); } - return result; + return Some(result); } // These are "permanent" errors that should not be retried. 
- Err(ref e) if is_permanent(e) => { - return result; + Err(e) if is_permanent(e) => { + return Some(result); } // Assume that any other failure might be transient, and the operation might // succeed if we just keep trying. @@ -109,12 +93,12 @@ where Err(err) if attempts < max_retries => { tracing::warn!("{description} failed, will retry (attempt {attempts}): {err:#}"); } - Err(ref err) => { + Err(err) => { // Operation failed `max_attempts` times. Time to give up. tracing::warn!( "{description} still failed after {attempts} retries, giving up: {err:?}" ); - return result; + return Some(result); } } // sleep and retry @@ -122,7 +106,7 @@ where attempts, DEFAULT_BASE_BACKOFF_SECONDS, DEFAULT_MAX_BACKOFF_SECONDS, - &cancel.token, + cancel, ) .await; attempts += 1; @@ -131,11 +115,9 @@ where #[cfg(test)] mod tests { - use std::io; - - use tokio::sync::Mutex; - use super::*; + use std::io; + use tokio::sync::Mutex; #[test] fn backoff_defaults_produce_growing_backoff_sequence() { @@ -166,7 +148,7 @@ mod tests { #[tokio::test(start_paused = true)] async fn retry_always_error() { let count = Mutex::new(0); - let err_result = retry( + retry( || async { *count.lock().await += 1; Result::<(), io::Error>::Err(io::Error::from(io::ErrorKind::Other)) @@ -175,11 +157,11 @@ mod tests { 1, 1, "work", - Cancel::new(CancellationToken::new(), || -> io::Error { unreachable!() }), + &CancellationToken::new(), ) - .await; - - assert!(err_result.is_err()); + .await + .expect("not cancelled") + .expect_err("it can only fail"); assert_eq!(*count.lock().await, 2); } @@ -201,10 +183,11 @@ mod tests { 2, 2, "work", - Cancel::new(CancellationToken::new(), || -> io::Error { unreachable!() }), + &CancellationToken::new(), ) .await - .unwrap(); + .expect("not cancelled") + .expect("success on second try"); } #[tokio::test(start_paused = true)] @@ -224,10 +207,11 @@ mod tests { 2, 2, "work", - Cancel::new(CancellationToken::new(), || -> io::Error { unreachable!() }), + &CancellationToken::new(), ) .await - .unwrap_err(); + .expect("was not cancellation") + .expect_err("it was permanent error"); assert_eq!(*count.lock().await, 1); } diff --git a/pageserver/src/consumption_metrics/upload.rs b/pageserver/src/consumption_metrics/upload.rs index 322ed95cc8..6b840a3136 100644 --- a/pageserver/src/consumption_metrics/upload.rs +++ b/pageserver/src/consumption_metrics/upload.rs @@ -262,35 +262,33 @@ async fn upload( ) -> Result<(), UploadError> { let warn_after = 3; let max_attempts = 10; + + // this is used only with tests so far + let last_value = if is_last { "true" } else { "false" }; + let res = utils::backoff::retry( - move || { - let body = body.clone(); - async move { - let res = client - .post(metric_collection_endpoint.clone()) - .header(reqwest::header::CONTENT_TYPE, "application/json") - .header( - LAST_IN_BATCH.clone(), - if is_last { "true" } else { "false" }, - ) - .body(body) - .send() - .await; + || async { + let res = client + .post(metric_collection_endpoint.clone()) + .header(reqwest::header::CONTENT_TYPE, "application/json") + .header(LAST_IN_BATCH.clone(), last_value) + .body(body.clone()) + .send() + .await; - let res = res.and_then(|res| res.error_for_status()); + let res = res.and_then(|res| res.error_for_status()); - // 10 redirects are normally allowed, so we don't need worry about 3xx - match res { - Ok(_response) => Ok(()), - Err(e) => { - let status = e.status().filter(|s| s.is_client_error()); - if let Some(status) = status { - // rejection used to be a thing when the server could reject a - // whole 
batch of metrics if one metric was bad. - Err(UploadError::Rejected(status)) - } else { - Err(UploadError::Reqwest(e)) - } + // 10 redirects are normally allowed, so we don't need worry about 3xx + match res { + Ok(_response) => Ok(()), + Err(e) => { + let status = e.status().filter(|s| s.is_client_error()); + if let Some(status) = status { + // rejection used to be a thing when the server could reject a + // whole batch of metrics if one metric was bad. + Err(UploadError::Rejected(status)) + } else { + Err(UploadError::Reqwest(e)) } } } @@ -299,9 +297,11 @@ async fn upload( warn_after, max_attempts, "upload consumption_metrics", - utils::backoff::Cancel::new(cancel.clone(), || UploadError::Cancelled), + cancel, ) - .await; + .await + .ok_or_else(|| UploadError::Cancelled) + .and_then(|x| x); match &res { Ok(_) => {} diff --git a/pageserver/src/control_plane_client.rs b/pageserver/src/control_plane_client.rs index 950791ea48..61c7d03408 100644 --- a/pageserver/src/control_plane_client.rs +++ b/pageserver/src/control_plane_client.rs @@ -82,46 +82,29 @@ impl ControlPlaneClient { R: Serialize, T: DeserializeOwned, { - #[derive(thiserror::Error, Debug)] - enum RemoteAttemptError { - #[error("shutdown")] - Shutdown, - #[error("remote: {0}")] - Remote(reqwest::Error), - } - - match backoff::retry( + let res = backoff::retry( || async { let response = self .http_client .post(url.clone()) .json(&request) .send() - .await - .map_err(RemoteAttemptError::Remote)?; + .await?; - response - .error_for_status_ref() - .map_err(RemoteAttemptError::Remote)?; - response - .json::() - .await - .map_err(RemoteAttemptError::Remote) + response.error_for_status_ref()?; + response.json::().await }, |_| false, 3, u32::MAX, "calling control plane generation validation API", - backoff::Cancel::new(self.cancel.clone(), || RemoteAttemptError::Shutdown), + &self.cancel, ) .await - { - Err(RemoteAttemptError::Shutdown) => Err(RetryForeverError::ShuttingDown), - Err(RemoteAttemptError::Remote(_)) => { - panic!("We retry forever, this should never be reached"); - } - Ok(r) => Ok(r), - } + .ok_or(RetryForeverError::ShuttingDown)? 
+ .expect("We retry forever, this should never be reached"); + + Ok(res) } } diff --git a/pageserver/src/deletion_queue/deleter.rs b/pageserver/src/deletion_queue/deleter.rs index 57421b1547..a75c73f2b1 100644 --- a/pageserver/src/deletion_queue/deleter.rs +++ b/pageserver/src/deletion_queue/deleter.rs @@ -77,9 +77,11 @@ impl Deleter { 3, 10, "executing deletion batch", - backoff::Cancel::new(self.cancel.clone(), || anyhow::anyhow!("Shutting down")), + &self.cancel, ) .await + .ok_or_else(|| anyhow::anyhow!("Shutting down")) + .and_then(|x| x) } /// Block until everything in accumulator has been executed diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index b801347c06..624c3e365f 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -3294,11 +3294,11 @@ impl Tenant { 3, u32::MAX, "persist_initdb_tar_zst", - backoff::Cancel::new(self.cancel.clone(), || anyhow::anyhow!("Cancelled")), + &self.cancel, ) - .await?; - - Ok(()) + .await + .ok_or_else(|| anyhow::anyhow!("Cancelled")) + .and_then(|x| x) } /// - run initdb to init temporary instance and get bootstrap data diff --git a/pageserver/src/tenant/delete.rs b/pageserver/src/tenant/delete.rs index 0dbaa3ec93..7c35914b61 100644 --- a/pageserver/src/tenant/delete.rs +++ b/pageserver/src/tenant/delete.rs @@ -91,9 +91,11 @@ async fn create_remote_delete_mark( FAILED_UPLOAD_WARN_THRESHOLD, FAILED_REMOTE_OP_RETRIES, "mark_upload", - backoff::Cancel::new(cancel.clone(), || anyhow::anyhow!("Cancelled")), + cancel, ) .await + .ok_or_else(|| anyhow::anyhow!("Cancelled")) + .and_then(|x| x) .context("mark_upload")?; Ok(()) @@ -187,9 +189,11 @@ async fn remove_tenant_remote_delete_mark( FAILED_UPLOAD_WARN_THRESHOLD, FAILED_REMOTE_OP_RETRIES, "remove_tenant_remote_delete_mark", - backoff::Cancel::new(cancel.clone(), || anyhow::anyhow!("Cancelled")), + cancel, ) .await + .ok_or_else(|| anyhow::anyhow!("Cancelled")) + .and_then(|x| x) .context("remove_tenant_remote_delete_mark")?; } Ok(()) diff --git a/pageserver/src/tenant/remote_timeline_client.rs b/pageserver/src/tenant/remote_timeline_client.rs index 2e429ee9bc..831a073d17 100644 --- a/pageserver/src/tenant/remote_timeline_client.rs +++ b/pageserver/src/tenant/remote_timeline_client.rs @@ -1046,9 +1046,11 @@ impl RemoteTimelineClient { // when executed as part of tenant deletion this happens in the background 2, "persist_index_part_with_deleted_flag", - backoff::Cancel::new(self.cancel.clone(), || anyhow::anyhow!("Cancelled")), + &self.cancel, ) - .await?; + .await + .ok_or_else(|| anyhow::anyhow!("Cancelled")) + .and_then(|x| x)?; // all good, disarm the guard and mark as success ScopeGuard::into_inner(undo_deleted_at); @@ -1083,9 +1085,11 @@ impl RemoteTimelineClient { FAILED_DOWNLOAD_WARN_THRESHOLD, FAILED_REMOTE_OP_RETRIES, "preserve_initdb_tar_zst", - backoff::Cancel::new(cancel.clone(), || anyhow::anyhow!("Cancelled!")), + &cancel.clone(), ) .await + .ok_or_else(|| anyhow::anyhow!("Cancellled")) + .and_then(|x| x) .context("backing up initdb archive")?; Ok(()) } @@ -1141,6 +1145,8 @@ impl RemoteTimelineClient { // taking the burden of listing all the layers that we already know we should delete. 
self.deletion_queue_client.flush_immediate().await?; + let cancel = shutdown_token(); + let remaining = backoff::retry( || async { self.storage_impl @@ -1151,9 +1157,11 @@ impl RemoteTimelineClient { FAILED_DOWNLOAD_WARN_THRESHOLD, FAILED_REMOTE_OP_RETRIES, "list_prefixes", - backoff::Cancel::new(shutdown_token(), || anyhow::anyhow!("Cancelled!")), + &cancel, ) .await + .ok_or_else(|| anyhow::anyhow!("Cancelled!")) + .and_then(|x| x) .context("list prefixes")?; // We will delete the current index_part object last, since it acts as a deletion diff --git a/pageserver/src/tenant/remote_timeline_client/download.rs b/pageserver/src/tenant/remote_timeline_client/download.rs index b84b5ca33b..2c50726b43 100644 --- a/pageserver/src/tenant/remote_timeline_client/download.rs +++ b/pageserver/src/tenant/remote_timeline_client/download.rs @@ -76,7 +76,6 @@ pub async fn download_layer_file<'a>( // If pageserver crashes the temp file will be deleted on startup and re-downloaded. let temp_file_path = path_with_suffix_extension(&local_path, TEMP_DOWNLOAD_EXTENSION); - let cancel_inner = cancel.clone(); let (mut destination_file, bytes_amount) = download_retry( || async { let destination_file = tokio::fs::File::create(&temp_file_path) @@ -87,7 +86,7 @@ pub async fn download_layer_file<'a>( // Cancellation safety: it is safe to cancel this future, because it isn't writing to a local // file: the write to local file doesn't start until after the request header is returned // and we start draining the body stream below - let download = download_cancellable(&cancel_inner, storage.download(&remote_path)) + let download = download_cancellable(cancel, storage.download(&remote_path)) .await .with_context(|| { format!( @@ -107,7 +106,7 @@ pub async fn download_layer_file<'a>( // we will imminiently try and write to again. let bytes_amount: u64 = match timeout_cancellable( DOWNLOAD_TIMEOUT, - &cancel_inner, + cancel, tokio::io::copy_buf(&mut reader, &mut destination_file), ) .await @@ -386,9 +385,11 @@ pub(super) async fn download_index_part( FAILED_DOWNLOAD_WARN_THRESHOLD, FAILED_REMOTE_OP_RETRIES, "listing index_part files", - backoff::Cancel::new(cancel.clone(), || anyhow::anyhow!("Cancelled")), + &cancel, ) .await + .ok_or_else(|| anyhow::anyhow!("Cancelled")) + .and_then(|x| x) .map_err(DownloadError::Other)?; // General case logic for which index to use: the latest index whose generation @@ -510,7 +511,7 @@ pub(crate) async fn download_initdb_tar_zst( /// Helper function to handle retries for a download operation. /// -/// Remote operations can fail due to rate limits (IAM, S3), spurious network +/// Remote operations can fail due to rate limits (S3), spurious network /// problems, or other external reasons. Retry FAILED_DOWNLOAD_RETRIES times, /// with backoff. 
/// @@ -530,9 +531,11 @@ where FAILED_DOWNLOAD_WARN_THRESHOLD, FAILED_REMOTE_OP_RETRIES, description, - backoff::Cancel::new(cancel.clone(), || DownloadError::Cancelled), + cancel, ) .await + .ok_or_else(|| DownloadError::Cancelled) + .and_then(|x| x) } async fn download_retry_forever( @@ -550,7 +553,9 @@ where FAILED_DOWNLOAD_WARN_THRESHOLD, u32::MAX, description, - backoff::Cancel::new(cancel, || DownloadError::Cancelled), + &cancel, ) .await + .ok_or_else(|| DownloadError::Cancelled) + .and_then(|x| x) } diff --git a/pageserver/src/tenant/remote_timeline_client/upload.rs b/pageserver/src/tenant/remote_timeline_client/upload.rs index 76df9ba5c4..e8ba1d3d6e 100644 --- a/pageserver/src/tenant/remote_timeline_client/upload.rs +++ b/pageserver/src/tenant/remote_timeline_client/upload.rs @@ -188,16 +188,18 @@ pub(crate) async fn time_travel_recover_tenant( backoff::retry( || async { storage - .time_travel_recover(Some(prefix), timestamp, done_if_after, cancel.clone()) + .time_travel_recover(Some(prefix), timestamp, done_if_after, cancel) .await }, |e| !matches!(e, TimeTravelError::Other(_)), warn_after, max_attempts, "time travel recovery of tenant prefix", - backoff::Cancel::new(cancel.clone(), || TimeTravelError::Cancelled), + cancel, ) - .await?; + .await + .ok_or_else(|| TimeTravelError::Cancelled) + .and_then(|x| x)?; } Ok(()) } diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index 702c0b1ec1..55af4f9f2b 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -537,11 +537,11 @@ impl<'a> TenantDownloader<'a> { FAILED_DOWNLOAD_WARN_THRESHOLD, FAILED_REMOTE_OP_RETRIES, "download heatmap", - backoff::Cancel::new(self.secondary_state.cancel.clone(), || { - UpdateError::Cancelled - }), + &self.secondary_state.cancel, ) - .await?; + .await + .ok_or_else(|| UpdateError::Cancelled) + .and_then(|x| x)?; SECONDARY_MODE.download_heatmap.inc(); diff --git a/pageserver/src/tenant/secondary/heatmap_uploader.rs b/pageserver/src/tenant/secondary/heatmap_uploader.rs index df865658a4..fff29b2487 100644 --- a/pageserver/src/tenant/secondary/heatmap_uploader.rs +++ b/pageserver/src/tenant/secondary/heatmap_uploader.rs @@ -426,9 +426,11 @@ async fn upload_tenant_heatmap( 3, u32::MAX, "Uploading heatmap", - backoff::Cancel::new(tenant_cancel.clone(), || anyhow::anyhow!("Shutting down")), + &tenant_cancel, ) .await + .ok_or_else(|| anyhow::anyhow!("Shutting down")) + .and_then(|x| x) { if tenant_cancel.is_cancelled() { return Err(UploadHeatmapError::Cancelled); diff --git a/proxy/src/context/parquet.rs b/proxy/src/context/parquet.rs index e920d7be01..8510c5c586 100644 --- a/proxy/src/context/parquet.rs +++ b/proxy/src/context/parquet.rs @@ -315,9 +315,11 @@ async fn upload_parquet( FAILED_UPLOAD_MAX_RETRIES, "request_data_upload", // we don't want cancellation to interrupt here, so we make a dummy cancel token - backoff::Cancel::new(CancellationToken::new(), || anyhow::anyhow!("Cancelled")), + &CancellationToken::new(), ) .await + .ok_or_else(|| anyhow::anyhow!("Cancelled")) + .and_then(|x| x) .context("request_data_upload")?; Ok(buffer.writer()) diff --git a/safekeeper/src/wal_backup.rs b/safekeeper/src/wal_backup.rs index c47381351d..df99244770 100644 --- a/safekeeper/src/wal_backup.rs +++ b/safekeeper/src/wal_backup.rs @@ -558,16 +558,17 @@ pub async fn delete_timeline(ttid: &TenantTimelineId) -> Result<()> { backoff::retry( || async { let files = storage.list_files(Some(&remote_path)).await?; 
- storage.delete_objects(&files).await?; - Ok(()) + storage.delete_objects(&files).await }, |_| false, 3, 10, "executing WAL segments deletion batch", - backoff::Cancel::new(token, || anyhow::anyhow!("canceled")), + &token, ) - .await?; + .await + .ok_or_else(|| anyhow::anyhow!("canceled")) + .and_then(|x| x)?; Ok(()) } From e196d974cc585341ee38f8fd6b54c257a3ad78a4 Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Tue, 6 Feb 2024 10:34:16 +0100 Subject: [PATCH 25/34] pagebench: actually implement `--num_clients` (#6640) Will need this to validate per-tenant throttling in https://github.com/neondatabase/neon/issues/5899 --- .../pagebench/src/cmd/getpage_latest_lsn.rs | 139 ++++++++++-------- 1 file changed, 78 insertions(+), 61 deletions(-) diff --git a/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs b/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs index 400b5476b7..aa809d8d26 100644 --- a/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs +++ b/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs @@ -79,6 +79,12 @@ impl KeyRange { } } +#[derive(PartialEq, Eq, Hash, Copy, Clone)] +struct WorkerId { + timeline: TenantTimelineId, + num_client: usize, // from 0..args.num_clients +} + #[derive(serde::Serialize)] struct Output { total: request_stats::Output, @@ -206,7 +212,7 @@ async fn main_impl( let live_stats = Arc::new(LiveStats::default()); - let num_client_tasks = timelines.len(); + let num_client_tasks = args.num_clients.get() * timelines.len(); let num_live_stats_dump = 1; let num_work_sender_tasks = 1; let num_main_impl = 1; @@ -235,19 +241,25 @@ async fn main_impl( let cancel = CancellationToken::new(); - let mut work_senders: HashMap = HashMap::new(); + let mut work_senders: HashMap = HashMap::new(); let mut tasks = Vec::new(); - for tl in &timelines { - let (sender, receiver) = tokio::sync::mpsc::channel(10); // TODO: not sure what the implications of this are - work_senders.insert(*tl, sender); - tasks.push(tokio::spawn(client( - args, - *tl, - Arc::clone(&start_work_barrier), - receiver, - Arc::clone(&live_stats), - cancel.clone(), - ))); + for timeline in timelines.iter().cloned() { + for num_client in 0..args.num_clients.get() { + let (sender, receiver) = tokio::sync::mpsc::channel(10); // TODO: not sure what the implications of this are + let worker_id = WorkerId { + timeline, + num_client, + }; + work_senders.insert(worker_id, sender); + tasks.push(tokio::spawn(client( + args, + worker_id, + Arc::clone(&start_work_barrier), + receiver, + Arc::clone(&live_stats), + cancel.clone(), + ))); + } } let work_sender: Pin>> = { @@ -271,7 +283,10 @@ async fn main_impl( let (rel_tag, block_no) = key_to_rel_block(key).expect("we filter non-rel-block keys out above"); ( - r.timeline, + WorkerId { + timeline: r.timeline, + num_client: rng.gen_range(0..args.num_clients.get()), + }, PagestreamGetPageRequest { latest: rng.gen_bool(args.req_latest_probability), lsn: r.timeline_lsn, @@ -289,56 +304,54 @@ async fn main_impl( }), Some(rps_limit) => Box::pin(async move { let period = Duration::from_secs_f64(1.0 / (rps_limit as f64)); - let make_timeline_task: &dyn Fn( - TenantTimelineId, - ) - -> Pin>> = &|timeline| { - let sender = work_senders.get(&timeline).unwrap(); - let ranges: Vec = all_ranges - .iter() - .filter(|r| r.timeline == timeline) - .cloned() - .collect(); - let weights = rand::distributions::weighted::WeightedIndex::new( - ranges.iter().map(|v| v.len()), - ) - .unwrap(); + let make_task: &dyn Fn(WorkerId) -> Pin>> = + &|worker_id| { + let sender = 
work_senders.get(&worker_id).unwrap(); + let ranges: Vec = all_ranges + .iter() + .filter(|r| r.timeline == worker_id.timeline) + .cloned() + .collect(); + let weights = rand::distributions::weighted::WeightedIndex::new( + ranges.iter().map(|v| v.len()), + ) + .unwrap(); - let cancel = cancel.clone(); - Box::pin(async move { - let mut ticker = tokio::time::interval(period); - ticker.set_missed_tick_behavior( - /* TODO review this choice */ - tokio::time::MissedTickBehavior::Burst, - ); - while !cancel.is_cancelled() { - ticker.tick().await; - let req = { - let mut rng = rand::thread_rng(); - let r = &ranges[weights.sample(&mut rng)]; - let key: i128 = rng.gen_range(r.start..r.end); - let key = Key::from_i128(key); - assert!(is_rel_block_key(&key)); - let (rel_tag, block_no) = key_to_rel_block(key) - .expect("we filter non-rel-block keys out above"); - PagestreamGetPageRequest { - latest: rng.gen_bool(args.req_latest_probability), - lsn: r.timeline_lsn, - rel: rel_tag, - blkno: block_no, + let cancel = cancel.clone(); + Box::pin(async move { + let mut ticker = tokio::time::interval(period); + ticker.set_missed_tick_behavior( + /* TODO review this choice */ + tokio::time::MissedTickBehavior::Burst, + ); + while !cancel.is_cancelled() { + ticker.tick().await; + let req = { + let mut rng = rand::thread_rng(); + let r = &ranges[weights.sample(&mut rng)]; + let key: i128 = rng.gen_range(r.start..r.end); + let key = Key::from_i128(key); + assert!(is_rel_block_key(&key)); + let (rel_tag, block_no) = key_to_rel_block(key) + .expect("we filter non-rel-block keys out above"); + PagestreamGetPageRequest { + latest: rng.gen_bool(args.req_latest_probability), + lsn: r.timeline_lsn, + rel: rel_tag, + blkno: block_no, + } + }; + if sender.send(req).await.is_err() { + assert!( + cancel.is_cancelled(), + "client has gone away unexpectedly" + ); } - }; - if sender.send(req).await.is_err() { - assert!(cancel.is_cancelled(), "client has gone away unexpectedly"); } - } - }) - }; + }) + }; - let tasks: Vec<_> = work_senders - .keys() - .map(|tl| make_timeline_task(*tl)) - .collect(); + let tasks: Vec<_> = work_senders.keys().map(|tl| make_task(*tl)).collect(); start_work_barrier.wait().await; @@ -390,12 +403,16 @@ async fn main_impl( #[instrument(skip_all)] async fn client( args: &'static Args, - timeline: TenantTimelineId, + id: WorkerId, start_work_barrier: Arc, mut work: tokio::sync::mpsc::Receiver, live_stats: Arc, cancel: CancellationToken, ) { + let WorkerId { + timeline, + num_client: _, + } = id; let client = pageserver_client::page_service::Client::new(args.page_service_connstring.clone()) .await .unwrap(); From edcde05c1cdf75f9bd5f0669b95ef61946d25549 Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Tue, 6 Feb 2024 10:44:49 +0100 Subject: [PATCH 26/34] refactor(walredo): split up the massive `walredo.rs` (#6583) Part of https://github.com/neondatabase/neon/issues/6581 --- pageserver/src/walredo.rs | 825 +----------------- pageserver/src/walredo/apply_neon.rs | 235 +++++ pageserver/src/walredo/process.rs | 406 +++++++++ .../src/walredo/process/no_leak_child.rs | 126 +++ pageserver/src/walredo/process/protocol.rs | 57 ++ 5 files changed, 848 insertions(+), 801 deletions(-) create mode 100644 pageserver/src/walredo/apply_neon.rs create mode 100644 pageserver/src/walredo/process.rs create mode 100644 pageserver/src/walredo/process/no_leak_child.rs create mode 100644 pageserver/src/walredo/process/protocol.rs diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs index 
5bc897b730..773e5fc051 100644 --- a/pageserver/src/walredo.rs +++ b/pageserver/src/walredo.rs @@ -17,71 +17,30 @@ //! records. It achieves it by dropping privileges before replaying //! any WAL records, so that even if an attacker hijacks the Postgres //! process, he cannot escape out of it. -//! -use anyhow::Context; -use byteorder::{ByteOrder, LittleEndian}; -use bytes::{BufMut, Bytes, BytesMut}; -use nix::poll::*; -use pageserver_api::models::WalRedoManagerStatus; -use pageserver_api::shard::TenantShardId; -use serde::Serialize; -use std::collections::VecDeque; -use std::io; -use std::io::prelude::*; -use std::ops::{Deref, DerefMut}; -use std::os::unix::io::AsRawFd; -use std::process::Stdio; -use std::process::{Child, ChildStdin, ChildStdout, Command}; -use std::sync::{Arc, Mutex, MutexGuard, RwLock}; -use std::time::Duration; -use std::time::Instant; -use tracing::*; -use utils::{bin_ser::BeSer, lsn::Lsn, nonblock::set_nonblock}; -#[cfg(feature = "testing")] -use std::sync::atomic::{AtomicUsize, Ordering}; +/// Process lifecycle and abstracction for the IPC protocol. +mod process; + +/// Code to apply [`NeonWalRecord`]s. +mod apply_neon; use crate::config::PageServerConf; use crate::metrics::{ - WalRedoKillCause, WAL_REDO_BYTES_HISTOGRAM, WAL_REDO_PROCESS_COUNTERS, - WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM, WAL_REDO_RECORDS_HISTOGRAM, - WAL_REDO_RECORD_COUNTER, WAL_REDO_TIME, + WAL_REDO_BYTES_HISTOGRAM, WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM, + WAL_REDO_RECORDS_HISTOGRAM, WAL_REDO_TIME, }; use crate::repository::Key; use crate::walrecord::NeonWalRecord; - -use pageserver_api::key::{key_to_rel_block, key_to_slru_block}; -use pageserver_api::reltag::{RelTag, SlruKind}; -use postgres_ffi::pg_constants; -use postgres_ffi::relfile_utils::VISIBILITYMAP_FORKNUM; -use postgres_ffi::v14::nonrelfile_utils::{ - mx_offset_to_flags_bitshift, mx_offset_to_flags_offset, mx_offset_to_member_offset, - transaction_id_set_status, -}; -use postgres_ffi::BLCKSZ; - -/// -/// `RelTag` + block number (`blknum`) gives us a unique id of the page in the cluster. -/// -/// In Postgres `BufferTag` structure is used for exactly the same purpose. -/// [See more related comments here](https://github.com/postgres/postgres/blob/99c5852e20a0987eca1c38ba0c09329d4076b6a0/src/include/storage/buf_internals.h#L91). -/// -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Serialize)] -pub(crate) struct BufferTag { - pub rel: RelTag, - pub blknum: u32, -} - -struct ProcessInput { - stdin: ChildStdin, - n_requests: usize, -} - -struct ProcessOutput { - stdout: ChildStdout, - pending_responses: VecDeque>, - n_processed_responses: usize, -} +use anyhow::Context; +use bytes::{Bytes, BytesMut}; +use pageserver_api::key::key_to_rel_block; +use pageserver_api::models::WalRedoManagerStatus; +use pageserver_api::shard::TenantShardId; +use std::sync::{Arc, RwLock}; +use std::time::Duration; +use std::time::Instant; +use tracing::*; +use utils::lsn::Lsn; /// /// This is the real implementation that uses a Postgres process to @@ -94,22 +53,7 @@ pub struct PostgresRedoManager { tenant_shard_id: TenantShardId, conf: &'static PageServerConf, last_redo_at: std::sync::Mutex>, - redo_process: RwLock>>, -} - -/// Can this request be served by neon redo functions -/// or we need to pass it to wal-redo postgres process? -fn can_apply_in_neon(rec: &NeonWalRecord) -> bool { - // Currently, we don't have bespoken Rust code to replay any - // Postgres WAL records. But everything else is handled in neon. 
- #[allow(clippy::match_like_matches_macro)] - match rec { - NeonWalRecord::Postgres { - will_init: _, - rec: _, - } => false, - _ => true, - } + redo_process: RwLock>>, } /// @@ -139,10 +83,10 @@ impl PostgresRedoManager { let base_img_lsn = base_img.as_ref().map(|p| p.0).unwrap_or(Lsn::INVALID); let mut img = base_img.map(|p| p.1); - let mut batch_neon = can_apply_in_neon(&records[0].1); + let mut batch_neon = apply_neon::can_apply_in_neon(&records[0].1); let mut batch_start = 0; for (i, record) in records.iter().enumerate().skip(1) { - let rec_neon = can_apply_in_neon(&record.1); + let rec_neon = apply_neon::can_apply_in_neon(&record.1); if rec_neon != batch_neon { let result = if batch_neon { @@ -248,7 +192,7 @@ impl PostgresRedoManager { let mut n_attempts = 0u32; loop { // launch the WAL redo process on first use - let proc: Arc = { + let proc: Arc = { let proc_guard = self.redo_process.read().unwrap(); match &*proc_guard { None => { @@ -259,7 +203,7 @@ impl PostgresRedoManager { None => { let start = Instant::now(); let proc = Arc::new( - WalRedoProcess::launch( + process::WalRedoProcess::launch( self.conf, self.tenant_shard_id, pg_version, @@ -287,9 +231,8 @@ impl PostgresRedoManager { let started_at = std::time::Instant::now(); // Relational WAL records are applied using wal-redo-postgres - let buf_tag = BufferTag { rel, blknum }; let result = proc - .apply_wal_records(buf_tag, &base_img, records, wal_redo_timeout) + .apply_wal_records(rel, blknum, &base_img, records, wal_redo_timeout) .context("apply_wal_records"); let duration = started_at.elapsed(); @@ -416,732 +359,12 @@ impl PostgresRedoManager { _record_lsn: Lsn, record: &NeonWalRecord, ) -> anyhow::Result<()> { - match record { - NeonWalRecord::Postgres { - will_init: _, - rec: _, - } => { - anyhow::bail!("tried to pass postgres wal record to neon WAL redo"); - } - NeonWalRecord::ClearVisibilityMapFlags { - new_heap_blkno, - old_heap_blkno, - flags, - } => { - // sanity check that this is modifying the correct relation - let (rel, blknum) = key_to_rel_block(key).context("invalid record")?; - assert!( - rel.forknum == VISIBILITYMAP_FORKNUM, - "ClearVisibilityMapFlags record on unexpected rel {}", - rel - ); - if let Some(heap_blkno) = *new_heap_blkno { - // Calculate the VM block and offset that corresponds to the heap block. - let map_block = pg_constants::HEAPBLK_TO_MAPBLOCK(heap_blkno); - let map_byte = pg_constants::HEAPBLK_TO_MAPBYTE(heap_blkno); - let map_offset = pg_constants::HEAPBLK_TO_OFFSET(heap_blkno); - - // Check that we're modifying the correct VM block. - assert!(map_block == blknum); - - // equivalent to PageGetContents(page) - let map = &mut page[pg_constants::MAXALIGN_SIZE_OF_PAGE_HEADER_DATA..]; - - map[map_byte as usize] &= !(flags << map_offset); - } - - // Repeat for 'old_heap_blkno', if any - if let Some(heap_blkno) = *old_heap_blkno { - let map_block = pg_constants::HEAPBLK_TO_MAPBLOCK(heap_blkno); - let map_byte = pg_constants::HEAPBLK_TO_MAPBYTE(heap_blkno); - let map_offset = pg_constants::HEAPBLK_TO_OFFSET(heap_blkno); - - assert!(map_block == blknum); - - let map = &mut page[pg_constants::MAXALIGN_SIZE_OF_PAGE_HEADER_DATA..]; - - map[map_byte as usize] &= !(flags << map_offset); - } - } - // Non-relational WAL records are handled here, with custom code that has the - // same effects as the corresponding Postgres WAL redo function. 
- NeonWalRecord::ClogSetCommitted { xids, timestamp } => { - let (slru_kind, segno, blknum) = - key_to_slru_block(key).context("invalid record")?; - assert_eq!( - slru_kind, - SlruKind::Clog, - "ClogSetCommitted record with unexpected key {}", - key - ); - for &xid in xids { - let pageno = xid / pg_constants::CLOG_XACTS_PER_PAGE; - let expected_segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT; - let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; - - // Check that we're modifying the correct CLOG block. - assert!( - segno == expected_segno, - "ClogSetCommitted record for XID {} with unexpected key {}", - xid, - key - ); - assert!( - blknum == expected_blknum, - "ClogSetCommitted record for XID {} with unexpected key {}", - xid, - key - ); - - transaction_id_set_status( - xid, - pg_constants::TRANSACTION_STATUS_COMMITTED, - page, - ); - } - - // Append the timestamp - if page.len() == BLCKSZ as usize + 8 { - page.truncate(BLCKSZ as usize); - } - if page.len() == BLCKSZ as usize { - page.extend_from_slice(×tamp.to_be_bytes()); - } else { - warn!( - "CLOG blk {} in seg {} has invalid size {}", - blknum, - segno, - page.len() - ); - } - } - NeonWalRecord::ClogSetAborted { xids } => { - let (slru_kind, segno, blknum) = - key_to_slru_block(key).context("invalid record")?; - assert_eq!( - slru_kind, - SlruKind::Clog, - "ClogSetAborted record with unexpected key {}", - key - ); - for &xid in xids { - let pageno = xid / pg_constants::CLOG_XACTS_PER_PAGE; - let expected_segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT; - let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; - - // Check that we're modifying the correct CLOG block. - assert!( - segno == expected_segno, - "ClogSetAborted record for XID {} with unexpected key {}", - xid, - key - ); - assert!( - blknum == expected_blknum, - "ClogSetAborted record for XID {} with unexpected key {}", - xid, - key - ); - - transaction_id_set_status(xid, pg_constants::TRANSACTION_STATUS_ABORTED, page); - } - } - NeonWalRecord::MultixactOffsetCreate { mid, moff } => { - let (slru_kind, segno, blknum) = - key_to_slru_block(key).context("invalid record")?; - assert_eq!( - slru_kind, - SlruKind::MultiXactOffsets, - "MultixactOffsetCreate record with unexpected key {}", - key - ); - // Compute the block and offset to modify. - // See RecordNewMultiXact in PostgreSQL sources. - let pageno = mid / pg_constants::MULTIXACT_OFFSETS_PER_PAGE as u32; - let entryno = mid % pg_constants::MULTIXACT_OFFSETS_PER_PAGE as u32; - let offset = (entryno * 4) as usize; - - // Check that we're modifying the correct multixact-offsets block. - let expected_segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT; - let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; - assert!( - segno == expected_segno, - "MultiXactOffsetsCreate record for multi-xid {} with unexpected key {}", - mid, - key - ); - assert!( - blknum == expected_blknum, - "MultiXactOffsetsCreate record for multi-xid {} with unexpected key {}", - mid, - key - ); - - LittleEndian::write_u32(&mut page[offset..offset + 4], *moff); - } - NeonWalRecord::MultixactMembersCreate { moff, members } => { - let (slru_kind, segno, blknum) = - key_to_slru_block(key).context("invalid record")?; - assert_eq!( - slru_kind, - SlruKind::MultiXactMembers, - "MultixactMembersCreate record with unexpected key {}", - key - ); - for (i, member) in members.iter().enumerate() { - let offset = moff + i as u32; - - // Compute the block and offset to modify. 
- // See RecordNewMultiXact in PostgreSQL sources. - let pageno = offset / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32; - let memberoff = mx_offset_to_member_offset(offset); - let flagsoff = mx_offset_to_flags_offset(offset); - let bshift = mx_offset_to_flags_bitshift(offset); - - // Check that we're modifying the correct multixact-members block. - let expected_segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT; - let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; - assert!( - segno == expected_segno, - "MultiXactMembersCreate record for offset {} with unexpected key {}", - moff, - key - ); - assert!( - blknum == expected_blknum, - "MultiXactMembersCreate record for offset {} with unexpected key {}", - moff, - key - ); - - let mut flagsval = LittleEndian::read_u32(&page[flagsoff..flagsoff + 4]); - flagsval &= !(((1 << pg_constants::MXACT_MEMBER_BITS_PER_XACT) - 1) << bshift); - flagsval |= member.status << bshift; - LittleEndian::write_u32(&mut page[flagsoff..flagsoff + 4], flagsval); - LittleEndian::write_u32(&mut page[memberoff..memberoff + 4], member.xid); - } - } - } + apply_neon::apply_in_neon(record, key, page)?; Ok(()) } } -struct WalRedoProcess { - #[allow(dead_code)] - conf: &'static PageServerConf, - tenant_shard_id: TenantShardId, - // Some() on construction, only becomes None on Drop. - child: Option, - stdout: Mutex, - stdin: Mutex, - /// Counter to separate same sized walredo inputs failing at the same millisecond. - #[cfg(feature = "testing")] - dump_sequence: AtomicUsize, -} - -impl WalRedoProcess { - // - // Start postgres binary in special WAL redo mode. - // - #[instrument(skip_all,fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), pg_version=pg_version))] - fn launch( - conf: &'static PageServerConf, - tenant_shard_id: TenantShardId, - pg_version: u32, - ) -> anyhow::Result { - let pg_bin_dir_path = conf.pg_bin_dir(pg_version).context("pg_bin_dir")?; // TODO these should be infallible. - let pg_lib_dir_path = conf.pg_lib_dir(pg_version).context("pg_lib_dir")?; - - // Start postgres itself - let child = Command::new(pg_bin_dir_path.join("postgres")) - // the first arg must be --wal-redo so the child process enters into walredo mode - .arg("--wal-redo") - // the child doesn't process this arg, but, having it in the argv helps indentify the - // walredo process for a particular tenant when debugging a pagserver - .args(["--tenant-shard-id", &format!("{tenant_shard_id}")]) - .stdin(Stdio::piped()) - .stderr(Stdio::piped()) - .stdout(Stdio::piped()) - .env_clear() - .env("LD_LIBRARY_PATH", &pg_lib_dir_path) - .env("DYLD_LIBRARY_PATH", &pg_lib_dir_path) - // NB: The redo process is not trusted after we sent it the first - // walredo work. Before that, it is trusted. Specifically, we trust - // it to - // 1. close all file descriptors except stdin, stdout, stderr because - // pageserver might not be 100% diligent in setting FD_CLOEXEC on all - // the files it opens, and - // 2. to use seccomp to sandbox itself before processing the first - // walredo request. 
- .spawn_no_leak_child(tenant_shard_id) - .context("spawn process")?; - WAL_REDO_PROCESS_COUNTERS.started.inc(); - let mut child = scopeguard::guard(child, |child| { - error!("killing wal-redo-postgres process due to a problem during launch"); - child.kill_and_wait(WalRedoKillCause::Startup); - }); - - let stdin = child.stdin.take().unwrap(); - let stdout = child.stdout.take().unwrap(); - let stderr = child.stderr.take().unwrap(); - let stderr = tokio::process::ChildStderr::from_std(stderr) - .context("convert to tokio::ChildStderr")?; - macro_rules! set_nonblock_or_log_err { - ($file:ident) => {{ - let res = set_nonblock($file.as_raw_fd()); - if let Err(e) = &res { - error!(error = %e, file = stringify!($file), pid = child.id(), "set_nonblock failed"); - } - res - }}; - } - set_nonblock_or_log_err!(stdin)?; - set_nonblock_or_log_err!(stdout)?; - - // all fallible operations post-spawn are complete, so get rid of the guard - let child = scopeguard::ScopeGuard::into_inner(child); - - tokio::spawn( - async move { - scopeguard::defer! { - debug!("wal-redo-postgres stderr_logger_task finished"); - crate::metrics::WAL_REDO_PROCESS_COUNTERS.active_stderr_logger_tasks_finished.inc(); - } - debug!("wal-redo-postgres stderr_logger_task started"); - crate::metrics::WAL_REDO_PROCESS_COUNTERS.active_stderr_logger_tasks_started.inc(); - - use tokio::io::AsyncBufReadExt; - let mut stderr_lines = tokio::io::BufReader::new(stderr); - let mut buf = Vec::new(); - let res = loop { - buf.clear(); - // TODO we don't trust the process to cap its stderr length. - // Currently it can do unbounded Vec allocation. - match stderr_lines.read_until(b'\n', &mut buf).await { - Ok(0) => break Ok(()), // eof - Ok(num_bytes) => { - let output = String::from_utf8_lossy(&buf[..num_bytes]); - error!(%output, "received output"); - } - Err(e) => { - break Err(e); - } - } - }; - match res { - Ok(()) => (), - Err(e) => { - error!(error=?e, "failed to read from walredo stderr"); - } - } - }.instrument(tracing::info_span!(parent: None, "wal-redo-postgres-stderr", pid = child.id(), tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %pg_version)) - ); - - Ok(Self { - conf, - tenant_shard_id, - child: Some(child), - stdin: Mutex::new(ProcessInput { - stdin, - n_requests: 0, - }), - stdout: Mutex::new(ProcessOutput { - stdout, - pending_responses: VecDeque::new(), - n_processed_responses: 0, - }), - #[cfg(feature = "testing")] - dump_sequence: AtomicUsize::default(), - }) - } - - fn id(&self) -> u32 { - self.child - .as_ref() - .expect("must not call this during Drop") - .id() - } - - // Apply given WAL records ('records') over an old page image. Returns - // new page image. - // - #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), pid=%self.id()))] - fn apply_wal_records( - &self, - tag: BufferTag, - base_img: &Option, - records: &[(Lsn, NeonWalRecord)], - wal_redo_timeout: Duration, - ) -> anyhow::Result { - let input = self.stdin.lock().unwrap(); - - // Serialize all the messages to send the WAL redo process first. - // - // This could be problematic if there are millions of records to replay, - // but in practice the number of records is usually so small that it doesn't - // matter, and it's better to keep this code simple. - // - // Most requests start with a before-image with BLCKSZ bytes, followed by - // by some other WAL records. Start with a buffer that can hold that - // comfortably. 
- let mut writebuf: Vec = Vec::with_capacity((BLCKSZ as usize) * 3); - build_begin_redo_for_block_msg(tag, &mut writebuf); - if let Some(img) = base_img { - build_push_page_msg(tag, img, &mut writebuf); - } - for (lsn, rec) in records.iter() { - if let NeonWalRecord::Postgres { - will_init: _, - rec: postgres_rec, - } = rec - { - build_apply_record_msg(*lsn, postgres_rec, &mut writebuf); - } else { - anyhow::bail!("tried to pass neon wal record to postgres WAL redo"); - } - } - build_get_page_msg(tag, &mut writebuf); - WAL_REDO_RECORD_COUNTER.inc_by(records.len() as u64); - - let res = self.apply_wal_records0(&writebuf, input, wal_redo_timeout); - - if res.is_err() { - // not all of these can be caused by this particular input, however these are so rare - // in tests so capture all. - self.record_and_log(&writebuf); - } - - res - } - - fn apply_wal_records0( - &self, - writebuf: &[u8], - input: MutexGuard, - wal_redo_timeout: Duration, - ) -> anyhow::Result { - let mut proc = { input }; // TODO: remove this legacy rename, but this keep the patch small. - let mut nwrite = 0usize; - - while nwrite < writebuf.len() { - let mut stdin_pollfds = [PollFd::new(&proc.stdin, PollFlags::POLLOUT)]; - let n = loop { - match nix::poll::poll(&mut stdin_pollfds[..], wal_redo_timeout.as_millis() as i32) { - Err(nix::errno::Errno::EINTR) => continue, - res => break res, - } - }?; - - if n == 0 { - anyhow::bail!("WAL redo timed out"); - } - - // If 'stdin' is writeable, do write. - let in_revents = stdin_pollfds[0].revents().unwrap(); - if in_revents & (PollFlags::POLLERR | PollFlags::POLLOUT) != PollFlags::empty() { - nwrite += proc.stdin.write(&writebuf[nwrite..])?; - } - if in_revents.contains(PollFlags::POLLHUP) { - // We still have more data to write, but the process closed the pipe. - anyhow::bail!("WAL redo process closed its stdin unexpectedly"); - } - } - let request_no = proc.n_requests; - proc.n_requests += 1; - drop(proc); - - // To improve walredo performance we separate sending requests and receiving - // responses. Them are protected by different mutexes (output and input). - // If thread T1, T2, T3 send requests D1, D2, D3 to walredo process - // then there is not warranty that T1 will first granted output mutex lock. - // To address this issue we maintain number of sent requests, number of processed - // responses and ring buffer with pending responses. After sending response - // (under input mutex), threads remembers request number. Then it releases - // input mutex, locks output mutex and fetch in ring buffer all responses until - // its stored request number. The it takes correspondent element from - // pending responses ring buffer and truncate all empty elements from the front, - // advancing processed responses number. - - let mut output = self.stdout.lock().unwrap(); - let n_processed_responses = output.n_processed_responses; - while n_processed_responses + output.pending_responses.len() <= request_no { - // We expect the WAL redo process to respond with an 8k page image. We read it - // into this buffer. - let mut resultbuf = vec![0; BLCKSZ.into()]; - let mut nresult: usize = 0; // # of bytes read into 'resultbuf' so far - while nresult < BLCKSZ.into() { - let mut stdout_pollfds = [PollFd::new(&output.stdout, PollFlags::POLLIN)]; - // We do two things simultaneously: reading response from stdout - // and forward any logging information that the child writes to its stderr to the page server's log. 
- let n = loop { - match nix::poll::poll( - &mut stdout_pollfds[..], - wal_redo_timeout.as_millis() as i32, - ) { - Err(nix::errno::Errno::EINTR) => continue, - res => break res, - } - }?; - - if n == 0 { - anyhow::bail!("WAL redo timed out"); - } - - // If we have some data in stdout, read it to the result buffer. - let out_revents = stdout_pollfds[0].revents().unwrap(); - if out_revents & (PollFlags::POLLERR | PollFlags::POLLIN) != PollFlags::empty() { - nresult += output.stdout.read(&mut resultbuf[nresult..])?; - } - if out_revents.contains(PollFlags::POLLHUP) { - anyhow::bail!("WAL redo process closed its stdout unexpectedly"); - } - } - output - .pending_responses - .push_back(Some(Bytes::from(resultbuf))); - } - // Replace our request's response with None in `pending_responses`. - // Then make space in the ring buffer by clearing out any seqence of contiguous - // `None`'s from the front of `pending_responses`. - // NB: We can't pop_front() because other requests' responses because another - // requester might have grabbed the output mutex before us: - // T1: grab input mutex - // T1: send request_no 23 - // T1: release input mutex - // T2: grab input mutex - // T2: send request_no 24 - // T2: release input mutex - // T2: grab output mutex - // T2: n_processed_responses + output.pending_responses.len() <= request_no - // 23 0 24 - // T2: enters poll loop that reads stdout - // T2: put response for 23 into pending_responses - // T2: put response for 24 into pending_resposnes - // pending_responses now looks like this: Front Some(response_23) Some(response_24) Back - // T2: takes its response_24 - // pending_responses now looks like this: Front Some(response_23) None Back - // T2: does the while loop below - // pending_responses now looks like this: Front Some(response_23) None Back - // T2: releases output mutex - // T1: grabs output mutex - // T1: n_processed_responses + output.pending_responses.len() > request_no - // 23 2 23 - // T1: skips poll loop that reads stdout - // T1: takes its response_23 - // pending_responses now looks like this: Front None None Back - // T2: does the while loop below - // pending_responses now looks like this: Front Back - // n_processed_responses now has value 25 - let res = output.pending_responses[request_no - n_processed_responses] - .take() - .expect("we own this request_no, nobody else is supposed to take it"); - while let Some(front) = output.pending_responses.front() { - if front.is_none() { - output.pending_responses.pop_front(); - output.n_processed_responses += 1; - } else { - break; - } - } - Ok(res) - } - - #[cfg(feature = "testing")] - fn record_and_log(&self, writebuf: &[u8]) { - let millis = std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH) - .unwrap() - .as_millis(); - - let seq = self.dump_sequence.fetch_add(1, Ordering::Relaxed); - - // these files will be collected to an allure report - let filename = format!("walredo-{millis}-{}-{seq}.walredo", writebuf.len()); - - let path = self.conf.tenant_path(&self.tenant_shard_id).join(&filename); - - let res = std::fs::OpenOptions::new() - .write(true) - .create_new(true) - .read(true) - .open(path) - .and_then(|mut f| f.write_all(writebuf)); - - // trip up allowed_errors - if let Err(e) = res { - tracing::error!(target=%filename, length=writebuf.len(), "failed to write out the walredo errored input: {e}"); - } else { - tracing::error!(filename, "erroring walredo input saved"); - } - } - - #[cfg(not(feature = "testing"))] - fn record_and_log(&self, _: &[u8]) {} 
-} - -impl Drop for WalRedoProcess { - fn drop(&mut self) { - self.child - .take() - .expect("we only do this once") - .kill_and_wait(WalRedoKillCause::WalRedoProcessDrop); - // no way to wait for stderr_logger_task from Drop because that is async only - } -} - -/// Wrapper type around `std::process::Child` which guarantees that the child -/// will be killed and waited-for by this process before being dropped. -struct NoLeakChild { - tenant_id: TenantShardId, - child: Option, -} - -impl Deref for NoLeakChild { - type Target = Child; - - fn deref(&self) -> &Self::Target { - self.child.as_ref().expect("must not use from drop") - } -} - -impl DerefMut for NoLeakChild { - fn deref_mut(&mut self) -> &mut Self::Target { - self.child.as_mut().expect("must not use from drop") - } -} - -impl NoLeakChild { - fn spawn(tenant_id: TenantShardId, command: &mut Command) -> io::Result { - let child = command.spawn()?; - Ok(NoLeakChild { - tenant_id, - child: Some(child), - }) - } - - fn kill_and_wait(mut self, cause: WalRedoKillCause) { - let child = match self.child.take() { - Some(child) => child, - None => return, - }; - Self::kill_and_wait_impl(child, cause); - } - - #[instrument(skip_all, fields(pid=child.id(), ?cause))] - fn kill_and_wait_impl(mut child: Child, cause: WalRedoKillCause) { - scopeguard::defer! { - WAL_REDO_PROCESS_COUNTERS.killed_by_cause[cause].inc(); - } - let res = child.kill(); - if let Err(e) = res { - // This branch is very unlikely because: - // - We (= pageserver) spawned this process successfully, so, we're allowed to kill it. - // - This is the only place that calls .kill() - // - We consume `self`, so, .kill() can't be called twice. - // - If the process exited by itself or was killed by someone else, - // .kill() will still succeed because we haven't wait()'ed yet. - // - // So, if we arrive here, we have really no idea what happened, - // whether the PID stored in self.child is still valid, etc. - // If this function were fallible, we'd return an error, but - // since it isn't, all we can do is log an error and proceed - // with the wait(). - error!(error = %e, "failed to SIGKILL; subsequent wait() might fail or wait for wrong process"); - } - - match child.wait() { - Ok(exit_status) => { - info!(exit_status = %exit_status, "wait successful"); - } - Err(e) => { - error!(error = %e, "wait error; might leak the child process; it will show as zombie (defunct)"); - } - } - } -} - -impl Drop for NoLeakChild { - fn drop(&mut self) { - let child = match self.child.take() { - Some(child) => child, - None => return, - }; - let tenant_shard_id = self.tenant_id; - // Offload the kill+wait of the child process into the background. - // If someone stops the runtime, we'll leak the child process. - // We can ignore that case because we only stop the runtime on pageserver exit. - tokio::runtime::Handle::current().spawn(async move { - tokio::task::spawn_blocking(move || { - // Intentionally don't inherit the tracing context from whoever is dropping us. - // This thread here is going to outlive of our dropper. 
- let span = tracing::info_span!( - "walredo", - tenant_id = %tenant_shard_id.tenant_id, - shard_id = %tenant_shard_id.shard_slug() - ); - let _entered = span.enter(); - Self::kill_and_wait_impl(child, WalRedoKillCause::NoLeakChildDrop); - }) - .await - }); - } -} - -trait NoLeakChildCommandExt { - fn spawn_no_leak_child(&mut self, tenant_id: TenantShardId) -> io::Result; -} - -impl NoLeakChildCommandExt for Command { - fn spawn_no_leak_child(&mut self, tenant_id: TenantShardId) -> io::Result { - NoLeakChild::spawn(tenant_id, self) - } -} - -// Functions for constructing messages to send to the postgres WAL redo -// process. See pgxn/neon_walredo/walredoproc.c for -// explanation of the protocol. - -fn build_begin_redo_for_block_msg(tag: BufferTag, buf: &mut Vec) { - let len = 4 + 1 + 4 * 4; - - buf.put_u8(b'B'); - buf.put_u32(len as u32); - - tag.ser_into(buf) - .expect("serialize BufferTag should always succeed"); -} - -fn build_push_page_msg(tag: BufferTag, base_img: &[u8], buf: &mut Vec) { - assert!(base_img.len() == 8192); - - let len = 4 + 1 + 4 * 4 + base_img.len(); - - buf.put_u8(b'P'); - buf.put_u32(len as u32); - tag.ser_into(buf) - .expect("serialize BufferTag should always succeed"); - buf.put(base_img); -} - -fn build_apply_record_msg(endlsn: Lsn, rec: &[u8], buf: &mut Vec) { - let len = 4 + 8 + rec.len(); - - buf.put_u8(b'A'); - buf.put_u32(len as u32); - buf.put_u64(endlsn.0); - buf.put(rec); -} - -fn build_get_page_msg(tag: BufferTag, buf: &mut Vec) { - let len = 4 + 1 + 4 * 4; - - buf.put_u8(b'G'); - buf.put_u32(len as u32); - tag.ser_into(buf) - .expect("serialize BufferTag should always succeed"); -} - #[cfg(test)] mod tests { use super::PostgresRedoManager; diff --git a/pageserver/src/walredo/apply_neon.rs b/pageserver/src/walredo/apply_neon.rs new file mode 100644 index 0000000000..52899349c4 --- /dev/null +++ b/pageserver/src/walredo/apply_neon.rs @@ -0,0 +1,235 @@ +use crate::walrecord::NeonWalRecord; +use anyhow::Context; +use byteorder::{ByteOrder, LittleEndian}; +use bytes::BytesMut; +use pageserver_api::key::{key_to_rel_block, key_to_slru_block, Key}; +use pageserver_api::reltag::SlruKind; +use postgres_ffi::pg_constants; +use postgres_ffi::relfile_utils::VISIBILITYMAP_FORKNUM; +use postgres_ffi::v14::nonrelfile_utils::{ + mx_offset_to_flags_bitshift, mx_offset_to_flags_offset, mx_offset_to_member_offset, + transaction_id_set_status, +}; +use postgres_ffi::BLCKSZ; +use tracing::*; + +/// Can this request be served by neon redo functions +/// or we need to pass it to wal-redo postgres process? +pub(crate) fn can_apply_in_neon(rec: &NeonWalRecord) -> bool { + // Currently, we don't have bespoken Rust code to replay any + // Postgres WAL records. But everything else is handled in neon. 
+ #[allow(clippy::match_like_matches_macro)] + match rec { + NeonWalRecord::Postgres { + will_init: _, + rec: _, + } => false, + _ => true, + } +} + +pub(crate) fn apply_in_neon( + record: &NeonWalRecord, + key: Key, + page: &mut BytesMut, +) -> Result<(), anyhow::Error> { + match record { + NeonWalRecord::Postgres { + will_init: _, + rec: _, + } => { + anyhow::bail!("tried to pass postgres wal record to neon WAL redo"); + } + NeonWalRecord::ClearVisibilityMapFlags { + new_heap_blkno, + old_heap_blkno, + flags, + } => { + // sanity check that this is modifying the correct relation + let (rel, blknum) = key_to_rel_block(key).context("invalid record")?; + assert!( + rel.forknum == VISIBILITYMAP_FORKNUM, + "ClearVisibilityMapFlags record on unexpected rel {}", + rel + ); + if let Some(heap_blkno) = *new_heap_blkno { + // Calculate the VM block and offset that corresponds to the heap block. + let map_block = pg_constants::HEAPBLK_TO_MAPBLOCK(heap_blkno); + let map_byte = pg_constants::HEAPBLK_TO_MAPBYTE(heap_blkno); + let map_offset = pg_constants::HEAPBLK_TO_OFFSET(heap_blkno); + + // Check that we're modifying the correct VM block. + assert!(map_block == blknum); + + // equivalent to PageGetContents(page) + let map = &mut page[pg_constants::MAXALIGN_SIZE_OF_PAGE_HEADER_DATA..]; + + map[map_byte as usize] &= !(flags << map_offset); + } + + // Repeat for 'old_heap_blkno', if any + if let Some(heap_blkno) = *old_heap_blkno { + let map_block = pg_constants::HEAPBLK_TO_MAPBLOCK(heap_blkno); + let map_byte = pg_constants::HEAPBLK_TO_MAPBYTE(heap_blkno); + let map_offset = pg_constants::HEAPBLK_TO_OFFSET(heap_blkno); + + assert!(map_block == blknum); + + let map = &mut page[pg_constants::MAXALIGN_SIZE_OF_PAGE_HEADER_DATA..]; + + map[map_byte as usize] &= !(flags << map_offset); + } + } + // Non-relational WAL records are handled here, with custom code that has the + // same effects as the corresponding Postgres WAL redo function. + NeonWalRecord::ClogSetCommitted { xids, timestamp } => { + let (slru_kind, segno, blknum) = key_to_slru_block(key).context("invalid record")?; + assert_eq!( + slru_kind, + SlruKind::Clog, + "ClogSetCommitted record with unexpected key {}", + key + ); + for &xid in xids { + let pageno = xid / pg_constants::CLOG_XACTS_PER_PAGE; + let expected_segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT; + let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; + + // Check that we're modifying the correct CLOG block. 
+ assert!( + segno == expected_segno, + "ClogSetCommitted record for XID {} with unexpected key {}", + xid, + key + ); + assert!( + blknum == expected_blknum, + "ClogSetCommitted record for XID {} with unexpected key {}", + xid, + key + ); + + transaction_id_set_status(xid, pg_constants::TRANSACTION_STATUS_COMMITTED, page); + } + + // Append the timestamp + if page.len() == BLCKSZ as usize + 8 { + page.truncate(BLCKSZ as usize); + } + if page.len() == BLCKSZ as usize { + page.extend_from_slice(×tamp.to_be_bytes()); + } else { + warn!( + "CLOG blk {} in seg {} has invalid size {}", + blknum, + segno, + page.len() + ); + } + } + NeonWalRecord::ClogSetAborted { xids } => { + let (slru_kind, segno, blknum) = key_to_slru_block(key).context("invalid record")?; + assert_eq!( + slru_kind, + SlruKind::Clog, + "ClogSetAborted record with unexpected key {}", + key + ); + for &xid in xids { + let pageno = xid / pg_constants::CLOG_XACTS_PER_PAGE; + let expected_segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT; + let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; + + // Check that we're modifying the correct CLOG block. + assert!( + segno == expected_segno, + "ClogSetAborted record for XID {} with unexpected key {}", + xid, + key + ); + assert!( + blknum == expected_blknum, + "ClogSetAborted record for XID {} with unexpected key {}", + xid, + key + ); + + transaction_id_set_status(xid, pg_constants::TRANSACTION_STATUS_ABORTED, page); + } + } + NeonWalRecord::MultixactOffsetCreate { mid, moff } => { + let (slru_kind, segno, blknum) = key_to_slru_block(key).context("invalid record")?; + assert_eq!( + slru_kind, + SlruKind::MultiXactOffsets, + "MultixactOffsetCreate record with unexpected key {}", + key + ); + // Compute the block and offset to modify. + // See RecordNewMultiXact in PostgreSQL sources. + let pageno = mid / pg_constants::MULTIXACT_OFFSETS_PER_PAGE as u32; + let entryno = mid % pg_constants::MULTIXACT_OFFSETS_PER_PAGE as u32; + let offset = (entryno * 4) as usize; + + // Check that we're modifying the correct multixact-offsets block. + let expected_segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT; + let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; + assert!( + segno == expected_segno, + "MultiXactOffsetsCreate record for multi-xid {} with unexpected key {}", + mid, + key + ); + assert!( + blknum == expected_blknum, + "MultiXactOffsetsCreate record for multi-xid {} with unexpected key {}", + mid, + key + ); + + LittleEndian::write_u32(&mut page[offset..offset + 4], *moff); + } + NeonWalRecord::MultixactMembersCreate { moff, members } => { + let (slru_kind, segno, blknum) = key_to_slru_block(key).context("invalid record")?; + assert_eq!( + slru_kind, + SlruKind::MultiXactMembers, + "MultixactMembersCreate record with unexpected key {}", + key + ); + for (i, member) in members.iter().enumerate() { + let offset = moff + i as u32; + + // Compute the block and offset to modify. + // See RecordNewMultiXact in PostgreSQL sources. + let pageno = offset / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32; + let memberoff = mx_offset_to_member_offset(offset); + let flagsoff = mx_offset_to_flags_offset(offset); + let bshift = mx_offset_to_flags_bitshift(offset); + + // Check that we're modifying the correct multixact-members block. 
+ let expected_segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT; + let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; + assert!( + segno == expected_segno, + "MultiXactMembersCreate record for offset {} with unexpected key {}", + moff, + key + ); + assert!( + blknum == expected_blknum, + "MultiXactMembersCreate record for offset {} with unexpected key {}", + moff, + key + ); + + let mut flagsval = LittleEndian::read_u32(&page[flagsoff..flagsoff + 4]); + flagsval &= !(((1 << pg_constants::MXACT_MEMBER_BITS_PER_XACT) - 1) << bshift); + flagsval |= member.status << bshift; + LittleEndian::write_u32(&mut page[flagsoff..flagsoff + 4], flagsval); + LittleEndian::write_u32(&mut page[memberoff..memberoff + 4], member.xid); + } + } + } + Ok(()) +} diff --git a/pageserver/src/walredo/process.rs b/pageserver/src/walredo/process.rs new file mode 100644 index 0000000000..85db3b4a4a --- /dev/null +++ b/pageserver/src/walredo/process.rs @@ -0,0 +1,406 @@ +use self::no_leak_child::NoLeakChild; +use crate::{ + config::PageServerConf, + metrics::{WalRedoKillCause, WAL_REDO_PROCESS_COUNTERS, WAL_REDO_RECORD_COUNTER}, + walrecord::NeonWalRecord, +}; +use anyhow::Context; +use bytes::Bytes; +use nix::poll::{PollFd, PollFlags}; +use pageserver_api::{reltag::RelTag, shard::TenantShardId}; +use postgres_ffi::BLCKSZ; +use std::os::fd::AsRawFd; +#[cfg(feature = "testing")] +use std::sync::atomic::AtomicUsize; +use std::{ + collections::VecDeque, + io::{Read, Write}, + process::{ChildStdin, ChildStdout, Command, Stdio}, + sync::{Mutex, MutexGuard}, + time::Duration, +}; +use tracing::{debug, error, instrument, Instrument}; +use utils::{lsn::Lsn, nonblock::set_nonblock}; + +mod no_leak_child; +/// The IPC protocol that pageserver and walredo process speak over their shared pipe. +mod protocol; + +pub struct WalRedoProcess { + #[allow(dead_code)] + conf: &'static PageServerConf, + tenant_shard_id: TenantShardId, + // Some() on construction, only becomes None on Drop. + child: Option, + stdout: Mutex, + stdin: Mutex, + /// Counter to separate same sized walredo inputs failing at the same millisecond. + #[cfg(feature = "testing")] + dump_sequence: AtomicUsize, +} + +struct ProcessInput { + stdin: ChildStdin, + n_requests: usize, +} + +struct ProcessOutput { + stdout: ChildStdout, + pending_responses: VecDeque>, + n_processed_responses: usize, +} + +impl WalRedoProcess { + // + // Start postgres binary in special WAL redo mode. + // + #[instrument(skip_all,fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), pg_version=pg_version))] + pub(crate) fn launch( + conf: &'static PageServerConf, + tenant_shard_id: TenantShardId, + pg_version: u32, + ) -> anyhow::Result { + let pg_bin_dir_path = conf.pg_bin_dir(pg_version).context("pg_bin_dir")?; // TODO these should be infallible. 
+ let pg_lib_dir_path = conf.pg_lib_dir(pg_version).context("pg_lib_dir")?; + + use no_leak_child::NoLeakChildCommandExt; + // Start postgres itself + let child = Command::new(pg_bin_dir_path.join("postgres")) + // the first arg must be --wal-redo so the child process enters into walredo mode + .arg("--wal-redo") + // the child doesn't process this arg, but, having it in the argv helps indentify the + // walredo process for a particular tenant when debugging a pagserver + .args(["--tenant-shard-id", &format!("{tenant_shard_id}")]) + .stdin(Stdio::piped()) + .stderr(Stdio::piped()) + .stdout(Stdio::piped()) + .env_clear() + .env("LD_LIBRARY_PATH", &pg_lib_dir_path) + .env("DYLD_LIBRARY_PATH", &pg_lib_dir_path) + // NB: The redo process is not trusted after we sent it the first + // walredo work. Before that, it is trusted. Specifically, we trust + // it to + // 1. close all file descriptors except stdin, stdout, stderr because + // pageserver might not be 100% diligent in setting FD_CLOEXEC on all + // the files it opens, and + // 2. to use seccomp to sandbox itself before processing the first + // walredo request. + .spawn_no_leak_child(tenant_shard_id) + .context("spawn process")?; + WAL_REDO_PROCESS_COUNTERS.started.inc(); + let mut child = scopeguard::guard(child, |child| { + error!("killing wal-redo-postgres process due to a problem during launch"); + child.kill_and_wait(WalRedoKillCause::Startup); + }); + + let stdin = child.stdin.take().unwrap(); + let stdout = child.stdout.take().unwrap(); + let stderr = child.stderr.take().unwrap(); + let stderr = tokio::process::ChildStderr::from_std(stderr) + .context("convert to tokio::ChildStderr")?; + macro_rules! set_nonblock_or_log_err { + ($file:ident) => {{ + let res = set_nonblock($file.as_raw_fd()); + if let Err(e) = &res { + error!(error = %e, file = stringify!($file), pid = child.id(), "set_nonblock failed"); + } + res + }}; + } + set_nonblock_or_log_err!(stdin)?; + set_nonblock_or_log_err!(stdout)?; + + // all fallible operations post-spawn are complete, so get rid of the guard + let child = scopeguard::ScopeGuard::into_inner(child); + + tokio::spawn( + async move { + scopeguard::defer! { + debug!("wal-redo-postgres stderr_logger_task finished"); + crate::metrics::WAL_REDO_PROCESS_COUNTERS.active_stderr_logger_tasks_finished.inc(); + } + debug!("wal-redo-postgres stderr_logger_task started"); + crate::metrics::WAL_REDO_PROCESS_COUNTERS.active_stderr_logger_tasks_started.inc(); + + use tokio::io::AsyncBufReadExt; + let mut stderr_lines = tokio::io::BufReader::new(stderr); + let mut buf = Vec::new(); + let res = loop { + buf.clear(); + // TODO we don't trust the process to cap its stderr length. + // Currently it can do unbounded Vec allocation. 
+ match stderr_lines.read_until(b'\n', &mut buf).await { + Ok(0) => break Ok(()), // eof + Ok(num_bytes) => { + let output = String::from_utf8_lossy(&buf[..num_bytes]); + error!(%output, "received output"); + } + Err(e) => { + break Err(e); + } + } + }; + match res { + Ok(()) => (), + Err(e) => { + error!(error=?e, "failed to read from walredo stderr"); + } + } + }.instrument(tracing::info_span!(parent: None, "wal-redo-postgres-stderr", pid = child.id(), tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %pg_version)) + ); + + Ok(Self { + conf, + tenant_shard_id, + child: Some(child), + stdin: Mutex::new(ProcessInput { + stdin, + n_requests: 0, + }), + stdout: Mutex::new(ProcessOutput { + stdout, + pending_responses: VecDeque::new(), + n_processed_responses: 0, + }), + #[cfg(feature = "testing")] + dump_sequence: AtomicUsize::default(), + }) + } + + pub(crate) fn id(&self) -> u32 { + self.child + .as_ref() + .expect("must not call this during Drop") + .id() + } + + // Apply given WAL records ('records') over an old page image. Returns + // new page image. + // + #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), pid=%self.id()))] + pub(crate) fn apply_wal_records( + &self, + rel: RelTag, + blknum: u32, + base_img: &Option, + records: &[(Lsn, NeonWalRecord)], + wal_redo_timeout: Duration, + ) -> anyhow::Result { + let tag = protocol::BufferTag { rel, blknum }; + let input = self.stdin.lock().unwrap(); + + // Serialize all the messages to send the WAL redo process first. + // + // This could be problematic if there are millions of records to replay, + // but in practice the number of records is usually so small that it doesn't + // matter, and it's better to keep this code simple. + // + // Most requests start with a before-image with BLCKSZ bytes, followed by + // by some other WAL records. Start with a buffer that can hold that + // comfortably. + let mut writebuf: Vec = Vec::with_capacity((BLCKSZ as usize) * 3); + protocol::build_begin_redo_for_block_msg(tag, &mut writebuf); + if let Some(img) = base_img { + protocol::build_push_page_msg(tag, img, &mut writebuf); + } + for (lsn, rec) in records.iter() { + if let NeonWalRecord::Postgres { + will_init: _, + rec: postgres_rec, + } = rec + { + protocol::build_apply_record_msg(*lsn, postgres_rec, &mut writebuf); + } else { + anyhow::bail!("tried to pass neon wal record to postgres WAL redo"); + } + } + protocol::build_get_page_msg(tag, &mut writebuf); + WAL_REDO_RECORD_COUNTER.inc_by(records.len() as u64); + + let res = self.apply_wal_records0(&writebuf, input, wal_redo_timeout); + + if res.is_err() { + // not all of these can be caused by this particular input, however these are so rare + // in tests so capture all. + self.record_and_log(&writebuf); + } + + res + } + + fn apply_wal_records0( + &self, + writebuf: &[u8], + input: MutexGuard, + wal_redo_timeout: Duration, + ) -> anyhow::Result { + let mut proc = { input }; // TODO: remove this legacy rename, but this keep the patch small. + let mut nwrite = 0usize; + + while nwrite < writebuf.len() { + let mut stdin_pollfds = [PollFd::new(&proc.stdin, PollFlags::POLLOUT)]; + let n = loop { + match nix::poll::poll(&mut stdin_pollfds[..], wal_redo_timeout.as_millis() as i32) { + Err(nix::errno::Errno::EINTR) => continue, + res => break res, + } + }?; + + if n == 0 { + anyhow::bail!("WAL redo timed out"); + } + + // If 'stdin' is writeable, do write. 
+ let in_revents = stdin_pollfds[0].revents().unwrap(); + if in_revents & (PollFlags::POLLERR | PollFlags::POLLOUT) != PollFlags::empty() { + nwrite += proc.stdin.write(&writebuf[nwrite..])?; + } + if in_revents.contains(PollFlags::POLLHUP) { + // We still have more data to write, but the process closed the pipe. + anyhow::bail!("WAL redo process closed its stdin unexpectedly"); + } + } + let request_no = proc.n_requests; + proc.n_requests += 1; + drop(proc); + + // To improve walredo performance we separate sending requests and receiving + // responses. Them are protected by different mutexes (output and input). + // If thread T1, T2, T3 send requests D1, D2, D3 to walredo process + // then there is not warranty that T1 will first granted output mutex lock. + // To address this issue we maintain number of sent requests, number of processed + // responses and ring buffer with pending responses. After sending response + // (under input mutex), threads remembers request number. Then it releases + // input mutex, locks output mutex and fetch in ring buffer all responses until + // its stored request number. The it takes correspondent element from + // pending responses ring buffer and truncate all empty elements from the front, + // advancing processed responses number. + + let mut output = self.stdout.lock().unwrap(); + let n_processed_responses = output.n_processed_responses; + while n_processed_responses + output.pending_responses.len() <= request_no { + // We expect the WAL redo process to respond with an 8k page image. We read it + // into this buffer. + let mut resultbuf = vec![0; BLCKSZ.into()]; + let mut nresult: usize = 0; // # of bytes read into 'resultbuf' so far + while nresult < BLCKSZ.into() { + let mut stdout_pollfds = [PollFd::new(&output.stdout, PollFlags::POLLIN)]; + // We do two things simultaneously: reading response from stdout + // and forward any logging information that the child writes to its stderr to the page server's log. + let n = loop { + match nix::poll::poll( + &mut stdout_pollfds[..], + wal_redo_timeout.as_millis() as i32, + ) { + Err(nix::errno::Errno::EINTR) => continue, + res => break res, + } + }?; + + if n == 0 { + anyhow::bail!("WAL redo timed out"); + } + + // If we have some data in stdout, read it to the result buffer. + let out_revents = stdout_pollfds[0].revents().unwrap(); + if out_revents & (PollFlags::POLLERR | PollFlags::POLLIN) != PollFlags::empty() { + nresult += output.stdout.read(&mut resultbuf[nresult..])?; + } + if out_revents.contains(PollFlags::POLLHUP) { + anyhow::bail!("WAL redo process closed its stdout unexpectedly"); + } + } + output + .pending_responses + .push_back(Some(Bytes::from(resultbuf))); + } + // Replace our request's response with None in `pending_responses`. + // Then make space in the ring buffer by clearing out any seqence of contiguous + // `None`'s from the front of `pending_responses`. 
+ // NB: We can't pop_front() because other requests' responses because another + // requester might have grabbed the output mutex before us: + // T1: grab input mutex + // T1: send request_no 23 + // T1: release input mutex + // T2: grab input mutex + // T2: send request_no 24 + // T2: release input mutex + // T2: grab output mutex + // T2: n_processed_responses + output.pending_responses.len() <= request_no + // 23 0 24 + // T2: enters poll loop that reads stdout + // T2: put response for 23 into pending_responses + // T2: put response for 24 into pending_resposnes + // pending_responses now looks like this: Front Some(response_23) Some(response_24) Back + // T2: takes its response_24 + // pending_responses now looks like this: Front Some(response_23) None Back + // T2: does the while loop below + // pending_responses now looks like this: Front Some(response_23) None Back + // T2: releases output mutex + // T1: grabs output mutex + // T1: n_processed_responses + output.pending_responses.len() > request_no + // 23 2 23 + // T1: skips poll loop that reads stdout + // T1: takes its response_23 + // pending_responses now looks like this: Front None None Back + // T2: does the while loop below + // pending_responses now looks like this: Front Back + // n_processed_responses now has value 25 + let res = output.pending_responses[request_no - n_processed_responses] + .take() + .expect("we own this request_no, nobody else is supposed to take it"); + while let Some(front) = output.pending_responses.front() { + if front.is_none() { + output.pending_responses.pop_front(); + output.n_processed_responses += 1; + } else { + break; + } + } + Ok(res) + } + + #[cfg(feature = "testing")] + fn record_and_log(&self, writebuf: &[u8]) { + use std::sync::atomic::Ordering; + + let millis = std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .unwrap() + .as_millis(); + + let seq = self.dump_sequence.fetch_add(1, Ordering::Relaxed); + + // these files will be collected to an allure report + let filename = format!("walredo-{millis}-{}-{seq}.walredo", writebuf.len()); + + let path = self.conf.tenant_path(&self.tenant_shard_id).join(&filename); + + let res = std::fs::OpenOptions::new() + .write(true) + .create_new(true) + .read(true) + .open(path) + .and_then(|mut f| f.write_all(writebuf)); + + // trip up allowed_errors + if let Err(e) = res { + tracing::error!(target=%filename, length=writebuf.len(), "failed to write out the walredo errored input: {e}"); + } else { + tracing::error!(filename, "erroring walredo input saved"); + } + } + + #[cfg(not(feature = "testing"))] + fn record_and_log(&self, _: &[u8]) {} +} + +impl Drop for WalRedoProcess { + fn drop(&mut self) { + self.child + .take() + .expect("we only do this once") + .kill_and_wait(WalRedoKillCause::WalRedoProcessDrop); + // no way to wait for stderr_logger_task from Drop because that is async only + } +} diff --git a/pageserver/src/walredo/process/no_leak_child.rs b/pageserver/src/walredo/process/no_leak_child.rs new file mode 100644 index 0000000000..ca016408e6 --- /dev/null +++ b/pageserver/src/walredo/process/no_leak_child.rs @@ -0,0 +1,126 @@ +use tracing; +use tracing::error; +use tracing::info; +use tracing::instrument; + +use crate::metrics::WalRedoKillCause; +use crate::metrics::WAL_REDO_PROCESS_COUNTERS; + +use std::io; +use std::process::Command; + +use std::ops::DerefMut; + +use std::ops::Deref; + +use std::process::Child; + +use pageserver_api::shard::TenantShardId; + +/// Wrapper type around `std::process::Child` 
which guarantees that the child +/// will be killed and waited-for by this process before being dropped. +pub(crate) struct NoLeakChild { + pub(crate) tenant_id: TenantShardId, + pub(crate) child: Option, +} + +impl Deref for NoLeakChild { + type Target = Child; + + fn deref(&self) -> &Self::Target { + self.child.as_ref().expect("must not use from drop") + } +} + +impl DerefMut for NoLeakChild { + fn deref_mut(&mut self) -> &mut Self::Target { + self.child.as_mut().expect("must not use from drop") + } +} + +impl NoLeakChild { + pub(crate) fn spawn(tenant_id: TenantShardId, command: &mut Command) -> io::Result { + let child = command.spawn()?; + Ok(NoLeakChild { + tenant_id, + child: Some(child), + }) + } + + pub(crate) fn kill_and_wait(mut self, cause: WalRedoKillCause) { + let child = match self.child.take() { + Some(child) => child, + None => return, + }; + Self::kill_and_wait_impl(child, cause); + } + + #[instrument(skip_all, fields(pid=child.id(), ?cause))] + pub(crate) fn kill_and_wait_impl(mut child: Child, cause: WalRedoKillCause) { + scopeguard::defer! { + WAL_REDO_PROCESS_COUNTERS.killed_by_cause[cause].inc(); + } + let res = child.kill(); + if let Err(e) = res { + // This branch is very unlikely because: + // - We (= pageserver) spawned this process successfully, so, we're allowed to kill it. + // - This is the only place that calls .kill() + // - We consume `self`, so, .kill() can't be called twice. + // - If the process exited by itself or was killed by someone else, + // .kill() will still succeed because we haven't wait()'ed yet. + // + // So, if we arrive here, we have really no idea what happened, + // whether the PID stored in self.child is still valid, etc. + // If this function were fallible, we'd return an error, but + // since it isn't, all we can do is log an error and proceed + // with the wait(). + error!(error = %e, "failed to SIGKILL; subsequent wait() might fail or wait for wrong process"); + } + + match child.wait() { + Ok(exit_status) => { + info!(exit_status = %exit_status, "wait successful"); + } + Err(e) => { + error!(error = %e, "wait error; might leak the child process; it will show as zombie (defunct)"); + } + } + } +} + +impl Drop for NoLeakChild { + fn drop(&mut self) { + let child = match self.child.take() { + Some(child) => child, + None => return, + }; + let tenant_shard_id = self.tenant_id; + // Offload the kill+wait of the child process into the background. + // If someone stops the runtime, we'll leak the child process. + // We can ignore that case because we only stop the runtime on pageserver exit. + tokio::runtime::Handle::current().spawn(async move { + tokio::task::spawn_blocking(move || { + // Intentionally don't inherit the tracing context from whoever is dropping us. + // This thread here is going to outlive of our dropper. 
+ let span = tracing::info_span!( + "walredo", + tenant_id = %tenant_shard_id.tenant_id, + shard_id = %tenant_shard_id.shard_slug() + ); + let _entered = span.enter(); + Self::kill_and_wait_impl(child, WalRedoKillCause::NoLeakChildDrop); + }) + .await + }); + } +} + +pub(crate) trait NoLeakChildCommandExt { + fn spawn_no_leak_child(&mut self, tenant_id: TenantShardId) -> io::Result; +} + +impl NoLeakChildCommandExt for Command { + fn spawn_no_leak_child(&mut self, tenant_id: TenantShardId) -> io::Result { + NoLeakChild::spawn(tenant_id, self) + } +} diff --git a/pageserver/src/walredo/process/protocol.rs b/pageserver/src/walredo/process/protocol.rs new file mode 100644 index 0000000000..b703344cc8 --- /dev/null +++ b/pageserver/src/walredo/process/protocol.rs @@ -0,0 +1,57 @@ +use bytes::BufMut; +use pageserver_api::reltag::RelTag; +use serde::Serialize; +use utils::bin_ser::BeSer; +use utils::lsn::Lsn; + +/// +/// `RelTag` + block number (`blknum`) gives us a unique id of the page in the cluster. +/// +/// In Postgres `BufferTag` structure is used for exactly the same purpose. +/// [See more related comments here](https://github.com/postgres/postgres/blob/99c5852e20a0987eca1c38ba0c09329d4076b6a0/src/include/storage/buf_internals.h#L91). +/// +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Serialize)] +pub(crate) struct BufferTag { + pub rel: RelTag, + pub blknum: u32, +} + +pub(crate) fn build_begin_redo_for_block_msg(tag: BufferTag, buf: &mut Vec) { + let len = 4 + 1 + 4 * 4; + + buf.put_u8(b'B'); + buf.put_u32(len as u32); + + tag.ser_into(buf) + .expect("serialize BufferTag should always succeed"); +} + +pub(crate) fn build_push_page_msg(tag: BufferTag, base_img: &[u8], buf: &mut Vec) { + assert!(base_img.len() == 8192); + + let len = 4 + 1 + 4 * 4 + base_img.len(); + + buf.put_u8(b'P'); + buf.put_u32(len as u32); + tag.ser_into(buf) + .expect("serialize BufferTag should always succeed"); + buf.put(base_img); +} + +pub(crate) fn build_apply_record_msg(endlsn: Lsn, rec: &[u8], buf: &mut Vec) { + let len = 4 + 8 + rec.len(); + + buf.put_u8(b'A'); + buf.put_u32(len as u32); + buf.put_u64(endlsn.0); + buf.put(rec); +} + +pub(crate) fn build_get_page_msg(tag: BufferTag, buf: &mut Vec) { + let len = 4 + 1 + 4 * 4; + + buf.put_u8(b'G'); + buf.put_u32(len as u32); + tag.ser_into(buf) + .expect("serialize BufferTag should always succeed"); +} From 431f4234d43f3fe42fbda441e601a89d2421b52e Mon Sep 17 00:00:00 2001 From: John Spray Date: Tue, 6 Feb 2024 10:07:10 +0000 Subject: [PATCH 27/34] storage controller: embed database migrations in binary (#6637) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Problem We don't have a neat way to carry around migration .sql files during deploy, and in any case would prefer to avoid depending on diesel CLI to deploy. ## Summary of changes - Use `diesel_migrations` crate to embed migrations in our binary - Run migrations on startup - Drop the diesel dependency in the `neon_local` binary, as the attachment_service binary just needs the database to exist. Do database creation with a simple `createdb`. 
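For readers who have not used `diesel_migrations` before, the embed-and-run pattern the bullets above describe condenses to the sketch below (the `./migrations` path is the crate-relative directory assumed here; the actual wiring for the attachment service is in the diff that follows):

```rust
use diesel::{Connection, PgConnection};
use diesel_migrations::{embed_migrations, EmbeddedMigrations, HarnessWithOutput, MigrationHarness};

// Compile the .sql files under ./migrations into the binary itself,
// so a deploy no longer needs to ship migration files or the diesel CLI.
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");

fn run_migrations(database_url: &str) -> anyhow::Result<()> {
    let mut conn = PgConnection::establish(database_url)?;
    // Apply only the migrations that have not been recorded as run yet,
    // printing progress to stdout.
    HarnessWithOutput::write_to_stdout(&mut conn)
        .run_pending_migrations(MIGRATIONS)
        .map(|_| ())
        .map_err(|e| anyhow::anyhow!(e))
}
```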
Co-authored-by: Arpad Müller --- Cargo.lock | 1 + control_plane/attachment_service/Cargo.toml | 1 + control_plane/attachment_service/src/main.rs | 24 +++++++++++++++++++- 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index c16331636a..b2b2777408 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -281,6 +281,7 @@ dependencies = [ "clap", "control_plane", "diesel", + "diesel_migrations", "futures", "git-version", "hyper", diff --git a/control_plane/attachment_service/Cargo.toml b/control_plane/attachment_service/Cargo.toml index d3c62d74d2..3a65153c41 100644 --- a/control_plane/attachment_service/Cargo.toml +++ b/control_plane/attachment_service/Cargo.toml @@ -25,6 +25,7 @@ tokio-util.workspace = true tracing.workspace = true diesel = { version = "2.1.4", features = ["serde_json", "postgres"] } +diesel_migrations = { version = "2.1.0" } utils = { path = "../../libs/utils/" } metrics = { path = "../../libs/metrics/" } diff --git a/control_plane/attachment_service/src/main.rs b/control_plane/attachment_service/src/main.rs index 37b06c4090..7ac5918244 100644 --- a/control_plane/attachment_service/src/main.rs +++ b/control_plane/attachment_service/src/main.rs @@ -4,13 +4,14 @@ /// This enables running & testing pageservers without a full-blown /// deployment of the Neon cloud platform. /// -use anyhow::anyhow; +use anyhow::{anyhow, Context}; use attachment_service::http::make_router; use attachment_service::persistence::Persistence; use attachment_service::service::{Config, Service}; use aws_config::{self, BehaviorVersion, Region}; use camino::Utf8PathBuf; use clap::Parser; +use diesel::Connection; use metrics::launch_timestamp::LaunchTimestamp; use std::sync::Arc; use tokio::signal::unix::SignalKind; @@ -22,6 +23,9 @@ use utils::{project_build_tag, project_git_version, tcp_listener}; project_git_version!(GIT_VERSION); project_build_tag!(BUILD_TAG); +use diesel_migrations::{embed_migrations, EmbeddedMigrations}; +pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations"); + #[derive(Parser)] #[command(author, version, about, long_about = None)] #[command(arg_required_else_help(true))] @@ -166,6 +170,19 @@ impl Secrets { } } +async fn migration_run(database_url: &str) -> anyhow::Result<()> { + use diesel::PgConnection; + use diesel_migrations::{HarnessWithOutput, MigrationHarness}; + let mut conn = PgConnection::establish(database_url)?; + + HarnessWithOutput::write_to_stdout(&mut conn) + .run_pending_migrations(MIGRATIONS) + .map(|_| ()) + .map_err(|e| anyhow::anyhow!(e))?; + + Ok(()) +} + #[tokio::main] async fn main() -> anyhow::Result<()> { let launch_ts = Box::leak(Box::new(LaunchTimestamp::generate())); @@ -194,6 +211,11 @@ async fn main() -> anyhow::Result<()> { compute_hook_url: args.compute_hook_url, }; + // After loading secrets & config, but before starting anything else, apply database migrations + migration_run(&secrets.database_url) + .await + .context("Running database migrations")?; + let json_path = args.path; let persistence = Arc::new(Persistence::new(secrets.database_url, json_path.clone())); From 53743991decd9f1d13fd5063a8e840a38cbda383 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Tue, 6 Feb 2024 13:34:13 +0200 Subject: [PATCH 28/34] uploader: avoid cloning vecs just to get Bytes (#6645) Fix cloning the serialized heatmap on every attempt by just turning it into `bytes::Bytes` before clone so it will be a refcounted instead of refcounting a vec clone later on. 
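Put differently: cloning a `Vec<u8>` copies the whole buffer, while cloning a `Bytes` only bumps a reference count on a shared allocation. A minimal standalone sketch of that difference (the payload below is an illustrative stand-in, not the real heatmap structure):

```rust
use bytes::Bytes;

fn main() {
    // Stand-in for the serialized heatmap produced by serde_json::to_vec.
    let serialized: Vec<u8> = br#"{"timelines":[]}"#.to_vec();

    // Convert once; every retry of the upload can now clone cheaply.
    let payload = Bytes::from(serialized);
    let per_attempt = payload.clone(); // O(1): shares the same allocation

    assert_eq!(payload, per_attempt);
}
```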
Also fixes one cancellation token cloning I had missed in #6618. Cc: #6096 --- .../src/tenant/secondary/heatmap_uploader.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/pageserver/src/tenant/secondary/heatmap_uploader.rs b/pageserver/src/tenant/secondary/heatmap_uploader.rs index fff29b2487..806e3fb0e8 100644 --- a/pageserver/src/tenant/secondary/heatmap_uploader.rs +++ b/pageserver/src/tenant/secondary/heatmap_uploader.rs @@ -371,8 +371,6 @@ async fn upload_tenant_heatmap( }; let timelines = tenant.timelines.lock().unwrap().clone(); - let tenant_cancel = tenant.cancel.clone(); - // Ensure that Tenant::shutdown waits for any upload in flight: this is needed because otherwise // when we delete a tenant, we might race with an upload in flight and end up leaving a heatmap behind // in remote storage. @@ -401,6 +399,7 @@ async fn upload_tenant_heatmap( // Serialize the heatmap let bytes = serde_json::to_vec(&heatmap).map_err(|e| anyhow::anyhow!(e))?; + let bytes = bytes::Bytes::from(bytes); let size = bytes.len(); // Drop out early if nothing changed since our last upload @@ -411,13 +410,12 @@ async fn upload_tenant_heatmap( let path = remote_heatmap_path(tenant.get_tenant_shard_id()); - // Write the heatmap. + let cancel = &tenant.cancel; + tracing::debug!("Uploading {size} byte heatmap to {path}"); if let Err(e) = backoff::retry( || async { - let bytes = futures::stream::once(futures::future::ready(Ok(bytes::Bytes::from( - bytes.clone(), - )))); + let bytes = futures::stream::once(futures::future::ready(Ok(bytes.clone()))); remote_storage .upload_storage_object(bytes, size, &path) .await @@ -426,13 +424,13 @@ async fn upload_tenant_heatmap( 3, u32::MAX, "Uploading heatmap", - &tenant_cancel, + cancel, ) .await .ok_or_else(|| anyhow::anyhow!("Shutting down")) .and_then(|x| x) { - if tenant_cancel.is_cancelled() { + if cancel.is_cancelled() { return Err(UploadHeatmapError::Cancelled); } else { return Err(e.into()); From 0de46fd6f265e1ef0d27b0ab0f51fb7da2e52705 Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Tue, 6 Feb 2024 13:04:15 +0100 Subject: [PATCH 29/34] heavier_once_cell: switch to tokio::sync::RwLock (#6589) Using the RwLock reduces contention on the hot path. Co-authored-by: Joonas Koivunen --- libs/utils/src/sync/heavier_once_cell.rs | 153 ++++++++++++++----- pageserver/src/tenant/storage_layer/layer.rs | 24 +-- pageserver/src/tenant/timeline.rs | 2 +- 3 files changed, 127 insertions(+), 52 deletions(-) diff --git a/libs/utils/src/sync/heavier_once_cell.rs b/libs/utils/src/sync/heavier_once_cell.rs index 0ccaf4e716..f733d107f1 100644 --- a/libs/utils/src/sync/heavier_once_cell.rs +++ b/libs/utils/src/sync/heavier_once_cell.rs @@ -1,6 +1,6 @@ use std::sync::{ atomic::{AtomicUsize, Ordering}, - Arc, Mutex, MutexGuard, + Arc, }; use tokio::sync::Semaphore; @@ -12,7 +12,7 @@ use tokio::sync::Semaphore; /// /// [`OwnedSemaphorePermit`]: tokio::sync::OwnedSemaphorePermit pub struct OnceCell { - inner: Mutex>, + inner: tokio::sync::RwLock>, initializers: AtomicUsize, } @@ -50,7 +50,7 @@ impl OnceCell { let sem = Semaphore::new(1); sem.close(); Self { - inner: Mutex::new(Inner { + inner: tokio::sync::RwLock::new(Inner { init_semaphore: Arc::new(sem), value: Some(value), }), @@ -61,18 +61,18 @@ impl OnceCell { /// Returns a guard to an existing initialized value, or uniquely initializes the value before /// returning the guard. /// - /// Initializing might wait on any existing [`Guard::take_and_deinit`] deinitialization. 
+ /// Initializing might wait on any existing [`GuardMut::take_and_deinit`] deinitialization. /// /// Initialization is panic-safe and cancellation-safe. - pub async fn get_or_init(&self, factory: F) -> Result, E> + pub async fn get_mut_or_init(&self, factory: F) -> Result, E> where F: FnOnce(InitPermit) -> Fut, Fut: std::future::Future>, { let sem = { - let guard = self.inner.lock().unwrap(); + let guard = self.inner.write().await; if guard.value.is_some() { - return Ok(Guard(guard)); + return Ok(GuardMut(guard)); } guard.init_semaphore.clone() }; @@ -88,29 +88,72 @@ impl OnceCell { let permit = InitPermit(permit); let (value, _permit) = factory(permit).await?; - let guard = self.inner.lock().unwrap(); + let guard = self.inner.write().await; Ok(Self::set0(value, guard)) } Err(_closed) => { - let guard = self.inner.lock().unwrap(); + let guard = self.inner.write().await; assert!( guard.value.is_some(), "semaphore got closed, must be initialized" ); - return Ok(Guard(guard)); + return Ok(GuardMut(guard)); } } } - /// Assuming a permit is held after previous call to [`Guard::take_and_deinit`], it can be used + /// Returns a guard to an existing initialized value, or uniquely initializes the value before + /// returning the guard. + /// + /// Initialization is panic-safe and cancellation-safe. + pub async fn get_or_init(&self, factory: F) -> Result, E> + where + F: FnOnce(InitPermit) -> Fut, + Fut: std::future::Future>, + { + let sem = { + let guard = self.inner.read().await; + if guard.value.is_some() { + return Ok(GuardRef(guard)); + } + guard.init_semaphore.clone() + }; + + let permit = { + // increment the count for the duration of queued + let _guard = CountWaitingInitializers::start(self); + sem.acquire_owned().await + }; + + match permit { + Ok(permit) => { + let permit = InitPermit(permit); + let (value, _permit) = factory(permit).await?; + + let guard = self.inner.write().await; + + Ok(Self::set0(value, guard).downgrade()) + } + Err(_closed) => { + let guard = self.inner.read().await; + assert!( + guard.value.is_some(), + "semaphore got closed, must be initialized" + ); + return Ok(GuardRef(guard)); + } + } + } + + /// Assuming a permit is held after previous call to [`GuardMut::take_and_deinit`], it can be used /// to complete initializing the inner value. /// /// # Panics /// /// If the inner has already been initialized. - pub fn set(&self, value: T, _permit: InitPermit) -> Guard<'_, T> { - let guard = self.inner.lock().unwrap(); + pub async fn set(&self, value: T, _permit: InitPermit) -> GuardMut<'_, T> { + let guard = self.inner.write().await; // cannot assert that this permit is for self.inner.semaphore, but we can assert it cannot // give more permits right now. @@ -122,21 +165,31 @@ impl OnceCell { Self::set0(value, guard) } - fn set0(value: T, mut guard: std::sync::MutexGuard<'_, Inner>) -> Guard<'_, T> { + fn set0(value: T, mut guard: tokio::sync::RwLockWriteGuard<'_, Inner>) -> GuardMut<'_, T> { if guard.value.is_some() { drop(guard); unreachable!("we won permit, must not be initialized"); } guard.value = Some(value); guard.init_semaphore.close(); - Guard(guard) + GuardMut(guard) } /// Returns a guard to an existing initialized value, if any. - pub fn get(&self) -> Option> { - let guard = self.inner.lock().unwrap(); + pub async fn get_mut(&self) -> Option> { + let guard = self.inner.write().await; if guard.value.is_some() { - Some(Guard(guard)) + Some(GuardMut(guard)) + } else { + None + } + } + + /// Returns a guard to an existing initialized value, if any. 
+ pub async fn get(&self) -> Option> { + let guard = self.inner.read().await; + if guard.value.is_some() { + Some(GuardRef(guard)) } else { None } @@ -168,9 +221,9 @@ impl<'a, T> Drop for CountWaitingInitializers<'a, T> { /// Uninteresting guard object to allow short-lived access to inspect or clone the held, /// initialized value. #[derive(Debug)] -pub struct Guard<'a, T>(MutexGuard<'a, Inner>); +pub struct GuardMut<'a, T>(tokio::sync::RwLockWriteGuard<'a, Inner>); -impl std::ops::Deref for Guard<'_, T> { +impl std::ops::Deref for GuardMut<'_, T> { type Target = T; fn deref(&self) -> &Self::Target { @@ -181,7 +234,7 @@ impl std::ops::Deref for Guard<'_, T> { } } -impl std::ops::DerefMut for Guard<'_, T> { +impl std::ops::DerefMut for GuardMut<'_, T> { fn deref_mut(&mut self) -> &mut Self::Target { self.0 .value @@ -190,7 +243,7 @@ impl std::ops::DerefMut for Guard<'_, T> { } } -impl<'a, T> Guard<'a, T> { +impl<'a, T> GuardMut<'a, T> { /// Take the current value, and a new permit for it's deinitialization. /// /// The permit will be on a semaphore part of the new internal value, and any following @@ -208,6 +261,24 @@ impl<'a, T> Guard<'a, T> { .map(|v| (v, InitPermit(permit))) .expect("guard is not created unless value has been initialized") } + + pub fn downgrade(self) -> GuardRef<'a, T> { + GuardRef(self.0.downgrade()) + } +} + +#[derive(Debug)] +pub struct GuardRef<'a, T>(tokio::sync::RwLockReadGuard<'a, Inner>); + +impl std::ops::Deref for GuardRef<'_, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + self.0 + .value + .as_ref() + .expect("guard is not created unless value has been initialized") + } } /// Type held by OnceCell (de)initializing task. @@ -248,7 +319,7 @@ mod tests { barrier.wait().await; let won = { let g = cell - .get_or_init(|permit| { + .get_mut_or_init(|permit| { counters.factory_got_to_run.fetch_add(1, Ordering::Relaxed); async { counters.future_polled.fetch_add(1, Ordering::Relaxed); @@ -295,7 +366,11 @@ mod tests { let cell = cell.clone(); let deinitialization_started = deinitialization_started.clone(); async move { - let (answer, _permit) = cell.get().expect("initialized to value").take_and_deinit(); + let (answer, _permit) = cell + .get_mut() + .await + .expect("initialized to value") + .take_and_deinit(); assert_eq!(answer, initial); deinitialization_started.wait().await; @@ -306,7 +381,7 @@ mod tests { deinitialization_started.wait().await; let started_at = tokio::time::Instant::now(); - cell.get_or_init(|permit| async { Ok::<_, Infallible>((reinit, permit)) }) + cell.get_mut_or_init(|permit| async { Ok::<_, Infallible>((reinit, permit)) }) .await .unwrap(); @@ -318,21 +393,21 @@ mod tests { jh.await.unwrap(); - assert_eq!(*cell.get().unwrap(), reinit); + assert_eq!(*cell.get_mut().await.unwrap(), reinit); } - #[test] - fn reinit_with_deinit_permit() { + #[tokio::test] + async fn reinit_with_deinit_permit() { let cell = Arc::new(OnceCell::new(42)); - let (mol, permit) = cell.get().unwrap().take_and_deinit(); - cell.set(5, permit); - assert_eq!(*cell.get().unwrap(), 5); + let (mol, permit) = cell.get_mut().await.unwrap().take_and_deinit(); + cell.set(5, permit).await; + assert_eq!(*cell.get_mut().await.unwrap(), 5); - let (five, permit) = cell.get().unwrap().take_and_deinit(); + let (five, permit) = cell.get_mut().await.unwrap().take_and_deinit(); assert_eq!(5, five); - cell.set(mol, permit); - assert_eq!(*cell.get().unwrap(), 42); + cell.set(mol, permit).await; + assert_eq!(*cell.get_mut().await.unwrap(), 42); } #[tokio::test] @@ -340,13 +415,13 
@@ mod tests { let cell = OnceCell::default(); for _ in 0..10 { - cell.get_or_init(|_permit| async { Err("whatever error") }) + cell.get_mut_or_init(|_permit| async { Err("whatever error") }) .await .unwrap_err(); } let g = cell - .get_or_init(|permit| async { Ok::<_, Infallible>(("finally success", permit)) }) + .get_mut_or_init(|permit| async { Ok::<_, Infallible>(("finally success", permit)) }) .await .unwrap(); assert_eq!(*g, "finally success"); @@ -358,7 +433,7 @@ mod tests { let barrier = tokio::sync::Barrier::new(2); - let initializer = cell.get_or_init(|permit| async { + let initializer = cell.get_mut_or_init(|permit| async { barrier.wait().await; futures::future::pending::<()>().await; @@ -372,10 +447,10 @@ mod tests { // now initializer is dropped - assert!(cell.get().is_none()); + assert!(cell.get_mut().await.is_none()); let g = cell - .get_or_init(|permit| async { Ok::<_, Infallible>(("now initialized", permit)) }) + .get_mut_or_init(|permit| async { Ok::<_, Infallible>(("now initialized", permit)) }) .await .unwrap(); assert_eq!(*g, "now initialized"); diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index 12af866810..1f337adf53 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -299,8 +299,8 @@ impl Layer { }) } - pub(crate) fn info(&self, reset: LayerAccessStatsReset) -> HistoricLayerInfo { - self.0.info(reset) + pub(crate) async fn info(&self, reset: LayerAccessStatsReset) -> HistoricLayerInfo { + self.0.info(reset).await } pub(crate) fn access_stats(&self) -> &LayerAccessStats { @@ -611,10 +611,10 @@ impl LayerInner { let mut rx = self.status.subscribe(); let strong = { - match self.inner.get() { + match self.inner.get_mut().await { Some(mut either) => { self.wanted_evicted.store(true, Ordering::Relaxed); - either.downgrade() + ResidentOrWantedEvicted::downgrade(&mut either) } None => return Err(EvictionError::NotFound), } @@ -640,7 +640,7 @@ impl LayerInner { // use however late (compared to the initial expressing of wanted) as the // "outcome" now LAYER_IMPL_METRICS.inc_broadcast_lagged(); - match self.inner.get() { + match self.inner.get_mut().await { Some(_) => Err(EvictionError::Downloaded), None => Ok(()), } @@ -758,7 +758,7 @@ impl LayerInner { // use the already held initialization permit because it is impossible to hit the // below paths anymore essentially limiting the max loop iterations to 2. let (value, init_permit) = download(init_permit).await?; - let mut guard = self.inner.set(value, init_permit); + let mut guard = self.inner.set(value, init_permit).await; let (strong, _upgraded) = guard .get_and_upgrade() .expect("init creates strong reference, we held the init permit"); @@ -766,7 +766,7 @@ impl LayerInner { } let (weak, permit) = { - let mut locked = self.inner.get_or_init(download).await?; + let mut locked = self.inner.get_mut_or_init(download).await?; if let Some((strong, upgraded)) = locked.get_and_upgrade() { if upgraded { @@ -986,12 +986,12 @@ impl LayerInner { } } - fn info(&self, reset: LayerAccessStatsReset) -> HistoricLayerInfo { + async fn info(&self, reset: LayerAccessStatsReset) -> HistoricLayerInfo { let layer_file_name = self.desc.filename().file_name(); // this is not accurate: we could have the file locally but there was a cancellation // and now we are not in sync, or we are currently downloading it. 
- let remote = self.inner.get().is_none(); + let remote = self.inner.get_mut().await.is_none(); let access_stats = self.access_stats.as_api_model(reset); @@ -1050,7 +1050,7 @@ impl LayerInner { LAYER_IMPL_METRICS.inc_eviction_cancelled(EvictionCancelled::LayerGone); return; }; - match this.evict_blocking(version) { + match tokio::runtime::Handle::current().block_on(this.evict_blocking(version)) { Ok(()) => LAYER_IMPL_METRICS.inc_completed_evictions(), Err(reason) => LAYER_IMPL_METRICS.inc_eviction_cancelled(reason), } @@ -1058,7 +1058,7 @@ impl LayerInner { } } - fn evict_blocking(&self, only_version: usize) -> Result<(), EvictionCancelled> { + async fn evict_blocking(&self, only_version: usize) -> Result<(), EvictionCancelled> { // deleted or detached timeline, don't do anything. let Some(timeline) = self.timeline.upgrade() else { return Err(EvictionCancelled::TimelineGone); @@ -1067,7 +1067,7 @@ impl LayerInner { // to avoid starting a new download while we evict, keep holding on to the // permit. let _permit = { - let maybe_downloaded = self.inner.get(); + let maybe_downloaded = self.inner.get_mut().await; let (_weak, permit) = match maybe_downloaded { Some(mut guard) => { diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 0ba3fe728a..50ffc4d265 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -1268,7 +1268,7 @@ impl Timeline { let mut historic_layers = Vec::new(); for historic_layer in layer_map.iter_historic_layers() { let historic_layer = guard.get_from_desc(&historic_layer); - historic_layers.push(historic_layer.info(reset)); + historic_layers.push(historic_layer.info(reset).await); } LayerMapInfo { From dae56ef60ca33643b3d80b4d2497fb6902620db0 Mon Sep 17 00:00:00 2001 From: Vadim Kharitonov Date: Tue, 6 Feb 2024 13:15:42 +0100 Subject: [PATCH 30/34] Do not suspend compute if there is an active logical replication subscription. (#6570) ## Problem the idea is to keep compute up and running if there are any active logical replication subscriptions. ### Rationale Rationale: - The Write-Ahead Logging (WAL) files, which contain the data changes, will need to be retained on the publisher side until the subscriber is able to connect again and apply these changes. This could potentially lead to increased disk usage on the publisher - and we do not want to disrupt the source - I think it is more pain for our customer to resolve storage issues on the source than to pay for the compute at the target. - Upon resuming the compute resources, the subscriber will start consuming and applying the changes from the retained WAL files. The time taken to catch up will depend on the volume of changes and the configured vCPUs. we can avoid explaining complex situations where we lag behind (in extreme cases we could lag behind hours, days or even months) - I think an important use case for logical replication from a source is a one-time migration or release upgrade. In this case the customer would not mind if we are not suspended for the duration of the migration. 
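As a rough sketch of the activity probe this adds (see the `compute_tools/src/monitor.rs` hunk below), simplified and with an illustrative connection setup rather than the monitor's real client — the `pid is not null` filter drops read-only computes and subscriptions that exist on branches without a running apply worker:

```rust
// Minimal sketch only; connection details and function names are illustrative.
use postgres::{Client, NoTls};

fn has_active_subscription(client: &mut Client) -> Result<bool, postgres::Error> {
    // Same query as in the patch: count subscriptions with a live apply worker.
    let row = client.query_one(
        "select count(*) from pg_stat_subscription where pid is not null;",
        &[],
    )?;
    let count: i64 = row.get("count");
    Ok(count > 0)
}

fn main() -> Result<(), postgres::Error> {
    let mut client = Client::connect("host=localhost user=postgres", NoTls)?;
    if has_active_subscription(&mut client)? {
        // In the monitor this would refresh `last_active`, deferring autosuspend.
        println!("active logical replication subscription: keep compute running");
    }
    Ok(())
}
```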
We need to document this in the release notes and the documentation in the context of logical replication where Neon is the target (subscriber) ### See internal discussion here https://neondb.slack.com/archives/C04DGM6SMTM/p1706793400746539?thread_ts=1706792628.701279&cid=C04DGM6SMTM --- compute_tools/src/monitor.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/compute_tools/src/monitor.rs b/compute_tools/src/monitor.rs index f09bd02664..872a3f7750 100644 --- a/compute_tools/src/monitor.rs +++ b/compute_tools/src/monitor.rs @@ -138,6 +138,34 @@ fn watch_compute_activity(compute: &ComputeNode) { } } // + // Don't suspend compute if there is an active logical replication subscription + // + // `where pid is not null` – to filter out read only computes and subscription on branches + // + let logical_subscriptions_query = + "select count(*) from pg_stat_subscription where pid is not null;"; + match cli.query_one(logical_subscriptions_query, &[]) { + Ok(row) => match row.try_get::<&str, i64>("count") { + Ok(num_subscribers) => { + if num_subscribers > 0 { + compute.update_last_active(Some(Utc::now())); + continue; + } + } + Err(e) => { + warn!("failed to parse `pg_stat_subscription` count: {:?}", e); + continue; + } + }, + Err(e) => { + warn!( + "failed to get list of active logical replication subscriptions: {:?}", + e + ); + continue; + } + } + // // Do not suspend compute if autovacuum is running // let autovacuum_count_query = "select count(*) from pg_stat_activity where backend_type = 'autovacuum worker'"; From 62978433176ca6a9679baea769aa751c48fa037d Mon Sep 17 00:00:00 2001 From: John Spray Date: Tue, 6 Feb 2024 12:49:41 +0000 Subject: [PATCH 31/34] tests: flakiness fixes in pageserver tests (#6632) Fix several test flakes: - test_sharding_service_smoke had log failures on "Dropped LSN updates" - test_emergency_mode had log failures on a deletion queue shutdown check, where the check was incorrect because it was expecting channel receiver to stay alive after cancellation token was fired. - test_secondary_mode_eviction had racing heatmap uploads because the test was using a live migration hook to set up locations, where that migration was itself uploading heatmaps and generally making the situation more complex than it needed to be. These are the failure modes that I saw when spot checking the last few failures of each test. This will mostly/completely address #6511, but I'll leave that ticket open for a couple days and then check if either of the tests named in that ticket are flaky. Related #6511 --- pageserver/src/deletion_queue.rs | 6 ++-- test_runner/fixtures/neon_fixtures.py | 3 +- .../regress/test_disk_usage_eviction.py | 30 ++++++++++--------- test_runner/regress/test_sharding_service.py | 5 ++++ 4 files changed, 27 insertions(+), 17 deletions(-) diff --git a/pageserver/src/deletion_queue.rs b/pageserver/src/deletion_queue.rs index 6a820e1bdc..da1da9331a 100644 --- a/pageserver/src/deletion_queue.rs +++ b/pageserver/src/deletion_queue.rs @@ -700,8 +700,6 @@ impl DeletionQueue { } pub async fn shutdown(&mut self, timeout: Duration) { - self.cancel.cancel(); - match tokio::time::timeout(timeout, self.client.flush()).await { Ok(Ok(())) => { tracing::info!("Deletion queue flushed successfully on shutdown") @@ -715,6 +713,10 @@ impl DeletionQueue { tracing::warn!("Timed out flushing deletion queue on shutdown") } } + + // We only cancel _after_ flushing: otherwise we would be shutting down the + // components that do the flush. 
+ self.cancel.cancel(); } } diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 5ce2fca820..bf7c6ccc14 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -1162,7 +1162,8 @@ class NeonEnv: to the attachment service. """ meta = self.attachment_service.inspect(tenant_id) - assert meta is not None, f"{tenant_id} attachment location not found" + if meta is None: + return None pageserver_id = meta[1] return self.get_pageserver(pageserver_id) diff --git a/test_runner/regress/test_disk_usage_eviction.py b/test_runner/regress/test_disk_usage_eviction.py index dcbf8a5025..061c57c88b 100644 --- a/test_runner/regress/test_disk_usage_eviction.py +++ b/test_runner/regress/test_disk_usage_eviction.py @@ -17,7 +17,7 @@ from fixtures.neon_fixtures import ( from fixtures.pageserver.http import PageserverHttpClient from fixtures.pageserver.utils import wait_for_upload_queue_empty from fixtures.remote_storage import RemoteStorageKind -from fixtures.types import Lsn, TenantId, TenantShardId, TimelineId +from fixtures.types import Lsn, TenantId, TimelineId from fixtures.utils import wait_until GLOBAL_LRU_LOG_LINE = "tenant_min_resident_size-respecting LRU would not relieve pressure, evicting more following global LRU policy" @@ -194,8 +194,10 @@ class EvictionEnv: # we now do initial logical size calculation on startup, which on debug builds can fight with disk usage based eviction for tenant_id, timeline_id in self.timelines: - pageserver_http = self.neon_env.get_tenant_pageserver(tenant_id).http_client() - pageserver_http.timeline_wait_logical_size(tenant_id, timeline_id) + tenant_ps = self.neon_env.get_tenant_pageserver(tenant_id) + # Pageserver may be none if we are currently not attached anywhere, e.g. during secondary eviction test + if tenant_ps is not None: + tenant_ps.http_client().timeline_wait_logical_size(tenant_id, timeline_id) def statvfs_called(): assert pageserver.log_contains(".*running mocked statvfs.*") @@ -864,18 +866,18 @@ def test_secondary_mode_eviction(eviction_env_ha: EvictionEnv): # Set up a situation where one pageserver _only_ has secondary locations on it, # so that when we release space we are sure it is via secondary locations. - - log.info("Setting up secondary location...") - ps_attached = env.neon_env.pageservers[0] + log.info("Setting up secondary locations...") ps_secondary = env.neon_env.pageservers[1] for tenant_id in tenant_ids: - # Migrate all attached tenants to the same pageserver, so that all the secondaries - # will run on the other pageserver. This is necessary because when we create tenants, - # they are spread over pageservers by default. 
- env.neon_env.attachment_service.tenant_shard_migrate( - TenantShardId(tenant_id, 0, 0), ps_attached.id - ) + # Find where it is attached + pageserver = env.neon_env.get_tenant_pageserver(tenant_id) + pageserver.http_client().tenant_heatmap_upload(tenant_id) + # Detach it + pageserver.tenant_detach(tenant_id) + + # Create a secondary mode location for the tenant, all tenants on one pageserver that will only + # contain secondary locations: this is the one where we will exercise disk usage eviction ps_secondary.tenant_location_configure( tenant_id, { @@ -887,8 +889,8 @@ def test_secondary_mode_eviction(eviction_env_ha: EvictionEnv): readback_conf = ps_secondary.read_tenant_location_conf(tenant_id) log.info(f"Read back conf: {readback_conf}") - # Request secondary location to download all layers that the attached location has - ps_attached.http_client().tenant_heatmap_upload(tenant_id) + # Request secondary location to download all layers that the attached location indicated + # in its heatmap ps_secondary.http_client().tenant_secondary_download(tenant_id) # Configure the secondary pageserver to have a phony small disk size diff --git a/test_runner/regress/test_sharding_service.py b/test_runner/regress/test_sharding_service.py index 5c70378ab0..ee57fcb2cf 100644 --- a/test_runner/regress/test_sharding_service.py +++ b/test_runner/regress/test_sharding_service.py @@ -35,6 +35,11 @@ def test_sharding_service_smoke( neon_env_builder.num_pageservers = 3 env = neon_env_builder.init_configs() + for pageserver in env.pageservers: + # This test detaches tenants during migration, which can race with deletion queue operations, + # during detach we only do an advisory flush, we don't wait for it. + pageserver.allowed_errors.extend([".*Dropped remote consistent LSN updates.*"]) + # Start services by hand so that we can skip a pageserver (this will start + register later) env.broker.try_start() env.attachment_service.start() From 27a3c9ecbe8fd09f35bbe534c0628831f29d0a1f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Feb 2024 13:15:07 +0000 Subject: [PATCH 32/34] build(deps): bump cryptography from 41.0.6 to 42.0.0 (#6643) --- poetry.lock | 65 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 37 insertions(+), 28 deletions(-) diff --git a/poetry.lock b/poetry.lock index 2904e2872e..e18cd4a74d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -836,47 +836,56 @@ files = [ [[package]] name = "cryptography" -version = "41.0.6" +version = "42.0.0" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-41.0.6-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:0f27acb55a4e77b9be8d550d762b0513ef3fc658cd3eb15110ebbcbd626db12c"}, - {file = "cryptography-41.0.6-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:ae236bb8760c1e55b7a39b6d4d32d2279bc6c7c8500b7d5a13b6fb9fc97be35b"}, - {file = "cryptography-41.0.6-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afda76d84b053923c27ede5edc1ed7d53e3c9f475ebaf63c68e69f1403c405a8"}, - {file = "cryptography-41.0.6-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da46e2b5df770070412c46f87bac0849b8d685c5f2679771de277a422c7d0b86"}, - {file = "cryptography-41.0.6-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ff369dd19e8fe0528b02e8df9f2aeb2479f89b1270d90f96a63500afe9af5cae"}, - {file = "cryptography-41.0.6-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b648fe2a45e426aaee684ddca2632f62ec4613ef362f4d681a9a6283d10e079d"}, - {file = "cryptography-41.0.6-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5daeb18e7886a358064a68dbcaf441c036cbdb7da52ae744e7b9207b04d3908c"}, - {file = "cryptography-41.0.6-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:068bc551698c234742c40049e46840843f3d98ad7ce265fd2bd4ec0d11306596"}, - {file = "cryptography-41.0.6-cp37-abi3-win32.whl", hash = "sha256:2132d5865eea673fe6712c2ed5fb4fa49dba10768bb4cc798345748380ee3660"}, - {file = "cryptography-41.0.6-cp37-abi3-win_amd64.whl", hash = "sha256:48783b7e2bef51224020efb61b42704207dde583d7e371ef8fc2a5fb6c0aabc7"}, - {file = "cryptography-41.0.6-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8efb2af8d4ba9dbc9c9dd8f04d19a7abb5b49eab1f3694e7b5a16a5fc2856f5c"}, - {file = "cryptography-41.0.6-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5a550dc7a3b50b116323e3d376241829fd326ac47bc195e04eb33a8170902a9"}, - {file = "cryptography-41.0.6-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:85abd057699b98fce40b41737afb234fef05c67e116f6f3650782c10862c43da"}, - {file = "cryptography-41.0.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f39812f70fc5c71a15aa3c97b2bbe213c3f2a460b79bd21c40d033bb34a9bf36"}, - {file = "cryptography-41.0.6-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:742ae5e9a2310e9dade7932f9576606836ed174da3c7d26bc3d3ab4bd49b9f65"}, - {file = "cryptography-41.0.6-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:35f3f288e83c3f6f10752467c48919a7a94b7d88cc00b0668372a0d2ad4f8ead"}, - {file = "cryptography-41.0.6-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4d03186af98b1c01a4eda396b137f29e4e3fb0173e30f885e27acec8823c1b09"}, - {file = "cryptography-41.0.6-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b27a7fd4229abef715e064269d98a7e2909ebf92eb6912a9603c7e14c181928c"}, - {file = "cryptography-41.0.6-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:398ae1fc711b5eb78e977daa3cbf47cec20f2c08c5da129b7a296055fbb22aed"}, - {file = "cryptography-41.0.6-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7e00fb556bda398b99b0da289ce7053639d33b572847181d6483ad89835115f6"}, - {file = "cryptography-41.0.6-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:60e746b11b937911dc70d164060d28d273e31853bb359e2b2033c9e93e6f3c43"}, - {file = "cryptography-41.0.6-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3288acccef021e3c3c10d58933f44e8602cf04dba96d9796d70d537bb2f4bbc4"}, - {file = "cryptography-41.0.6.tar.gz", hash = 
"sha256:422e3e31d63743855e43e5a6fcc8b4acab860f560f9321b0ee6269cc7ed70cc3"}, + {file = "cryptography-42.0.0-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:c640b0ef54138fde761ec99a6c7dc4ce05e80420262c20fa239e694ca371d434"}, + {file = "cryptography-42.0.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:678cfa0d1e72ef41d48993a7be75a76b0725d29b820ff3cfd606a5b2b33fda01"}, + {file = "cryptography-42.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:146e971e92a6dd042214b537a726c9750496128453146ab0ee8971a0299dc9bd"}, + {file = "cryptography-42.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87086eae86a700307b544625e3ba11cc600c3c0ef8ab97b0fda0705d6db3d4e3"}, + {file = "cryptography-42.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:0a68bfcf57a6887818307600c3c0ebc3f62fbb6ccad2240aa21887cda1f8df1b"}, + {file = "cryptography-42.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5a217bca51f3b91971400890905a9323ad805838ca3fa1e202a01844f485ee87"}, + {file = "cryptography-42.0.0-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ca20550bb590db16223eb9ccc5852335b48b8f597e2f6f0878bbfd9e7314eb17"}, + {file = "cryptography-42.0.0-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:33588310b5c886dfb87dba5f013b8d27df7ffd31dc753775342a1e5ab139e59d"}, + {file = "cryptography-42.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9515ea7f596c8092fdc9902627e51b23a75daa2c7815ed5aa8cf4f07469212ec"}, + {file = "cryptography-42.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:35cf6ed4c38f054478a9df14f03c1169bb14bd98f0b1705751079b25e1cb58bc"}, + {file = "cryptography-42.0.0-cp37-abi3-win32.whl", hash = "sha256:8814722cffcfd1fbd91edd9f3451b88a8f26a5fd41b28c1c9193949d1c689dc4"}, + {file = "cryptography-42.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:a2a8d873667e4fd2f34aedab02ba500b824692c6542e017075a2efc38f60a4c0"}, + {file = "cryptography-42.0.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:8fedec73d590fd30c4e3f0d0f4bc961aeca8390c72f3eaa1a0874d180e868ddf"}, + {file = "cryptography-42.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be41b0c7366e5549265adf2145135dca107718fa44b6e418dc7499cfff6b4689"}, + {file = "cryptography-42.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ca482ea80626048975360c8e62be3ceb0f11803180b73163acd24bf014133a0"}, + {file = "cryptography-42.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:c58115384bdcfe9c7f644c72f10f6f42bed7cf59f7b52fe1bf7ae0a622b3a139"}, + {file = "cryptography-42.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:56ce0c106d5c3fec1038c3cca3d55ac320a5be1b44bf15116732d0bc716979a2"}, + {file = "cryptography-42.0.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:324721d93b998cb7367f1e6897370644751e5580ff9b370c0a50dc60a2003513"}, + {file = "cryptography-42.0.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:d97aae66b7de41cdf5b12087b5509e4e9805ed6f562406dfcf60e8481a9a28f8"}, + {file = "cryptography-42.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:85f759ed59ffd1d0baad296e72780aa62ff8a71f94dc1ab340386a1207d0ea81"}, + {file = "cryptography-42.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:206aaf42e031b93f86ad60f9f5d9da1b09164f25488238ac1dc488334eb5e221"}, + {file = "cryptography-42.0.0-cp39-abi3-win32.whl", hash = "sha256:74f18a4c8ca04134d2052a140322002fef535c99cdbc2a6afc18a8024d5c9d5b"}, + {file = "cryptography-42.0.0-cp39-abi3-win_amd64.whl", hash = 
"sha256:14e4b909373bc5bf1095311fa0f7fcabf2d1a160ca13f1e9e467be1ac4cbdf94"}, + {file = "cryptography-42.0.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3005166a39b70c8b94455fdbe78d87a444da31ff70de3331cdec2c568cf25b7e"}, + {file = "cryptography-42.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:be14b31eb3a293fc6e6aa2807c8a3224c71426f7c4e3639ccf1a2f3ffd6df8c3"}, + {file = "cryptography-42.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:bd7cf7a8d9f34cc67220f1195884151426ce616fdc8285df9054bfa10135925f"}, + {file = "cryptography-42.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c310767268d88803b653fffe6d6f2f17bb9d49ffceb8d70aed50ad45ea49ab08"}, + {file = "cryptography-42.0.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bdce70e562c69bb089523e75ef1d9625b7417c6297a76ac27b1b8b1eb51b7d0f"}, + {file = "cryptography-42.0.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e9326ca78111e4c645f7e49cbce4ed2f3f85e17b61a563328c85a5208cf34440"}, + {file = "cryptography-42.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:69fd009a325cad6fbfd5b04c711a4da563c6c4854fc4c9544bff3088387c77c0"}, + {file = "cryptography-42.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:988b738f56c665366b1e4bfd9045c3efae89ee366ca3839cd5af53eaa1401bce"}, + {file = "cryptography-42.0.0.tar.gz", hash = "sha256:6cf9b76d6e93c62114bd19485e5cb003115c134cf9ce91f8ac924c44f8c8c3f4"}, ] [package.dependencies] -cffi = ">=1.12" +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} [package.extras] docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] -docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] nox = ["nox"] -pep8test = ["black", "check-sdist", "mypy", "ruff"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] From 53a3ed0a7e26ddba5a6a70b2a5176ee7d5491283 Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Tue, 6 Feb 2024 15:43:33 +0100 Subject: [PATCH 33/34] debug_assert presence of `shard_id` tracing field (#6572) also: fixes https://github.com/neondatabase/neon/issues/6638 --- libs/utils/src/tracing_span_assert.rs | 51 +++++++------- pageserver/src/http/routes.rs | 10 +-- pageserver/src/lib.rs | 1 + pageserver/src/page_service.rs | 68 +++++++++++++------ pageserver/src/pgdatadir_mapping.rs | 3 +- pageserver/src/span.rs | 43 ++++++++++++ pageserver/src/tenant.rs | 16 +++-- pageserver/src/tenant/mgr.rs | 22 +++--- .../src/tenant/remote_timeline_client.rs | 1 + .../tenant/remote_timeline_client/download.rs | 2 +- pageserver/src/tenant/span.rs | 17 ----- pageserver/src/tenant/storage_layer/layer.rs | 3 + pageserver/src/tenant/timeline.rs | 8 +-- pageserver/src/tenant/timeline/span.rs | 19 ------ pageserver/src/walredo.rs | 9 +++ pageserver/src/walredo/process.rs | 4 +- 16 files changed, 165 insertions(+), 112 deletions(-) create mode 100644 pageserver/src/span.rs delete mode 100644 pageserver/src/tenant/span.rs diff --git a/libs/utils/src/tracing_span_assert.rs b/libs/utils/src/tracing_span_assert.rs index db17f7d8cd..d24c81ad0b 100644 --- a/libs/utils/src/tracing_span_assert.rs +++ 
b/libs/utils/src/tracing_span_assert.rs @@ -20,13 +20,13 @@ //! //! // Then, in the main code: //! -//! let span = tracing::info_span!("TestSpan", test_id = 1); +//! let span = tracing::info_span!("TestSpan", tenant_id = 1); //! let _guard = span.enter(); //! //! // ... down the call stack //! -//! use utils::tracing_span_assert::{check_fields_present, MultiNameExtractor}; -//! let extractor = MultiNameExtractor::new("TestExtractor", ["test", "test_id"]); +//! use utils::tracing_span_assert::{check_fields_present, ConstExtractor}; +//! let extractor = ConstExtractor::new("tenant_id"); //! if let Err(missing) = check_fields_present!([&extractor]) { //! // if you copypaste this to a custom assert method, remember to add #[track_caller] //! // to get the "user" code location for the panic. @@ -45,27 +45,26 @@ pub enum ExtractionResult { } pub trait Extractor: Send + Sync + std::fmt::Debug { - fn name(&self) -> &str; + fn id(&self) -> &str; fn extract(&self, fields: &tracing::field::FieldSet) -> ExtractionResult; } #[derive(Debug)] -pub struct MultiNameExtractor { - name: &'static str, - field_names: [&'static str; L], +pub struct ConstExtractor { + field_name: &'static str, } -impl MultiNameExtractor { - pub fn new(name: &'static str, field_names: [&'static str; L]) -> MultiNameExtractor { - MultiNameExtractor { name, field_names } +impl ConstExtractor { + pub const fn new(field_name: &'static str) -> ConstExtractor { + ConstExtractor { field_name } } } -impl Extractor for MultiNameExtractor { - fn name(&self) -> &str { - self.name +impl Extractor for ConstExtractor { + fn id(&self) -> &str { + self.field_name } fn extract(&self, fields: &tracing::field::FieldSet) -> ExtractionResult { - if fields.iter().any(|f| self.field_names.contains(&f.name())) { + if fields.iter().any(|f| f.name() == self.field_name) { ExtractionResult::Present } else { ExtractionResult::Absent @@ -203,19 +202,19 @@ mod tests { } impl<'a> fmt::Debug for MemoryIdentity<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:p}: {}", self.as_ptr(), self.0.name()) + write!(f, "{:p}: {}", self.as_ptr(), self.0.id()) } } struct Setup { _current_thread_subscriber_guard: tracing::subscriber::DefaultGuard, - tenant_extractor: MultiNameExtractor<2>, - timeline_extractor: MultiNameExtractor<2>, + tenant_extractor: ConstExtractor, + timeline_extractor: ConstExtractor, } fn setup_current_thread() -> Setup { - let tenant_extractor = MultiNameExtractor::new("TenantId", ["tenant_id", "tenant"]); - let timeline_extractor = MultiNameExtractor::new("TimelineId", ["timeline_id", "timeline"]); + let tenant_extractor = ConstExtractor::new("tenant_id"); + let timeline_extractor = ConstExtractor::new("timeline_id"); let registry = tracing_subscriber::registry() .with(tracing_subscriber::fmt::layer()) @@ -343,12 +342,12 @@ mod tests { let span = tracing::info_span!("foo", e = "some value"); let _guard = span.enter(); - let extractor = MultiNameExtractor::new("E", ["e"]); + let extractor = ConstExtractor::new("e"); let res = check_fields_present0([&extractor]); assert!(matches!(res, Ok(Summary::Unconfigured)), "{res:?}"); // similarly for a not found key - let extractor = MultiNameExtractor::new("F", ["foobar"]); + let extractor = ConstExtractor::new("foobar"); let res = check_fields_present0([&extractor]); assert!(matches!(res, Ok(Summary::Unconfigured)), "{res:?}"); } @@ -368,16 +367,14 @@ mod tests { // normally this would work, but without any tracing-subscriber configured, both // check_field_present find 
nothing let _guard = subspan.enter(); - let extractors: [&dyn Extractor; 2] = [ - &MultiNameExtractor::new("E", ["e"]), - &MultiNameExtractor::new("F", ["f"]), - ]; + let extractors: [&dyn Extractor; 2] = + [&ConstExtractor::new("e"), &ConstExtractor::new("f")]; let res = check_fields_present0(extractors); assert!(matches!(res, Ok(Summary::Unconfigured)), "{res:?}"); // similarly for a not found key - let extractor = MultiNameExtractor::new("G", ["g"]); + let extractor = ConstExtractor::new("g"); let res = check_fields_present0([&extractor]); assert!(matches!(res, Ok(Summary::Unconfigured)), "{res:?}"); } @@ -410,7 +407,7 @@ mod tests { let span = tracing::info_span!("foo", e = "some value"); let _guard = span.enter(); - let extractors: [&dyn Extractor; 1] = [&MultiNameExtractor::new("E", ["e"])]; + let extractors: [&dyn Extractor; 1] = [&ConstExtractor::new("e")]; if span.is_disabled() { // the tests are running single threaded, or we got lucky and no other tests subscriber diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index b97e272c86..792089ebe7 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -535,7 +535,7 @@ async fn timeline_create_handler( } .instrument(info_span!("timeline_create", tenant_id = %tenant_shard_id.tenant_id, - shard = %tenant_shard_id.shard_slug(), + shard_id = %tenant_shard_id.shard_slug(), timeline_id = %new_timeline_id, lsn=?request_data.ancestor_start_lsn, pg_version=?request_data.pg_version)) .await } @@ -831,7 +831,7 @@ async fn timeline_delete_handler( } })?; tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?; - tenant.delete_timeline(timeline_id).instrument(info_span!("timeline_delete", tenant_id=%tenant_shard_id.tenant_id, shard=%tenant_shard_id.shard_slug(), %timeline_id)) + tenant.delete_timeline(timeline_id).instrument(info_span!("timeline_delete", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id)) .await?; json_response(StatusCode::ACCEPTED, ()) @@ -856,7 +856,7 @@ async fn tenant_detach_handler( detach_ignored.unwrap_or(false), &state.deletion_queue_client, ) - .instrument(info_span!("tenant_detach", %tenant_id)) + .instrument(info_span!("tenant_detach", %tenant_id, shard_id=%tenant_shard_id.shard_slug())) .await?; json_response(StatusCode::OK, ()) @@ -1007,7 +1007,7 @@ async fn tenant_delete_handler( .delete_tenant(tenant_shard_id, ACTIVE_TENANT_TIMEOUT) .instrument(info_span!("tenant_delete_handler", tenant_id = %tenant_shard_id.tenant_id, - shard = %tenant_shard_id.shard_slug() + shard_id = %tenant_shard_id.shard_slug() )) .await?; @@ -1363,7 +1363,7 @@ async fn put_tenant_location_config_handler( mgr::detach_tenant(conf, tenant_shard_id, true, &state.deletion_queue_client) .instrument(info_span!("tenant_detach", tenant_id = %tenant_shard_id.tenant_id, - shard = %tenant_shard_id.shard_slug() + shard_id = %tenant_shard_id.shard_slug() )) .await { diff --git a/pageserver/src/lib.rs b/pageserver/src/lib.rs index bcde1166b7..c3f35142ec 100644 --- a/pageserver/src/lib.rs +++ b/pageserver/src/lib.rs @@ -17,6 +17,7 @@ pub mod page_cache; pub mod page_service; pub mod pgdatadir_mapping; pub mod repository; +pub mod span; pub(crate) mod statvfs; pub mod task_mgr; pub mod tenant; diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index 754c021c88..6fc38a76d4 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -63,9 +63,10 @@ use crate::import_datadir::import_wal_from_tar; use crate::metrics; use 
crate::metrics::LIVE_CONNECTIONS_COUNT; use crate::pgdatadir_mapping::Version; +use crate::span::debug_assert_current_span_has_tenant_and_timeline_id; +use crate::span::debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id; use crate::task_mgr; use crate::task_mgr::TaskKind; -use crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id; use crate::tenant::mgr; use crate::tenant::mgr::get_active_tenant_with_timeout; use crate::tenant::mgr::GetActiveTenantError; @@ -549,7 +550,7 @@ impl PageServerHandler { where IO: AsyncRead + AsyncWrite + Send + Sync + Unpin, { - debug_assert_current_span_has_tenant_and_timeline_id(); + debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id(); let tenant = mgr::get_active_tenant_with_timeout( tenant_id, @@ -631,6 +632,7 @@ impl PageServerHandler { ) } PagestreamFeMessage::GetPage(req) => { + // shard_id is filled in by the handler let span = tracing::info_span!("handle_get_page_at_lsn_request", rel = %req.rel, blkno = %req.blkno, req_lsn = %req.lsn); ( self.handle_get_page_at_lsn_request(tenant_id, timeline_id, &req, &ctx) @@ -719,7 +721,7 @@ impl PageServerHandler { where IO: AsyncRead + AsyncWrite + Send + Sync + Unpin, { - debug_assert_current_span_has_tenant_and_timeline_id(); + debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id(); // Create empty timeline info!("creating new timeline"); @@ -772,7 +774,7 @@ impl PageServerHandler { Ok(()) } - #[instrument(skip_all, fields(%start_lsn, %end_lsn))] + #[instrument(skip_all, fields(shard_id, %start_lsn, %end_lsn))] async fn handle_import_wal( &self, pgb: &mut PostgresBackend, @@ -785,8 +787,6 @@ impl PageServerHandler { where IO: AsyncRead + AsyncWrite + Send + Sync + Unpin, { - debug_assert_current_span_has_tenant_and_timeline_id(); - let timeline = self .get_active_tenant_timeline(tenant_id, timeline_id, ShardSelector::Zero) .await?; @@ -893,6 +893,7 @@ impl PageServerHandler { Ok(lsn) } + #[instrument(skip_all, fields(shard_id))] async fn handle_get_rel_exists_request( &mut self, tenant_id: TenantId, @@ -919,6 +920,7 @@ impl PageServerHandler { })) } + #[instrument(skip_all, fields(shard_id))] async fn handle_get_nblocks_request( &mut self, tenant_id: TenantId, @@ -946,6 +948,7 @@ impl PageServerHandler { })) } + #[instrument(skip_all, fields(shard_id))] async fn handle_db_size_request( &mut self, tenant_id: TenantId, @@ -1096,6 +1099,7 @@ impl PageServerHandler { } } + #[instrument(skip_all, fields(shard_id))] async fn handle_get_page_at_lsn_request( &mut self, tenant_id: TenantId, @@ -1129,6 +1133,9 @@ impl PageServerHandler { } }; + // load_timeline_for_page sets shard_id, but get_cached_timeline_for_page doesn't + set_tracing_field_shard_id(timeline); + let _timer = timeline .query_metrics .start_timer(metrics::SmgrQueryType::GetPageAtLsn); @@ -1147,6 +1154,7 @@ impl PageServerHandler { })) } + #[instrument(skip_all, fields(shard_id))] async fn handle_get_slru_segment_request( &mut self, tenant_id: TenantId, @@ -1175,7 +1183,7 @@ impl PageServerHandler { } #[allow(clippy::too_many_arguments)] - #[instrument(skip_all, fields(?lsn, ?prev_lsn, %full_backup))] + #[instrument(skip_all, fields(shard_id, ?lsn, ?prev_lsn, %full_backup))] async fn handle_basebackup_request( &mut self, pgb: &mut PostgresBackend, @@ -1190,8 +1198,6 @@ impl PageServerHandler { where IO: AsyncRead + AsyncWrite + Send + Sync + Unpin, { - debug_assert_current_span_has_tenant_and_timeline_id(); - let started = std::time::Instant::now(); // check that the timeline exists @@ -1313,6 
+1319,7 @@ impl PageServerHandler { .await .map_err(GetActiveTimelineError::Tenant)?; let timeline = tenant.get_timeline(timeline_id, true)?; + set_tracing_field_shard_id(&timeline); Ok(timeline) } } @@ -1477,21 +1484,29 @@ where .record("timeline_id", field::display(timeline_id)); self.check_permission(Some(tenant_id))?; - let timeline = self - .get_active_tenant_timeline(tenant_id, timeline_id, ShardSelector::Zero) - .await?; + async { + let timeline = self + .get_active_tenant_timeline(tenant_id, timeline_id, ShardSelector::Zero) + .await?; - let end_of_timeline = timeline.get_last_record_rlsn(); + let end_of_timeline = timeline.get_last_record_rlsn(); - pgb.write_message_noflush(&BeMessage::RowDescription(&[ - RowDescriptor::text_col(b"prev_lsn"), - RowDescriptor::text_col(b"last_lsn"), - ]))? - .write_message_noflush(&BeMessage::DataRow(&[ - Some(end_of_timeline.prev.to_string().as_bytes()), - Some(end_of_timeline.last.to_string().as_bytes()), - ]))? - .write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?; + pgb.write_message_noflush(&BeMessage::RowDescription(&[ + RowDescriptor::text_col(b"prev_lsn"), + RowDescriptor::text_col(b"last_lsn"), + ]))? + .write_message_noflush(&BeMessage::DataRow(&[ + Some(end_of_timeline.prev.to_string().as_bytes()), + Some(end_of_timeline.last.to_string().as_bytes()), + ]))? + .write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?; + anyhow::Ok(()) + } + .instrument(info_span!( + "handle_get_last_record_lsn", + shard_id = tracing::field::Empty + )) + .await?; } // same as basebackup, but result includes relational data as well else if query_string.starts_with("fullbackup ") { @@ -1748,3 +1763,12 @@ impl From for QueryError { } } } + +fn set_tracing_field_shard_id(timeline: &Timeline) { + debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id(); + tracing::Span::current().record( + "shard_id", + tracing::field::display(timeline.tenant_shard_id.shard_slug()), + ); + debug_assert_current_span_has_tenant_and_timeline_id(); +} diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index a36785a69f..f1d18c0146 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -10,6 +10,7 @@ use super::tenant::{PageReconstructError, Timeline}; use crate::context::RequestContext; use crate::keyspace::{KeySpace, KeySpaceAccum}; use crate::repository::*; +use crate::span::debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id; use crate::walrecord::NeonWalRecord; use anyhow::{ensure, Context}; use bytes::{Buf, Bytes, BytesMut}; @@ -699,7 +700,7 @@ impl Timeline { lsn: Lsn, ctx: &RequestContext, ) -> Result { - crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id(); + debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id(); // Fetch list of database dirs and iterate them let buf = self.get(DBDIR_KEY, lsn, ctx).await?; diff --git a/pageserver/src/span.rs b/pageserver/src/span.rs new file mode 100644 index 0000000000..91fee50514 --- /dev/null +++ b/pageserver/src/span.rs @@ -0,0 +1,43 @@ +use utils::tracing_span_assert::check_fields_present; + +mod extractors { + use utils::tracing_span_assert::ConstExtractor; + + pub(super) const TENANT_ID: ConstExtractor = ConstExtractor::new("tenant_id"); + pub(super) const SHARD_ID: ConstExtractor = ConstExtractor::new("shard_id"); + pub(super) const TIMELINE_ID: ConstExtractor = ConstExtractor::new("timeline_id"); +} + +#[track_caller] +pub(crate) fn debug_assert_current_span_has_tenant_id() { + if 
cfg!(debug_assertions) { + if let Err(missing) = check_fields_present!([&extractors::TENANT_ID, &extractors::SHARD_ID]) + { + panic!("missing extractors: {missing:?}") + } + } +} + +#[track_caller] +pub(crate) fn debug_assert_current_span_has_tenant_and_timeline_id() { + if cfg!(debug_assertions) { + if let Err(missing) = check_fields_present!([ + &extractors::TENANT_ID, + &extractors::SHARD_ID, + &extractors::TIMELINE_ID, + ]) { + panic!("missing extractors: {missing:?}") + } + } +} + +#[track_caller] +pub(crate) fn debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id() { + if cfg!(debug_assertions) { + if let Err(missing) = + check_fields_present!([&extractors::TENANT_ID, &extractors::TIMELINE_ID,]) + { + panic!("missing extractors: {missing:?}") + } + } +} diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 624c3e365f..fe85cf9753 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -100,6 +100,7 @@ use std::sync::Arc; use std::sync::{Mutex, RwLock}; use std::time::{Duration, Instant}; +use crate::span; use crate::tenant::timeline::delete::DeleteTimelineFlow; use crate::tenant::timeline::uninit::cleanup_timeline_directory; use crate::virtual_file::VirtualFile; @@ -150,7 +151,6 @@ pub mod block_io; pub mod disk_btree; pub(crate) mod ephemeral_file; pub mod layer_map; -mod span; pub mod metadata; mod par_fsync; @@ -168,7 +168,7 @@ pub(crate) mod timeline; pub mod size; -pub(crate) use timeline::span::debug_assert_current_span_has_tenant_and_timeline_id; +pub(crate) use crate::span::debug_assert_current_span_has_tenant_and_timeline_id; pub(crate) use timeline::{LogicalSizeCalculationCause, PageReconstructError, Timeline}; // re-export for use in remote_timeline_client.rs @@ -3998,6 +3998,10 @@ pub(crate) mod harness { }) } + pub fn span(&self) -> tracing::Span { + info_span!("TenantHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()) + } + pub async fn load(&self) -> (Arc, RequestContext) { let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error); ( @@ -4602,7 +4606,7 @@ mod tests { // so that all uploads finish & we can call harness.load() below again tenant .shutdown(Default::default(), true) - .instrument(info_span!("test_shutdown", tenant_id=%tenant.tenant_shard_id)) + .instrument(harness.span()) .await .ok() .unwrap(); @@ -4643,7 +4647,7 @@ mod tests { // so that all uploads finish & we can call harness.load() below again tenant .shutdown(Default::default(), true) - .instrument(info_span!("test_shutdown", tenant_id=%tenant.tenant_shard_id)) + .instrument(harness.span()) .await .ok() .unwrap(); @@ -4705,7 +4709,7 @@ mod tests { // so that all uploads finish & we can call harness.try_load() below again tenant .shutdown(Default::default(), true) - .instrument(info_span!("test_shutdown", tenant_id=%tenant.tenant_shard_id)) + .instrument(harness.span()) .await .ok() .unwrap(); @@ -5238,7 +5242,7 @@ mod tests { let raw_tline = tline.raw_timeline().unwrap(); raw_tline .shutdown() - .instrument(info_span!("test_shutdown", tenant_id=%raw_tline.tenant_shard_id, timeline_id=%TIMELINE_ID)) + .instrument(info_span!("test_shutdown", tenant_id=%raw_tline.tenant_shard_id, shard_id=%raw_tline.tenant_shard_id.shard_slug(), timeline_id=%TIMELINE_ID)) .await; std::mem::forget(tline); } diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index de0b636d47..5ec910ca3e 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -684,7 +684,7 @@ async fn 
shutdown_all_tenants0(tenants: &std::sync::RwLock) { // going to log too many lines debug!("tenant successfully stopped"); } - .instrument(info_span!("shutdown", tenant_id=%tenant_shard_id.tenant_id, shard=%tenant_shard_id.shard_slug())), + .instrument(info_span!("shutdown", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug())), ); total_attached += 1; @@ -1720,6 +1720,7 @@ pub(crate) async fn ignore_tenant( ignore_tenant0(conf, &TENANTS, tenant_id).await } +#[instrument(skip_all, fields(shard_id))] async fn ignore_tenant0( conf: &'static PageServerConf, tenants: &std::sync::RwLock, @@ -1727,6 +1728,10 @@ async fn ignore_tenant0( ) -> Result<(), TenantStateError> { // This is a legacy API (replaced by `/location_conf`). It does not support sharding let tenant_shard_id = TenantShardId::unsharded(tenant_id); + tracing::Span::current().record( + "shard_id", + tracing::field::display(tenant_shard_id.shard_slug()), + ); remove_tenant_from_memory(tenants, tenant_shard_id, async { let ignore_mark_file = conf.tenant_ignore_mark_file_path(&tenant_shard_id); @@ -2122,7 +2127,7 @@ fn tenant_map_acquire_slot_impl( METRICS.tenant_slot_writes.inc(); let mut locked = tenants.write().unwrap(); - let span = tracing::info_span!("acquire_slot", tenant_id=%tenant_shard_id.tenant_id, shard = %tenant_shard_id.shard_slug()); + let span = tracing::info_span!("acquire_slot", tenant_id=%tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug()); let _guard = span.enter(); let m = match &mut *locked { @@ -2358,7 +2363,7 @@ pub(crate) async fn immediate_gc( mod tests { use std::collections::BTreeMap; use std::sync::Arc; - use tracing::{info_span, Instrument}; + use tracing::Instrument; use crate::tenant::mgr::TenantSlot; @@ -2369,17 +2374,16 @@ mod tests { // Test that if an InProgress tenant is in the map during shutdown, the shutdown will gracefully // wait for it to complete before proceeding. 
-        let (t, _ctx) = TenantHarness::create("shutdown_awaits_in_progress_tenant")
-            .unwrap()
-            .load()
-            .await;
+        let h = TenantHarness::create("shutdown_awaits_in_progress_tenant").unwrap();
+        let (t, _ctx) = h.load().await;
         // harness loads it to active, which is forced and nothing is running on the tenant
         let id = t.tenant_shard_id();
         // tenant harness configures the logging and we cannot escape it
-        let _e = info_span!("testing", tenant_id = %id).entered();
+        let span = h.span();
+        let _e = span.enter();
         let tenants = BTreeMap::from([(id, TenantSlot::Attached(t.clone()))]);
         let tenants = Arc::new(std::sync::RwLock::new(TenantsMap::Open(tenants)));
@@ -2400,7 +2404,7 @@ mod tests {
             };
             super::remove_tenant_from_memory(&tenants, id, cleanup).await
         }
-        .instrument(info_span!("foobar", tenant_id = %id))
+        .instrument(h.span())
     });
     // now the long cleanup should be in place, with the stopping state
diff --git a/pageserver/src/tenant/remote_timeline_client.rs b/pageserver/src/tenant/remote_timeline_client.rs
index 831a073d17..152c9a2b7d 100644
--- a/pageserver/src/tenant/remote_timeline_client.rs
+++ b/pageserver/src/tenant/remote_timeline_client.rs
@@ -1952,6 +1952,7 @@ mod tests {
             tracing::info_span!(
                 "test",
                 tenant_id = %self.harness.tenant_shard_id.tenant_id,
+                shard_id = %self.harness.tenant_shard_id.shard_slug(),
                 timeline_id = %TIMELINE_ID
             )
         }
diff --git a/pageserver/src/tenant/remote_timeline_client/download.rs b/pageserver/src/tenant/remote_timeline_client/download.rs
index 2c50726b43..6c1125746b 100644
--- a/pageserver/src/tenant/remote_timeline_client/download.rs
+++ b/pageserver/src/tenant/remote_timeline_client/download.rs
@@ -17,11 +17,11 @@ use utils::timeout::timeout_cancellable;
 use utils::{backoff, crashsafe};
 use crate::config::PageServerConf;
+use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
 use crate::tenant::remote_timeline_client::{
     download_cancellable, remote_layer_path, remote_timelines_path, DOWNLOAD_TIMEOUT,
 };
 use crate::tenant::storage_layer::LayerFileName;
-use crate::tenant::timeline::span::debug_assert_current_span_has_tenant_and_timeline_id;
 use crate::tenant::Generation;
 use crate::virtual_file::on_fatal_io_error;
 use crate::TEMP_FILE_SUFFIX;
diff --git a/pageserver/src/tenant/span.rs b/pageserver/src/tenant/span.rs
deleted file mode 100644
index 04e92f4096..0000000000
--- a/pageserver/src/tenant/span.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-#[cfg(debug_assertions)]
-use utils::tracing_span_assert::{check_fields_present, MultiNameExtractor};
-
-#[cfg(not(debug_assertions))]
-pub(crate) fn debug_assert_current_span_has_tenant_id() {}
-
-#[cfg(debug_assertions)]
-pub(crate) static TENANT_ID_EXTRACTOR: once_cell::sync::Lazy<MultiNameExtractor<1>> =
-    once_cell::sync::Lazy::new(|| MultiNameExtractor::new("TenantId", ["tenant_id"]));
-
-#[cfg(debug_assertions)]
-#[track_caller]
-pub(crate) fn debug_assert_current_span_has_tenant_id() {
-    if let Err(missing) = check_fields_present!([&*TENANT_ID_EXTRACTOR]) {
-        panic!("missing extractors: {missing:?}")
-    }
-}
diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs
index 1f337adf53..52c0f8abdc 100644
--- a/pageserver/src/tenant/storage_layer/layer.rs
+++ b/pageserver/src/tenant/storage_layer/layer.rs
@@ -15,6 +15,7 @@ use utils::sync::heavier_once_cell;
 use crate::config::PageServerConf;
 use crate::context::RequestContext;
 use crate::repository::Key;
+use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
 use crate::tenant::{remote_timeline_client::LayerFileMetadata, Timeline};
 use super::delta_layer::{self, DeltaEntry};
@@ -836,6 +837,8 @@ impl LayerInner {
         timeline: Arc<Timeline>,
         permit: heavier_once_cell::InitPermit,
     ) -> Result {
+        debug_assert_current_span_has_tenant_and_timeline_id();
+
         let task_name = format!("download layer {}", self);
         let (tx, rx) = tokio::sync::oneshot::channel();
diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs
index 50ffc4d265..43aa178ab5 100644
--- a/pageserver/src/tenant/timeline.rs
+++ b/pageserver/src/tenant/timeline.rs
@@ -1138,7 +1138,7 @@ impl Timeline {
     /// Shut down immediately, without waiting for any open layers to flush to disk. This is a subset of
     /// the graceful [`Timeline::flush_and_shutdown`] function.
     pub(crate) async fn shutdown(&self) {
-        span::debug_assert_current_span_has_tenant_and_timeline_id();
+        debug_assert_current_span_has_tenant_and_timeline_id();
         // Signal any subscribers to our cancellation token to drop out
         tracing::debug!("Cancelling CancellationToken");
@@ -1964,7 +1964,7 @@ impl Timeline {
                 .await;
                 Ok(())
             }
-            .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, timeline_id=%self.timeline_id)),
+            .instrument(info_span!(parent: None, "initial_size_calculation", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id)),
         );
     }
@@ -2151,7 +2151,7 @@ impl Timeline {
         cause: LogicalSizeCalculationCause,
         ctx: &RequestContext,
     ) -> Result {
-        span::debug_assert_current_span_has_tenant_and_timeline_id();
+        crate::span::debug_assert_current_span_has_tenant_and_timeline_id();
         // We should never be calculating logical sizes on shard !=0, because these shards do not have
         // accurate relation sizes, and they do not emit consumption metrics.
         debug_assert!(self.tenant_shard_id.is_zero());
@@ -2849,7 +2849,7 @@ impl Timeline {
         frozen_layer: Arc,
         ctx: &RequestContext,
     ) -> Result<(), FlushLayerError> {
-        span::debug_assert_current_span_has_tenant_and_timeline_id();
+        debug_assert_current_span_has_tenant_and_timeline_id();
         // As a special case, when we have just imported an image into the repository,
         // instead of writing out a L0 delta layer, we directly write out image layer files instead. This is possible as long as *all* the data imported into the
diff --git a/pageserver/src/tenant/timeline/span.rs b/pageserver/src/tenant/timeline/span.rs
index 3b580c9d1b..8b13789179 100644
--- a/pageserver/src/tenant/timeline/span.rs
+++ b/pageserver/src/tenant/timeline/span.rs
@@ -1,20 +1 @@
-#[cfg(debug_assertions)]
-use utils::tracing_span_assert::{check_fields_present, Extractor, MultiNameExtractor};
-#[cfg(not(debug_assertions))]
-pub(crate) fn debug_assert_current_span_has_tenant_and_timeline_id() {}
-
-#[cfg(debug_assertions)]
-#[track_caller]
-pub(crate) fn debug_assert_current_span_has_tenant_and_timeline_id() {
-    static TIMELINE_ID_EXTRACTOR: once_cell::sync::Lazy<MultiNameExtractor<1>> =
-        once_cell::sync::Lazy::new(|| MultiNameExtractor::new("TimelineId", ["timeline_id"]));
-
-    let fields: [&dyn Extractor; 2] = [
-        &*crate::tenant::span::TENANT_ID_EXTRACTOR,
-        &*TIMELINE_ID_EXTRACTOR,
-    ];
-    if let Err(missing) = check_fields_present!(fields) {
-        panic!("missing extractors: {missing:?}")
-    }
-}
diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs
index 773e5fc051..98a6a0bb6c 100644
@@ -373,6 +373,7 @@ mod tests {
     use bytes::Bytes;
     use pageserver_api::shard::TenantShardId;
     use std::str::FromStr;
+    use tracing::Instrument;
     use utils::{id::TenantId, lsn::Lsn};
     #[tokio::test]
@@ -397,6 +398,7 @@ mod tests {
                 short_records(),
                 14,
             )
+            .instrument(h.span())
             .await
             .unwrap();
@@ -424,6 +426,7 @@ mod tests {
                 short_records(),
                 14,
             )
+            .instrument(h.span())
             .await
             .unwrap();
@@ -444,6 +447,7 @@ mod tests {
                 short_records(),
                 16, /* 16 currently produces stderr output on startup, which adds a nice extra edge */
             )
+            .instrument(h.span())
             .await
             .unwrap_err();
     }
@@ -472,6 +476,7 @@ mod tests {
         // underscored because unused, except for removal at drop
         _repo_dir: camino_tempfile::Utf8TempDir,
         manager: PostgresRedoManager,
+        tenant_shard_id: TenantShardId,
     }
     impl RedoHarness {
@@ -488,7 +493,11 @@ mod tests {
             Ok(RedoHarness {
                 _repo_dir: repo_dir,
                 manager,
+                tenant_shard_id,
             })
         }
+        fn span(&self) -> tracing::Span {
+            tracing::info_span!("RedoHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug())
+        }
     }
 }
diff --git a/pageserver/src/walredo/process.rs b/pageserver/src/walredo/process.rs
index 85db3b4a4a..bcbb263663 100644
@@ -54,12 +54,14 @@ impl WalRedoProcess {
     //
     // Start postgres binary in special WAL redo mode.
    //
-    #[instrument(skip_all,fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), pg_version=pg_version))]
+    #[instrument(skip_all,fields(pg_version=pg_version))]
     pub(crate) fn launch(
         conf: &'static PageServerConf,
         tenant_shard_id: TenantShardId,
         pg_version: u32,
     ) -> anyhow::Result {
+        crate::span::debug_assert_current_span_has_tenant_id();
+
         let pg_bin_dir_path = conf.pg_bin_dir(pg_version).context("pg_bin_dir")?; // TODO these should be infallible.
        let pg_lib_dir_path = conf.pg_lib_dir(pg_version).context("pg_lib_dir")?;

From d7b29aace7eec730af45e7f12fbe5620545b48aa Mon Sep 17 00:00:00 2001
From: Christian Schwarz
Date: Tue, 6 Feb 2024 16:20:02 +0100
Subject: [PATCH 34/34] refactor(walredo): don't create WalRedoManager for broken tenants (#6597)

When we later introduce a global pool of pre-spawned walredo processes
(https://github.com/neondatabase/neon/issues/6581), this refactoring avoids
having to plumb a reference to the pool through to all the places where we
create a broken tenant.

Builds atop the refactoring in #6583.
---
 pageserver/src/tenant.rs          | 18 +++++++-----------
 pageserver/src/tenant/tasks.rs    |  4 +++-
 pageserver/src/tenant/timeline.rs |  9 ++++++---
 3 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs
index fe85cf9753..f704f8c0dd 100644
--- a/pageserver/src/tenant.rs
+++ b/pageserver/src/tenant.rs
@@ -278,7 +278,7 @@ pub struct Tenant {
     // with timelines, which in turn may cause dropping replication connection, expiration of wait_for_lsn
     // timeout...
     gc_cs: tokio::sync::Mutex<()>,
-    walredo_mgr: Arc<WalRedoManager>,
+    walredo_mgr: Option<Arc<WalRedoManager>>,
     // provides access to timeline data sitting in the remote storage
     pub(crate) remote_storage: Option,
@@ -635,7 +635,7 @@ impl Tenant {
             conf,
             attached_conf,
             shard_identity,
-            wal_redo_manager,
+            Some(wal_redo_manager),
             tenant_shard_id,
             remote_storage.clone(),
             deletion_queue_client,
@@ -1195,10 +1195,6 @@ impl Tenant {
         tenant_shard_id: TenantShardId,
         reason: String,
     ) -> Arc {
-        let wal_redo_manager = Arc::new(WalRedoManager::from(PostgresRedoManager::new(
-            conf,
-            tenant_shard_id,
-        )));
         Arc::new(Tenant::new(
             TenantState::Broken {
                 reason,
@@ -1209,7 +1205,7 @@ impl Tenant {
             // Shard identity isn't meaningful for a broken tenant: it's just a placeholder
             // to occupy the slot for this TenantShardId.
             ShardIdentity::broken(tenant_shard_id.shard_number, tenant_shard_id.shard_count),
-            wal_redo_manager,
+            None,
             tenant_shard_id,
             None,
             DeletionQueueClient::broken(),
@@ -1978,7 +1974,7 @@ impl Tenant {
     }
     pub(crate) fn wal_redo_manager_status(&self) -> Option {
-        self.walredo_mgr.status()
+        self.walredo_mgr.as_ref().and_then(|mgr| mgr.status())
     }
     /// Changes tenant status to active, unless shutdown was already requested.
@@ -2613,7 +2609,7 @@ impl Tenant {
             self.tenant_shard_id,
             self.generation,
             self.shard_identity,
-            Arc::clone(&self.walredo_mgr),
+            self.walredo_mgr.as_ref().map(Arc::clone),
             resources,
             pg_version,
             state,
@@ -2631,7 +2627,7 @@ impl Tenant {
         conf: &'static PageServerConf,
         attached_conf: AttachedTenantConf,
         shard_identity: ShardIdentity,
-        walredo_mgr: Arc<WalRedoManager>,
+        walredo_mgr: Option<Arc<WalRedoManager>>,
         tenant_shard_id: TenantShardId,
         remote_storage: Option,
         deletion_queue_client: DeletionQueueClient,
@@ -4055,7 +4051,7 @@ pub(crate) mod harness {
             .unwrap(),
             // This is a legacy/test code path: sharding isn't supported here.
             ShardIdentity::unsharded(),
-            walredo_mgr,
+            Some(walredo_mgr),
             self.tenant_shard_id,
             Some(self.remote_storage.clone()),
             self.deletion_queue.new_client(),
diff --git a/pageserver/src/tenant/tasks.rs b/pageserver/src/tenant/tasks.rs
index 5f39c46a84..950cc46e71 100644
--- a/pageserver/src/tenant/tasks.rs
+++ b/pageserver/src/tenant/tasks.rs
@@ -199,7 +199,9 @@ async fn compaction_loop(tenant: Arc, cancel: CancellationToken) {
         // Perhaps we did no work and the walredo process has been idle for some time:
         // give it a chance to shut down to avoid leaving walredo process running indefinitely.
-        tenant.walredo_mgr.maybe_quiesce(period * 10);
+        if let Some(walredo_mgr) = &tenant.walredo_mgr {
+            walredo_mgr.maybe_quiesce(period * 10);
+        }
         // Sleep
         if tokio::time::timeout(sleep_duration, cancel.cancelled())
diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs
index 43aa178ab5..735b8003b4 100644
--- a/pageserver/src/tenant/timeline.rs
+++ b/pageserver/src/tenant/timeline.rs
@@ -215,8 +215,8 @@ pub struct Timeline {
     // Atomic would be more appropriate here.
     last_freeze_ts: RwLock,
-    // WAL redo manager
-    walredo_mgr: Arc<WalRedoManager>,
+    // WAL redo manager. `None` only for broken tenants.
+    walredo_mgr: Option<Arc<WalRedoManager>>,
     /// Remote storage client.
     /// See [`remote_timeline_client`](super::remote_timeline_client) module comment for details.
@@ -1427,7 +1427,7 @@ impl Timeline {
         tenant_shard_id: TenantShardId,
         generation: Generation,
         shard_identity: ShardIdentity,
-        walredo_mgr: Arc<WalRedoManager>,
+        walredo_mgr: Option<Arc<WalRedoManager>>,
         resources: TimelineResources,
         pg_version: u32,
         state: TimelineState,
@@ -4457,6 +4457,9 @@ impl Timeline {
                 let img = match self
                     .walredo_mgr
+                    .as_ref()
+                    .context("timeline has no walredo manager")
+                    .map_err(PageReconstructError::WalRedo)?
                     .request_redo(key, request_lsn, data.img, data.records, self.pg_version)
                     .await
                     .context("reconstruct a page image")
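
The overall shape of this patch is: make the walredo manager optional on the tenant, construct broken tenants without one, and have each caller either skip the call or turn the missing manager into an error. Below is a minimal illustrative sketch of that pattern, not the actual pageserver code: `WalRedoManager`, `Tenant`, and `PageReconstructError` here are simplified stand-ins for the real types.

    use std::sync::Arc;
    use std::time::Duration;

    struct WalRedoManager;

    impl WalRedoManager {
        fn maybe_quiesce(&self, _idle_threshold: Duration) {
            // stand-in: shut down the walredo process if it has been idle long enough
        }
    }

    #[derive(Debug)]
    enum PageReconstructError {
        WalRedo(String),
    }

    struct Tenant {
        // `None` only for broken tenants, which never reconstruct pages and so
        // never need a walredo manager (or, later, a handle into a process pool).
        walredo_mgr: Option<Arc<WalRedoManager>>,
    }

    impl Tenant {
        fn create_broken() -> Self {
            Tenant { walredo_mgr: None }
        }

        fn create_attached(mgr: Arc<WalRedoManager>) -> Self {
            Tenant { walredo_mgr: Some(mgr) }
        }

        // Callers that actually need redo turn the missing manager into an error,
        // mirroring the page-reconstruction change in the hunk above.
        fn request_redo(&self) -> Result<(), PageReconstructError> {
            let _mgr = self
                .walredo_mgr
                .as_ref()
                .ok_or_else(|| PageReconstructError::WalRedo("tenant has no walredo manager".into()))?;
            // ... hand the redo request to `_mgr` ...
            Ok(())
        }

        // Background loops simply skip the manager when it is absent.
        fn maybe_quiesce_walredo(&self, period: Duration) {
            if let Some(mgr) = &self.walredo_mgr {
                mgr.maybe_quiesce(period * 10);
            }
        }
    }

Keeping the `Option` at the struct boundary means the broken-tenant constructor needs no walredo (or pool) argument at all, which is exactly the plumbing the commit message wants to avoid.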