From a695713727f77891cf3cc85077c44dee4d7c84fd Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Wed, 23 Jul 2025 18:43:43 +0100 Subject: [PATCH 01/23] [sql-over-http] Reset session state between pooled connection re-use (#12681) Session variables can be set during one sql-over-http query and observed on another when that pooled connection is re-used. To address this we can use `RESET ALL;` before re-using the connection. LKB-2495 To be on the safe side, we can opt for a full `DISCARD ALL;`, but that might have performance regressions since it also clears any query plans. See pgbouncer docs https://www.pgbouncer.org/config.html#server_reset_query. `DISCARD ALL` is currently defined as: ``` CLOSE ALL; SET SESSION AUTHORIZATION DEFAULT; RESET ALL; DEALLOCATE ALL; UNLISTEN *; SELECT pg_advisory_unlock_all(); DISCARD PLANS; DISCARD TEMP; DISCARD SEQUENCES; ``` I've opted to keep everything here except the `DISCARD PLANS`. I've modified the code so that this query is executed in the background when a connection is returned to the pool, rather than when taken from the pool. This should marginally improve performance for Neon RLS by removing 1 (localhost) round trip. I don't believe that keeping query plans could be a security concern. It's a potential side channel, but I can't imagine what you could extract from it. --- Thanks to https://github.com/neondatabase/neon/pull/12659#discussion_r2219016205 for probing the idea in my head. --- libs/proxy/tokio-postgres2/src/client.rs | 28 ++++++++++- proxy/src/serverless/backend.rs | 19 +++++--- proxy/src/serverless/conn_pool.rs | 3 ++ proxy/src/serverless/conn_pool_lib.rs | 25 +++++----- proxy/src/serverless/http_conn_pool.rs | 20 +++++--- proxy/src/serverless/local_conn_pool.rs | 5 -- proxy/src/serverless/rest.rs | 10 ++-- proxy/src/serverless/sql_over_http.rs | 24 +--------- test_runner/fixtures/neon_fixtures.py | 35 ++++++++++++++ test_runner/regress/test_proxy.py | 60 ++++++++++++++++++++---- 10 files changed, 161 insertions(+), 68 deletions(-) diff --git a/libs/proxy/tokio-postgres2/src/client.rs b/libs/proxy/tokio-postgres2/src/client.rs index 068566e955..f8aceb5263 100644 --- a/libs/proxy/tokio-postgres2/src/client.rs +++ b/libs/proxy/tokio-postgres2/src/client.rs @@ -292,8 +292,32 @@ impl Client { simple_query::batch_execute(self.inner_mut(), query).await } - pub async fn discard_all(&mut self) -> Result { - self.batch_execute("discard all").await + /// Similar to `discard_all`, but it does not clear any query plans + /// + /// This runs in the background, so it can be executed without `await`ing. + pub fn reset_session_background(&mut self) -> Result<(), Error> { + // "CLOSE ALL": closes any cursors + // "SET SESSION AUTHORIZATION DEFAULT": resets the current_user back to the session_user + // "RESET ALL": resets any GUCs back to their session defaults. + // "DEALLOCATE ALL": deallocates any prepared statements + // "UNLISTEN *": stops listening on all channels + // "SELECT pg_advisory_unlock_all();": unlocks all advisory locks + // "DISCARD TEMP;": drops all temporary tables + // "DISCARD SEQUENCES;": deallocates all cached sequence state + + let _responses = self.inner_mut().send_simple_query( + "ROLLBACK; + CLOSE ALL; + SET SESSION AUTHORIZATION DEFAULT; + RESET ALL; + DEALLOCATE ALL; + UNLISTEN *; + SELECT pg_advisory_unlock_all(); + DISCARD TEMP; + DISCARD SEQUENCES;", + )?; + + Ok(()) } /// Begins a new database transaction. 
diff --git a/proxy/src/serverless/backend.rs b/proxy/src/serverless/backend.rs index 59e4b09bc9..31df7eb9f1 100644 --- a/proxy/src/serverless/backend.rs +++ b/proxy/src/serverless/backend.rs @@ -18,7 +18,7 @@ use tracing::{debug, info}; use super::AsyncRW; use super::conn_pool::poll_client; use super::conn_pool_lib::{Client, ConnInfo, EndpointConnPool, GlobalConnPool}; -use super::http_conn_pool::{self, HttpConnPool, Send, poll_http2_client}; +use super::http_conn_pool::{self, HttpConnPool, LocalProxyClient, poll_http2_client}; use super::local_conn_pool::{self, EXT_NAME, EXT_SCHEMA, EXT_VERSION, LocalConnPool}; use crate::auth::backend::local::StaticAuthRules; use crate::auth::backend::{ComputeCredentialKeys, ComputeCredentials, ComputeUserInfo}; @@ -40,7 +40,8 @@ use crate::rate_limiter::EndpointRateLimiter; use crate::types::{EndpointId, Host, LOCAL_PROXY_SUFFIX}; pub(crate) struct PoolingBackend { - pub(crate) http_conn_pool: Arc>>, + pub(crate) http_conn_pool: + Arc>>, pub(crate) local_pool: Arc>, pub(crate) pool: Arc>>, @@ -210,7 +211,7 @@ impl PoolingBackend { &self, ctx: &RequestContext, conn_info: ConnInfo, - ) -> Result, HttpConnError> { + ) -> Result, HttpConnError> { debug!("pool: looking for an existing connection"); if let Ok(Some(client)) = self.http_conn_pool.get(ctx, &conn_info) { return Ok(client); @@ -568,7 +569,7 @@ impl ConnectMechanism for TokioMechanism { } struct HyperMechanism { - pool: Arc>>, + pool: Arc>>, conn_info: ConnInfo, conn_id: uuid::Uuid, @@ -578,7 +579,7 @@ struct HyperMechanism { #[async_trait] impl ConnectMechanism for HyperMechanism { - type Connection = http_conn_pool::Client; + type Connection = http_conn_pool::Client; type ConnectError = HttpConnError; type Error = HttpConnError; @@ -632,7 +633,13 @@ async fn connect_http2( port: u16, timeout: Duration, tls: Option<&Arc>, -) -> Result<(http_conn_pool::Send, http_conn_pool::Connect), LocalProxyConnError> { +) -> Result< + ( + http_conn_pool::LocalProxyClient, + http_conn_pool::LocalProxyConnection, + ), + LocalProxyConnError, +> { let addrs = match host_addr { Some(addr) => vec![SocketAddr::new(addr, port)], None => lookup_host((host, port)) diff --git a/proxy/src/serverless/conn_pool.rs b/proxy/src/serverless/conn_pool.rs index 015c46f787..17305e30f1 100644 --- a/proxy/src/serverless/conn_pool.rs +++ b/proxy/src/serverless/conn_pool.rs @@ -190,6 +190,9 @@ mod tests { fn get_process_id(&self) -> i32 { 0 } + fn reset(&mut self) -> Result<(), postgres_client::Error> { + Ok(()) + } } fn create_inner() -> ClientInnerCommon { diff --git a/proxy/src/serverless/conn_pool_lib.rs b/proxy/src/serverless/conn_pool_lib.rs index ed5cc0ea03..6adca49723 100644 --- a/proxy/src/serverless/conn_pool_lib.rs +++ b/proxy/src/serverless/conn_pool_lib.rs @@ -7,10 +7,9 @@ use std::time::Duration; use clashmap::ClashMap; use parking_lot::RwLock; -use postgres_client::ReadyForQueryStatus; use rand::Rng; use smol_str::ToSmolStr; -use tracing::{Span, debug, info}; +use tracing::{Span, debug, info, warn}; use super::backend::HttpConnError; use super::conn_pool::ClientDataRemote; @@ -188,7 +187,7 @@ impl EndpointConnPool { self.pools.get_mut(&db_user) } - pub(crate) fn put(pool: &RwLock, conn_info: &ConnInfo, client: ClientInnerCommon) { + pub(crate) fn put(pool: &RwLock, conn_info: &ConnInfo, mut client: ClientInnerCommon) { let conn_id = client.get_conn_id(); let (max_conn, conn_count, pool_name) = { let pool = pool.read(); @@ -201,12 +200,17 @@ impl EndpointConnPool { }; if client.inner.is_closed() { - info!(%conn_id, "{}: 
throwing away connection '{conn_info}' because connection is closed", pool_name); + info!(%conn_id, "{pool_name}: throwing away connection '{conn_info}' because connection is closed"); + return; + } + + if let Err(error) = client.inner.reset() { + warn!(?error, %conn_id, "{pool_name}: throwing away connection '{conn_info}' because connection could not be reset"); return; } if conn_count >= max_conn { - info!(%conn_id, "{}: throwing away connection '{conn_info}' because pool is full", pool_name); + info!(%conn_id, "{pool_name}: throwing away connection '{conn_info}' because pool is full"); return; } @@ -691,6 +695,7 @@ impl Deref for Client { pub(crate) trait ClientInnerExt: Sync + Send + 'static { fn is_closed(&self) -> bool; fn get_process_id(&self) -> i32; + fn reset(&mut self) -> Result<(), postgres_client::Error>; } impl ClientInnerExt for postgres_client::Client { @@ -701,15 +706,13 @@ impl ClientInnerExt for postgres_client::Client { fn get_process_id(&self) -> i32 { self.get_process_id() } + + fn reset(&mut self) -> Result<(), postgres_client::Error> { + self.reset_session_background() + } } impl Discard<'_, C> { - pub(crate) fn check_idle(&mut self, status: ReadyForQueryStatus) { - let conn_info = &self.conn_info; - if status != ReadyForQueryStatus::Idle && std::mem::take(self.pool).strong_count() > 0 { - info!("pool: throwing away connection '{conn_info}' because connection is not idle"); - } - } pub(crate) fn discard(&mut self) { let conn_info = &self.conn_info; if std::mem::take(self.pool).strong_count() > 0 { diff --git a/proxy/src/serverless/http_conn_pool.rs b/proxy/src/serverless/http_conn_pool.rs index 7acd816026..bf6b934d20 100644 --- a/proxy/src/serverless/http_conn_pool.rs +++ b/proxy/src/serverless/http_conn_pool.rs @@ -23,8 +23,8 @@ use crate::protocol2::ConnectionInfoExtra; use crate::types::EndpointCacheKey; use crate::usage_metrics::{Ids, MetricCounter, USAGE_METRICS}; -pub(crate) type Send = http2::SendRequest>; -pub(crate) type Connect = +pub(crate) type LocalProxyClient = http2::SendRequest>; +pub(crate) type LocalProxyConnection = http2::Connection, BoxBody, TokioExecutor>; #[derive(Clone)] @@ -189,14 +189,14 @@ impl GlobalConnPool> { } pub(crate) fn poll_http2_client( - global_pool: Arc>>, + global_pool: Arc>>, ctx: &RequestContext, conn_info: &ConnInfo, - client: Send, - connection: Connect, + client: LocalProxyClient, + connection: LocalProxyConnection, conn_id: uuid::Uuid, aux: MetricsAuxInfo, -) -> Client { +) -> Client { let conn_gauge = Metrics::get().proxy.db_connections.guard(ctx.protocol()); let session_id = ctx.session_id(); @@ -285,7 +285,7 @@ impl Client { } } -impl ClientInnerExt for Send { +impl ClientInnerExt for LocalProxyClient { fn is_closed(&self) -> bool { self.is_closed() } @@ -294,4 +294,10 @@ impl ClientInnerExt for Send { // ideally throw something meaningful -1 } + + fn reset(&mut self) -> Result<(), postgres_client::Error> { + // We use HTTP/2.0 to talk to local proxy. HTTP is stateless, + // so there's nothing to reset. 
+ Ok(()) + } } diff --git a/proxy/src/serverless/local_conn_pool.rs b/proxy/src/serverless/local_conn_pool.rs index f63d84d66b..b8a502c37e 100644 --- a/proxy/src/serverless/local_conn_pool.rs +++ b/proxy/src/serverless/local_conn_pool.rs @@ -269,11 +269,6 @@ impl ClientInnerCommon { local_data.jti += 1; let token = resign_jwt(&local_data.key, payload, local_data.jti)?; - self.inner - .discard_all() - .await - .map_err(SqlOverHttpError::InternalPostgres)?; - // initiates the auth session // this is safe from query injections as the jwt format free of any escape characters. let query = format!("select auth.jwt_session_init('{token}')"); diff --git a/proxy/src/serverless/rest.rs b/proxy/src/serverless/rest.rs index 173c2629f7..c9b5e99747 100644 --- a/proxy/src/serverless/rest.rs +++ b/proxy/src/serverless/rest.rs @@ -46,7 +46,7 @@ use super::backend::{HttpConnError, LocalProxyConnError, PoolingBackend}; use super::conn_pool::AuthData; use super::conn_pool_lib::ConnInfo; use super::error::{ConnInfoError, Credentials, HttpCodeError, ReadPayloadError}; -use super::http_conn_pool::{self, Send}; +use super::http_conn_pool::{self, LocalProxyClient}; use super::http_util::{ ALLOW_POOL, CONN_STRING, NEON_REQUEST_ID, RAW_TEXT_OUTPUT, TXN_ISOLATION_LEVEL, TXN_READ_ONLY, get_conn_info, json_response, uuid_to_header_value, @@ -145,7 +145,7 @@ impl DbSchemaCache { endpoint_id: &EndpointCacheKey, auth_header: &HeaderValue, connection_string: &str, - client: &mut http_conn_pool::Client, + client: &mut http_conn_pool::Client, ctx: &RequestContext, config: &'static ProxyConfig, ) -> Result, RestError> { @@ -190,7 +190,7 @@ impl DbSchemaCache { &self, auth_header: &HeaderValue, connection_string: &str, - client: &mut http_conn_pool::Client, + client: &mut http_conn_pool::Client, ctx: &RequestContext, config: &'static ProxyConfig, ) -> Result<(ApiConfig, DbSchemaOwned), RestError> { @@ -430,7 +430,7 @@ struct BatchQueryData<'a> { } async fn make_local_proxy_request( - client: &mut http_conn_pool::Client, + client: &mut http_conn_pool::Client, headers: impl IntoIterator, body: QueryData<'_>, max_len: usize, @@ -461,7 +461,7 @@ async fn make_local_proxy_request( } async fn make_raw_local_proxy_request( - client: &mut http_conn_pool::Client, + client: &mut http_conn_pool::Client, headers: impl IntoIterator, body: String, ) -> Result, RestError> { diff --git a/proxy/src/serverless/sql_over_http.rs b/proxy/src/serverless/sql_over_http.rs index f254b41b5b..26f65379e7 100644 --- a/proxy/src/serverless/sql_over_http.rs +++ b/proxy/src/serverless/sql_over_http.rs @@ -735,9 +735,7 @@ impl QueryData { match batch_result { // The query successfully completed. 
- Ok(status) => { - discard.check_idle(status); - + Ok(_) => { let json_output = String::from_utf8(json_buf).expect("json should be valid utf8"); Ok(json_output) } @@ -793,7 +791,7 @@ impl BatchQueryData { { Ok(json_output) => { info!("commit"); - let status = transaction + transaction .commit() .await .inspect_err(|_| { @@ -802,7 +800,6 @@ impl BatchQueryData { discard.discard(); }) .map_err(SqlOverHttpError::Postgres)?; - discard.check_idle(status); json_output } Err(SqlOverHttpError::Cancelled(_)) => { @@ -815,17 +812,6 @@ impl BatchQueryData { return Err(SqlOverHttpError::Cancelled(SqlOverHttpCancel::Postgres)); } Err(err) => { - info!("rollback"); - let status = transaction - .rollback() - .await - .inspect_err(|_| { - // if we cannot rollback - for now don't return connection to pool - // TODO: get a query status from the error - discard.discard(); - }) - .map_err(SqlOverHttpError::Postgres)?; - discard.check_idle(status); return Err(err); } }; @@ -1012,12 +998,6 @@ impl Client { } impl Discard<'_> { - fn check_idle(&mut self, status: ReadyForQueryStatus) { - match self { - Discard::Remote(discard) => discard.check_idle(status), - Discard::Local(discard) => discard.check_idle(status), - } - } fn discard(&mut self) { match self { Discard::Remote(discard) => discard.discard(), diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index f7917f214a..33a18e4394 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3910,6 +3910,41 @@ class NeonProxy(PgProtocol): assert response.status_code == expected_code, f"response: {response.json()}" return response.json() + def http_multiquery(self, *queries, **kwargs): + # TODO maybe use default values if not provided + user = quote(kwargs["user"]) + password = quote(kwargs["password"]) + expected_code = kwargs.get("expected_code") + timeout = kwargs.get("timeout") + + json_queries = [] + for query in queries: + if type(query) is str: + json_queries.append({"query": query}) + else: + [query, params] = query + json_queries.append({"query": query, "params": params}) + + queries_str = [j["query"] for j in json_queries] + log.info(f"Executing http queries: {queries_str}") + + connstr = f"postgresql://{user}:{password}@{self.domain}:{self.proxy_port}/postgres" + response = requests.post( + f"https://{self.domain}:{self.external_http_port}/sql", + data=json.dumps({"queries": json_queries}), + headers={ + "Content-Type": "application/sql", + "Neon-Connection-String": connstr, + "Neon-Pool-Opt-In": "true", + }, + verify=str(self.test_output_dir / "proxy.crt"), + timeout=timeout, + ) + + if expected_code is not None: + assert response.status_code == expected_code, f"response: {response.json()}" + return response.json() + async def http2_query(self, query, args, **kwargs): # TODO maybe use default values if not provided user = kwargs["user"] diff --git a/test_runner/regress/test_proxy.py b/test_runner/regress/test_proxy.py index 9860658ba5..dadaf8a1cf 100644 --- a/test_runner/regress/test_proxy.py +++ b/test_runner/regress/test_proxy.py @@ -17,9 +17,6 @@ if TYPE_CHECKING: from typing import Any -GET_CONNECTION_PID_QUERY = "SELECT pid FROM pg_stat_activity WHERE state = 'active'" - - @pytest.mark.asyncio async def test_http_pool_begin_1(static_proxy: NeonProxy): static_proxy.safe_psql("create user http_auth with password 'http' superuser") @@ -479,7 +476,7 @@ def test_sql_over_http_pool(static_proxy: NeonProxy): def get_pid(status: int, pw: str, user="http_auth") -> Any: return 
static_proxy.http_query( - GET_CONNECTION_PID_QUERY, + "SELECT pg_backend_pid() as pid", [], user=user, password=pw, @@ -513,6 +510,35 @@ def test_sql_over_http_pool(static_proxy: NeonProxy): assert "password authentication failed for user" in res["message"] +def test_sql_over_http_pool_settings(static_proxy: NeonProxy): + static_proxy.safe_psql("create user http_auth with password 'http' superuser") + + def multiquery(*queries) -> Any: + results = static_proxy.http_multiquery( + *queries, + user="http_auth", + password="http", + expected_code=200, + ) + + return [result["rows"] for result in results["results"]] + + [[intervalstyle]] = static_proxy.safe_psql("SHOW IntervalStyle") + assert intervalstyle == "postgres", "'postgres' is the default IntervalStyle in postgres" + + result = multiquery("select '0 seconds'::interval as interval") + assert result[0][0]["interval"] == "00:00:00", "interval is expected in postgres format" + + result = multiquery( + "SET IntervalStyle = 'iso_8601'", + "select '0 seconds'::interval as interval", + ) + assert result[1][0]["interval"] == "PT0S", "interval is expected in ISO-8601 format" + + result = multiquery("select '0 seconds'::interval as interval") + assert result[0][0]["interval"] == "00:00:00", "interval is expected in postgres format" + + def test_sql_over_http_urlencoding(static_proxy: NeonProxy): static_proxy.safe_psql("create user \"http+auth$$\" with password '%+$^&*@!' superuser") @@ -544,23 +570,37 @@ def test_http_pool_begin(static_proxy: NeonProxy): query(200, "SELECT 1;") # Query that should succeed regardless of the transaction -def test_sql_over_http_pool_idle(static_proxy: NeonProxy): +def test_sql_over_http_pool_tx_reuse(static_proxy: NeonProxy): static_proxy.safe_psql("create user http_auth2 with password 'http' superuser") - def query(status: int, query: str) -> Any: + def query(status: int, query: str, *args) -> Any: return static_proxy.http_query( query, - [], + args, user="http_auth2", password="http", expected_code=status, ) - pid1 = query(200, GET_CONNECTION_PID_QUERY)["rows"][0]["pid"] + def query_pid_txid() -> Any: + result = query( + 200, + "SELECT pg_backend_pid() as pid, pg_current_xact_id() as txid", + ) + + return result["rows"][0] + + res0 = query_pid_txid() + time.sleep(0.02) query(200, "BEGIN") - pid2 = query(200, GET_CONNECTION_PID_QUERY)["rows"][0]["pid"] - assert pid1 != pid2 + + res1 = query_pid_txid() + res2 = query_pid_txid() + + assert res0["pid"] == res1["pid"], "connection should be reused" + assert res0["pid"] == res2["pid"], "connection should be reused" + assert res1["txid"] != res2["txid"], "txid should be different" @pytest.mark.timeout(60) From 20881ef65e6b89bb5b1761c5b27da7f63ac78277 Mon Sep 17 00:00:00 2001 From: Folke Behrens Date: Wed, 23 Jul 2025 20:21:36 +0200 Subject: [PATCH 02/23] otel: Use blocking reqwest in dedicated thread (#12699) ## Problem OTel 0.28+ by default uses blocking operations in a dedicated thread and doesn't start a tokio runtime. Reqwest as currently configured wants to spawn tokio tasks. ## Summary of changes Use blocking reqwest. This PR just mitigates the current issue. 
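For illustration only (not part of this patch): the incompatibility boils down to the async reqwest client wanting an ambient tokio runtime, which the exporter's dedicated thread does not provide. A minimal sketch of that failure, assuming a `tokio` dependency with the `rt` feature enabled:

```
// Hypothetical sketch: spawning onto an ambient tokio runtime from a plain OS
// thread panics because no runtime is entered, which is roughly what the
// async reqwest exporter runs into when OTel drives exports from its own thread.
fn main() {
    let spawn_attempt = std::panic::catch_unwind(|| {
        tokio::spawn(async {}); // panics: must be called inside a tokio runtime
    });
    assert!(spawn_attempt.is_err());

    // The blocking reqwest client selected by the `reqwest-blocking-client`
    // feature performs its I/O on the calling thread, so no runtime is needed.
}
```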
--- Cargo.toml | 2 +- libs/tracing-utils/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 00efe79554..d8efabdadc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -145,7 +145,7 @@ num-traits = "0.2.19" once_cell = "1.13" opentelemetry = "0.30" opentelemetry_sdk = "0.30" -opentelemetry-otlp = { version = "0.30", default-features = false, features = ["http-proto", "trace", "http", "reqwest-client"] } +opentelemetry-otlp = { version = "0.30", default-features = false, features = ["http-proto", "trace", "http", "reqwest-blocking-client"] } opentelemetry-semantic-conventions = "0.30" parking_lot = "0.12" parquet = { version = "53", default-features = false, features = ["zstd"] } diff --git a/libs/tracing-utils/Cargo.toml b/libs/tracing-utils/Cargo.toml index 49a6055b1e..1f8d05ae80 100644 --- a/libs/tracing-utils/Cargo.toml +++ b/libs/tracing-utils/Cargo.toml @@ -8,7 +8,7 @@ license.workspace = true hyper0.workspace = true opentelemetry = { workspace = true, features = ["trace"] } opentelemetry_sdk = { workspace = true, features = ["rt-tokio"] } -opentelemetry-otlp = { workspace = true, default-features = false, features = ["http-proto", "trace", "http", "reqwest-client"] } +opentelemetry-otlp = { workspace = true, default-features = false, features = ["http-proto", "trace", "http", "reqwest-blocking-client"] } opentelemetry-semantic-conventions.workspace = true tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } tracing.workspace = true From 63ea4b0579f17d811b5adc83d26f37b9619146c7 Mon Sep 17 00:00:00 2001 From: HaoyuHuang Date: Wed, 23 Jul 2025 11:30:33 -0700 Subject: [PATCH 03/23] A few more compute_tool changes (#12687) ## Summary of changes All changes are no-op except that the tracing-appender lib is upgraded from 0.2.2 to 0.2.3 --- Cargo.lock | 6 +- Cargo.toml | 1 + compute_tools/Cargo.toml | 1 + compute_tools/src/bin/compute_ctl.rs | 22 +- compute_tools/src/compute.rs | 35 +++- .../src/http/middleware/authorize.rs | 29 ++- .../src/http/routes/hadron_liveness_probe.rs | 34 ++++ compute_tools/src/http/routes/mod.rs | 2 + .../src/http/routes/refresh_configuration.rs | 34 ++++ compute_tools/src/http/server.rs | 19 +- compute_tools/src/installed_extensions.rs | 5 + compute_tools/src/lib.rs | 1 + compute_tools/src/logger.rs | 192 +++++++++++++++++- compute_tools/src/pg_isready.rs | 30 +++ 14 files changed, 397 insertions(+), 14 deletions(-) create mode 100644 compute_tools/src/http/routes/hadron_liveness_probe.rs create mode 100644 compute_tools/src/http/routes/refresh_configuration.rs create mode 100644 compute_tools/src/pg_isready.rs diff --git a/Cargo.lock b/Cargo.lock index f503b45577..133ca5def9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1388,6 +1388,7 @@ dependencies = [ "tower-http", "tower-otel", "tracing", + "tracing-appender", "tracing-opentelemetry", "tracing-subscriber", "tracing-utils", @@ -7934,11 +7935,12 @@ dependencies = [ [[package]] name = "tracing-appender" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ "crossbeam-channel", + "thiserror 1.0.69", "time", "tracing-subscriber", ] diff --git a/Cargo.toml b/Cargo.toml index d8efabdadc..18236a81f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -222,6 +222,7 @@ tracing-log = "0.2" tracing-opentelemetry = "0.31" tracing-serde = "0.2.0" 
tracing-subscriber = { version = "0.3", default-features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json"] } +tracing-appender = "0.2.3" try-lock = "0.2.5" test-log = { version = "0.2.17", default-features = false, features = ["log"] } twox-hash = { version = "1.6.3", default-features = false } diff --git a/compute_tools/Cargo.toml b/compute_tools/Cargo.toml index 496471acc7..558760b0ad 100644 --- a/compute_tools/Cargo.toml +++ b/compute_tools/Cargo.toml @@ -62,6 +62,7 @@ tokio-stream.workspace = true tonic.workspace = true tower-otel.workspace = true tracing.workspace = true +tracing-appender.workspace = true tracing-opentelemetry.workspace = true tracing-subscriber.workspace = true tracing-utils.workspace = true diff --git a/compute_tools/src/bin/compute_ctl.rs b/compute_tools/src/bin/compute_ctl.rs index 04723d6f3d..ee8a504429 100644 --- a/compute_tools/src/bin/compute_ctl.rs +++ b/compute_tools/src/bin/compute_ctl.rs @@ -51,6 +51,7 @@ use compute_tools::compute::{ use compute_tools::extension_server::get_pg_version_string; use compute_tools::logger::*; use compute_tools::params::*; +use compute_tools::pg_isready::get_pg_isready_bin; use compute_tools::spec::*; use rlimit::{Resource, setrlimit}; use signal_hook::consts::{SIGINT, SIGQUIT, SIGTERM}; @@ -194,7 +195,12 @@ fn main() -> Result<()> { .build()?; let _rt_guard = runtime.enter(); - let tracing_provider = init(cli.dev)?; + let mut log_dir = None; + if cli.lakebase_mode { + log_dir = std::env::var("COMPUTE_CTL_LOG_DIRECTORY").ok(); + } + + let (tracing_provider, _file_logs_guard) = init(cli.dev, log_dir)?; // enable core dumping for all child processes setrlimit(Resource::CORE, rlimit::INFINITY, rlimit::INFINITY)?; @@ -226,6 +232,8 @@ fn main() -> Result<()> { cli.installed_extensions_collection_interval, )), pg_init_timeout: cli.pg_init_timeout.map(Duration::from_secs), + pg_isready_bin: get_pg_isready_bin(&cli.pgbin), + instance_id: std::env::var("INSTANCE_ID").ok(), lakebase_mode: cli.lakebase_mode, }, config, @@ -238,8 +246,14 @@ fn main() -> Result<()> { deinit_and_exit(tracing_provider, exit_code); } -fn init(dev_mode: bool) -> Result> { - let provider = init_tracing_and_logging(DEFAULT_LOG_LEVEL)?; +fn init( + dev_mode: bool, + log_dir: Option, +) -> Result<( + Option, + Option, +)> { + let (provider, file_logs_guard) = init_tracing_and_logging(DEFAULT_LOG_LEVEL, &log_dir)?; let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?; thread::spawn(move || { @@ -250,7 +264,7 @@ fn init(dev_mode: bool) -> Result> { info!("compute build_tag: {}", &BUILD_TAG.to_string()); - Ok(provider) + Ok((provider, file_logs_guard)) } fn get_config(cli: &Cli) -> Result { diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs index b4d7a6fca9..56bf7b8632 100644 --- a/compute_tools/src/compute.rs +++ b/compute_tools/src/compute.rs @@ -113,10 +113,12 @@ pub struct ComputeNodeParams { /// Interval for installed extensions collection pub installed_extensions_collection_interval: Arc, - + /// Hadron instance ID of the compute node. + pub instance_id: Option, /// Timeout of PG compute startup in the Init state. pub pg_init_timeout: Option, - + // Path to the `pg_isready` binary. 
+ pub pg_isready_bin: String, pub lakebase_mode: bool, } @@ -486,6 +488,7 @@ impl ComputeNode { port: this.params.external_http_port, config: this.compute_ctl_config.clone(), compute_id: this.params.compute_id.clone(), + instance_id: this.params.instance_id.clone(), } .launch(&this); @@ -1785,6 +1788,34 @@ impl ComputeNode { Ok::<(), anyhow::Error>(()) } + // Signal to the configurator to refresh the configuration by pulling a new spec from the HCC. + // Note that this merely triggers a notification on a condition variable the configurator thread + // waits on. The configurator thread (in configurator.rs) pulls the new spec from the HCC and + // applies it. + pub async fn signal_refresh_configuration(&self) -> Result<()> { + let states_allowing_configuration_refresh = [ + ComputeStatus::Running, + ComputeStatus::Failed, + // ComputeStatus::RefreshConfigurationPending, + ]; + + let state = self.state.lock().expect("state lock poisoned"); + if states_allowing_configuration_refresh.contains(&state.status) { + // state.status = ComputeStatus::RefreshConfigurationPending; + self.state_changed.notify_all(); + Ok(()) + } else if state.status == ComputeStatus::Init { + // If the compute is in Init state, we can't refresh the configuration immediately, + // but we should be able to do that soon. + Ok(()) + } else { + Err(anyhow::anyhow!( + "Cannot refresh compute configuration in state {:?}", + state.status + )) + } + } + // Wrapped this around `pg_ctl reload`, but right now we don't use // `pg_ctl` for start / stop. #[instrument(skip_all)] diff --git a/compute_tools/src/http/middleware/authorize.rs b/compute_tools/src/http/middleware/authorize.rs index a82f46e062..407833bb0e 100644 --- a/compute_tools/src/http/middleware/authorize.rs +++ b/compute_tools/src/http/middleware/authorize.rs @@ -16,13 +16,29 @@ use crate::http::JsonResponse; #[derive(Clone, Debug)] pub(in crate::http) struct Authorize { compute_id: String, + // BEGIN HADRON + // Hadron instance ID. Only set if it's a Lakebase V1 a.k.a. Hadron instance. + instance_id: Option, + // END HADRON jwks: JwkSet, validation: Validation, } impl Authorize { - pub fn new(compute_id: String, jwks: JwkSet) -> Self { + pub fn new(compute_id: String, instance_id: Option, jwks: JwkSet) -> Self { let mut validation = Validation::new(Algorithm::EdDSA); + + // BEGIN HADRON + let use_rsa = jwks.keys.iter().any(|jwk| { + jwk.common + .key_algorithm + .is_some_and(|alg| alg == jsonwebtoken::jwk::KeyAlgorithm::RS256) + }); + if use_rsa { + validation = Validation::new(Algorithm::RS256); + } + // END HADRON + validation.validate_exp = true; // Unused by the control plane validation.validate_nbf = false; @@ -34,6 +50,7 @@ impl Authorize { Self { compute_id, + instance_id, jwks, validation, } @@ -47,10 +64,20 @@ impl AsyncAuthorizeRequest for Authorize { fn authorize(&mut self, mut request: Request) -> Self::Future { let compute_id = self.compute_id.clone(); + let is_hadron_instance = self.instance_id.is_some(); let jwks = self.jwks.clone(); let validation = self.validation.clone(); Box::pin(async move { + // BEGIN HADRON + // In Hadron deployments the "external" HTTP endpoint on compute_ctl can only be + // accessed by trusted components (enforced by dblet network policy), so we can bypass + // all auth here. 
+ if is_hadron_instance { + return Ok(request); + } + // END HADRON + let TypedHeader(Authorization(bearer)) = request .extract_parts::>>() .await diff --git a/compute_tools/src/http/routes/hadron_liveness_probe.rs b/compute_tools/src/http/routes/hadron_liveness_probe.rs new file mode 100644 index 0000000000..4f66b6b139 --- /dev/null +++ b/compute_tools/src/http/routes/hadron_liveness_probe.rs @@ -0,0 +1,34 @@ +use crate::pg_isready::pg_isready; +use crate::{compute::ComputeNode, http::JsonResponse}; +use axum::{extract::State, http::StatusCode, response::Response}; +use std::sync::Arc; + +/// NOTE: NOT ENABLED YET +/// Detect if the compute is alive. +/// Called by the liveness probe of the compute container. +pub(in crate::http) async fn hadron_liveness_probe( + State(compute): State>, +) -> Response { + let port = match compute.params.connstr.port() { + Some(port) => port, + None => { + return JsonResponse::error( + StatusCode::INTERNAL_SERVER_ERROR, + "Failed to get the port from the connection string", + ); + } + }; + match pg_isready(&compute.params.pg_isready_bin, port) { + Ok(_) => { + // The connection is successful, so the compute is alive. + // Return a 200 OK response. + JsonResponse::success(StatusCode::OK, "ok") + } + Err(e) => { + tracing::error!("Hadron liveness probe failed: {}", e); + // The connection failed, so the compute is not alive. + // Return a 500 Internal Server Error response. + JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e) + } + } +} diff --git a/compute_tools/src/http/routes/mod.rs b/compute_tools/src/http/routes/mod.rs index dd71f663eb..c0f68701c6 100644 --- a/compute_tools/src/http/routes/mod.rs +++ b/compute_tools/src/http/routes/mod.rs @@ -10,11 +10,13 @@ pub(in crate::http) mod extension_server; pub(in crate::http) mod extensions; pub(in crate::http) mod failpoints; pub(in crate::http) mod grants; +pub(in crate::http) mod hadron_liveness_probe; pub(in crate::http) mod insights; pub(in crate::http) mod lfc; pub(in crate::http) mod metrics; pub(in crate::http) mod metrics_json; pub(in crate::http) mod promote; +pub(in crate::http) mod refresh_configuration; pub(in crate::http) mod status; pub(in crate::http) mod terminate; diff --git a/compute_tools/src/http/routes/refresh_configuration.rs b/compute_tools/src/http/routes/refresh_configuration.rs new file mode 100644 index 0000000000..d00f5a285a --- /dev/null +++ b/compute_tools/src/http/routes/refresh_configuration.rs @@ -0,0 +1,34 @@ +// This file is added by Hadron + +use std::sync::Arc; + +use axum::{ + extract::State, + response::{IntoResponse, Response}, +}; +use http::StatusCode; +use tracing::debug; + +use crate::compute::ComputeNode; +// use crate::hadron_metrics::POSTGRES_PAGESTREAM_REQUEST_ERRORS; +use crate::http::JsonResponse; + +// The /refresh_configuration POST method is used to nudge compute_ctl to pull a new spec +// from the HCC and attempt to reconfigure Postgres with the new spec. The method does not wait +// for the reconfiguration to complete. Rather, it simply delivers a signal that will cause +// configuration to be reloaded in a best effort manner. Invocation of this method does not +// guarantee that a reconfiguration will occur. The caller should consider keep sending this +// request while it believes that the compute configuration is out of date. 
+pub(in crate::http) async fn refresh_configuration( + State(compute): State>, +) -> Response { + debug!("serving /refresh_configuration POST request"); + // POSTGRES_PAGESTREAM_REQUEST_ERRORS.inc(); + match compute.signal_refresh_configuration().await { + Ok(_) => StatusCode::OK.into_response(), + Err(e) => { + tracing::error!("error handling /refresh_configuration request: {}", e); + JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e) + } + } +} diff --git a/compute_tools/src/http/server.rs b/compute_tools/src/http/server.rs index f0fbca8263..9901a6de07 100644 --- a/compute_tools/src/http/server.rs +++ b/compute_tools/src/http/server.rs @@ -27,6 +27,7 @@ use super::{ }, }; use crate::compute::ComputeNode; +use crate::http::routes::{hadron_liveness_probe, refresh_configuration}; /// `compute_ctl` has two servers: internal and external. The internal server /// binds to the loopback interface and handles communication from clients on @@ -43,6 +44,7 @@ pub enum Server { port: u16, config: ComputeCtlConfig, compute_id: String, + instance_id: Option, }, } @@ -67,7 +69,12 @@ impl From<&Server> for Router> { post(extension_server::download_extension), ) .route("/extensions", post(extensions::install_extension)) - .route("/grants", post(grants::add_grant)); + .route("/grants", post(grants::add_grant)) + // Hadron: Compute-initiated configuration refresh + .route( + "/refresh_configuration", + post(refresh_configuration::refresh_configuration), + ); // Add in any testing support if cfg!(feature = "testing") { @@ -79,7 +86,10 @@ impl From<&Server> for Router> { router } Server::External { - config, compute_id, .. + config, + compute_id, + instance_id, + .. } => { let unauthenticated_router = Router::>::new() .route("/metrics", get(metrics::get_metrics)) @@ -100,8 +110,13 @@ impl From<&Server> for Router> { .route("/metrics.json", get(metrics_json::get_metrics)) .route("/status", get(status::get_status)) .route("/terminate", post(terminate::terminate)) + .route( + "/hadron_liveness_probe", + get(hadron_liveness_probe::hadron_liveness_probe), + ) .layer(AsyncRequireAuthorizationLayer::new(Authorize::new( compute_id.clone(), + instance_id.clone(), config.jwks.clone(), ))); diff --git a/compute_tools/src/installed_extensions.rs b/compute_tools/src/installed_extensions.rs index 90e1a17be4..5f60b711c8 100644 --- a/compute_tools/src/installed_extensions.rs +++ b/compute_tools/src/installed_extensions.rs @@ -2,6 +2,7 @@ use std::collections::HashMap; use anyhow::Result; use compute_api::responses::{InstalledExtension, InstalledExtensions}; +use once_cell::sync::Lazy; use tokio_postgres::error::Error as PostgresError; use tokio_postgres::{Client, Config, NoTls}; @@ -119,3 +120,7 @@ pub async fn get_installed_extensions( extensions: extensions_map.into_values().collect(), }) } + +pub fn initialize_metrics() { + Lazy::force(&INSTALLED_EXTENSIONS); +} diff --git a/compute_tools/src/lib.rs b/compute_tools/src/lib.rs index 5ffa2f004a..85a6f955d9 100644 --- a/compute_tools/src/lib.rs +++ b/compute_tools/src/lib.rs @@ -25,6 +25,7 @@ mod migration; pub mod monitor; pub mod params; pub mod pg_helpers; +pub mod pg_isready; pub mod pgbouncer; pub mod rsyslog; pub mod spec; diff --git a/compute_tools/src/logger.rs b/compute_tools/src/logger.rs index cd076472a6..83e666223c 100644 --- a/compute_tools/src/logger.rs +++ b/compute_tools/src/logger.rs @@ -1,7 +1,10 @@ use std::collections::HashMap; +use std::sync::{LazyLock, RwLock}; +use tracing::Subscriber; use tracing::info; -use 
tracing_subscriber::layer::SubscriberExt; +use tracing_appender; use tracing_subscriber::prelude::*; +use tracing_subscriber::{fmt, layer::SubscriberExt, registry::LookupSpan}; /// Initialize logging to stderr, and OpenTelemetry tracing and exporter. /// @@ -15,16 +18,44 @@ use tracing_subscriber::prelude::*; /// pub fn init_tracing_and_logging( default_log_level: &str, -) -> anyhow::Result> { + log_dir_opt: &Option, +) -> anyhow::Result<( + Option, + Option, +)> { // Initialize Logging let env_filter = tracing_subscriber::EnvFilter::try_from_default_env() .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_log_level)); + // Standard output streams let fmt_layer = tracing_subscriber::fmt::layer() .with_ansi(false) .with_target(false) .with_writer(std::io::stderr); + // Logs with file rotation. Files in `$log_dir/pgcctl.yyyy-MM-dd` + let (json_to_file_layer, _file_logs_guard) = if let Some(log_dir) = log_dir_opt { + std::fs::create_dir_all(log_dir)?; + let file_logs_appender = tracing_appender::rolling::RollingFileAppender::builder() + .rotation(tracing_appender::rolling::Rotation::DAILY) + .filename_prefix("pgcctl") + // Lib appends to existing files, so we will keep files for up to 2 days even on restart loops. + // At minimum, log-daemon will have 1 day to detect and upload a file (if created right before midnight). + .max_log_files(2) + .build(log_dir) + .expect("Initializing rolling file appender should succeed"); + let (file_logs_writer, _file_logs_guard) = + tracing_appender::non_blocking(file_logs_appender); + let json_to_file_layer = tracing_subscriber::fmt::layer() + .with_ansi(false) + .with_target(false) + .event_format(PgJsonLogShapeFormatter) + .with_writer(file_logs_writer); + (Some(json_to_file_layer), Some(_file_logs_guard)) + } else { + (None, None) + }; + // Initialize OpenTelemetry let provider = tracing_utils::init_tracing("compute_ctl", tracing_utils::ExportConfig::default()); @@ -35,12 +66,13 @@ pub fn init_tracing_and_logging( .with(env_filter) .with(otlp_layer) .with(fmt_layer) + .with(json_to_file_layer) .init(); tracing::info!("logging and tracing started"); utils::logging::replace_panic_hook_with_tracing_panic_hook().forget(); - Ok(provider) + Ok((provider, _file_logs_guard)) } /// Replace all newline characters with a special character to make it @@ -95,3 +127,157 @@ pub fn startup_context_from_env() -> Option { None } } + +/// Track relevant id's +const UNKNOWN_IDS: &str = r#""pg_instance_id": "", "pg_compute_id": """#; +static IDS: LazyLock> = LazyLock::new(|| RwLock::new(UNKNOWN_IDS.to_string())); + +pub fn update_ids(instance_id: &Option, compute_id: &Option) -> anyhow::Result<()> { + let ids = format!( + r#""pg_instance_id": "{}", "pg_compute_id": "{}""#, + instance_id.as_ref().map(|s| s.as_str()).unwrap_or_default(), + compute_id.as_ref().map(|s| s.as_str()).unwrap_or_default() + ); + let mut guard = IDS + .write() + .map_err(|e| anyhow::anyhow!("Log set id's rwlock poisoned: {}", e))?; + *guard = ids; + Ok(()) +} + +/// Massage compute_ctl logs into PG json log shape so we can use the same Lumberjack setup. 
+struct PgJsonLogShapeFormatter; +impl fmt::format::FormatEvent for PgJsonLogShapeFormatter +where + S: Subscriber + for<'a> LookupSpan<'a>, + N: for<'a> fmt::format::FormatFields<'a> + 'static, +{ + fn format_event( + &self, + ctx: &fmt::FmtContext<'_, S, N>, + mut writer: fmt::format::Writer<'_>, + event: &tracing::Event<'_>, + ) -> std::fmt::Result { + // Format values from the event's metadata, and open message string + let metadata = event.metadata(); + { + let ids_guard = IDS.read(); + let ids = ids_guard + .as_ref() + .map(|guard| guard.as_str()) + // Surpress so that we don't lose all uploaded/ file logs if something goes super wrong. We would notice the missing id's. + .unwrap_or(UNKNOWN_IDS); + write!( + &mut writer, + r#"{{"timestamp": "{}", "error_severity": "{}", "file_name": "{}", "backend_type": "compute_ctl_self", {}, "message": "#, + chrono::Utc::now().format("%Y-%m-%d %H:%M:%S%.3f GMT"), + metadata.level(), + metadata.target(), + ids + )?; + } + + let mut message = String::new(); + let message_writer = fmt::format::Writer::new(&mut message); + + // Gather the message + ctx.field_format().format_fields(message_writer, event)?; + + // TODO: any better options than to copy-paste this OSS span formatter? + // impl FormatEvent for Format + // https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/trait.FormatEvent.html#impl-FormatEvent%3CS,+N%3E-for-Format%3CFull,+T%3E + + // write message, close bracket, and new line + writeln!(writer, "{}}}", serde_json::to_string(&message).unwrap()) + } +} + +#[cfg(feature = "testing")] +#[cfg(test)] +mod test { + use super::*; + use std::{cell::RefCell, io}; + + // Use thread_local! instead of Mutex for test isolation + thread_local! { + static WRITER_OUTPUT: RefCell = const { RefCell::new(String::new()) }; + } + + #[derive(Clone, Default)] + struct StaticStringWriter; + + impl io::Write for StaticStringWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + let output = String::from_utf8(buf.to_vec()).expect("Invalid UTF-8 in test output"); + WRITER_OUTPUT.with(|s| s.borrow_mut().push_str(&output)); + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } + } + + impl fmt::MakeWriter<'_> for StaticStringWriter { + type Writer = Self; + + fn make_writer(&self) -> Self::Writer { + Self + } + } + + #[test] + fn test_log_pg_json_shape_formatter() { + // Use a scoped subscriber to prevent global state pollution + let subscriber = tracing_subscriber::registry().with( + tracing_subscriber::fmt::layer() + .with_ansi(false) + .with_target(false) + .event_format(PgJsonLogShapeFormatter) + .with_writer(StaticStringWriter), + ); + + let _ = update_ids(&Some("000".to_string()), &Some("111".to_string())); + + // Clear any previous test state + WRITER_OUTPUT.with(|s| s.borrow_mut().clear()); + + let messages = [ + "test message", + r#"json escape check: name="BatchSpanProcessor.Flush.ExportError" reason="Other(reqwest::Error { kind: Request, url: \"http://localhost:4318/v1/traces\", source: hyper_ + util::client::legacy::Error(Connect, ConnectError(\"tcp connect error\", Os { code: 111, kind: ConnectionRefused, message: \"Connection refused\" })) })" Failed during the export process"#, + ]; + + tracing::subscriber::with_default(subscriber, || { + for message in messages { + tracing::info!(message); + } + }); + tracing::info!("not test message"); + + // Get captured output + let output = WRITER_OUTPUT.with(|s| s.borrow().clone()); + + let json_strings: Vec<&str> = output.lines().collect(); + assert_eq!( + 
json_strings.len(), + messages.len(), + "Log didn't have the expected number of json strings." + ); + + let json_string_shape_regex = regex::Regex::new( + r#"\{"timestamp": "\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} GMT", "error_severity": "INFO", "file_name": ".+", "backend_type": "compute_ctl_self", "pg_instance_id": "000", "pg_compute_id": "111", "message": ".+"\}"# + ).unwrap(); + + for (i, expected_message) in messages.iter().enumerate() { + let json_string = json_strings[i]; + assert!( + json_string_shape_regex.is_match(json_string), + "Json log didn't match expected pattern:\n{json_string}", + ); + let parsed_json: serde_json::Value = serde_json::from_str(json_string).unwrap(); + let actual_message = parsed_json["message"].as_str().unwrap(); + assert_eq!(*expected_message, actual_message); + } + } +} diff --git a/compute_tools/src/pg_isready.rs b/compute_tools/src/pg_isready.rs new file mode 100644 index 0000000000..76c45d6b0a --- /dev/null +++ b/compute_tools/src/pg_isready.rs @@ -0,0 +1,30 @@ +use anyhow::{Context, anyhow}; + +// Run `/usr/local/bin/pg_isready -p {port}` +// Check the connectivity of PG +// Success means PG is listening on the port and accepting connections +// Note that PG does not need to authenticate the connection, nor reserve a connection quota for it. +// See https://www.postgresql.org/docs/current/app-pg-isready.html +pub fn pg_isready(bin: &str, port: u16) -> anyhow::Result<()> { + let child_result = std::process::Command::new(bin) + .arg("-p") + .arg(port.to_string()) + .spawn(); + + child_result + .context("spawn() failed") + .and_then(|mut child| child.wait().context("wait() failed")) + .and_then(|status| match status.success() { + true => Ok(()), + false => Err(anyhow!("process exited with {status}")), + }) + // wrap any prior error with the overall context that we couldn't run the command + .with_context(|| format!("could not run `{bin} --port {port}`")) +} + +// It's safe to assume pg_isready is under the same directory with postgres, +// because it is a PG util bin installed along with postgres +pub fn get_pg_isready_bin(pgbin: &str) -> String { + let split = pgbin.split("/").collect::>(); + split[0..split.len() - 1].join("/") + "/pg_isready" +} From 9e6ca2932fa8193974159aeae739440e6a851a95 Mon Sep 17 00:00:00 2001 From: "Alex Chi Z." <4198311+skyzh@users.noreply.github.com> Date: Wed, 23 Jul 2025 14:56:37 -0400 Subject: [PATCH 04/23] fix(test): convert bool to lowercase when invoking neon-cli (#12688) ## Problem There has been some inconsistencies of providing tenant config via `tenant_create` and via other tenant config APIs due to how the properties are processed: in `tenant_create`, the test framework calls neon-cli and therefore puts those properties in the cmdline. In other cases, it's done via the HTTP API by directly serializing to a JSON. When using the cmdline, the program only accepts serde bool that is true/false. ## Summary of changes Convert Python bool into `true`/`false` when using neon-cli. 
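As a quick illustration (a sketch assuming serde-style parsing such as `serde_json`, which may not be the exact deserializer neon-cli uses), only lowercase booleans are accepted, so Python's `str(True)` ("True") is rejected:

```
fn main() {
    // serde-style booleans must be spelled lowercase; Python's str(True) is "True".
    assert!(serde_json::from_str::<bool>("True").is_err());
    assert!(serde_json::from_str::<bool>("true").unwrap());
}
```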
Signed-off-by: Alex Chi Z --- test_runner/fixtures/neon_cli.py | 12 +++++++----- test_runner/performance/test_perf_many_relations.py | 4 +--- test_runner/regress/test_compaction.py | 4 ++-- test_runner/regress/test_ondemand_slru_download.py | 6 +++--- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/test_runner/fixtures/neon_cli.py b/test_runner/fixtures/neon_cli.py index 5ad00d155e..1931d6aaa5 100644 --- a/test_runner/fixtures/neon_cli.py +++ b/test_runner/fixtures/neon_cli.py @@ -212,11 +212,13 @@ class NeonLocalCli(AbstractNeonCli): pg_version, ] if conf is not None: - args.extend( - chain.from_iterable( - product(["-c"], (f"{key}:{value}" for key, value in conf.items())) - ) - ) + for key, value in conf.items(): + if isinstance(value, bool): + args.extend( + ["-c", f"{key}:{str(value).lower()}"] + ) # only accepts true/false not True/False + else: + args.extend(["-c", f"{key}:{value}"]) if set_default: args.append("--set-default") diff --git a/test_runner/performance/test_perf_many_relations.py b/test_runner/performance/test_perf_many_relations.py index da76c3ec86..9204a6a740 100644 --- a/test_runner/performance/test_perf_many_relations.py +++ b/test_runner/performance/test_perf_many_relations.py @@ -80,9 +80,7 @@ def test_perf_simple_many_relations_reldir( """ Test creating many relations in a single database. """ - env = neon_env_builder.init_start( - initial_tenant_conf={"rel_size_v2_enabled": "true" if reldir != "v1" else "false"} - ) + env = neon_env_builder.init_start(initial_tenant_conf={"rel_size_v2_enabled": reldir != "v1"}) ep = env.endpoints.create_start( "main", config_lines=[ diff --git a/test_runner/regress/test_compaction.py b/test_runner/regress/test_compaction.py index 76485c8321..be82ee806f 100644 --- a/test_runner/regress/test_compaction.py +++ b/test_runner/regress/test_compaction.py @@ -58,7 +58,7 @@ PREEMPT_GC_COMPACTION_TENANT_CONF = { "compaction_upper_limit": 6, "lsn_lease_length": "0s", # Enable gc-compaction - "gc_compaction_enabled": "true", + "gc_compaction_enabled": True, "gc_compaction_initial_threshold_kb": 1024, # At a small threshold "gc_compaction_ratio_percent": 1, # No PiTR interval and small GC horizon @@ -540,7 +540,7 @@ def test_pageserver_gc_compaction_trigger(neon_env_builder: NeonEnvBuilder): "pitr_interval": "0s", "gc_horizon": f"{1024 * 16}", "lsn_lease_length": "0s", - "gc_compaction_enabled": "true", + "gc_compaction_enabled": True, "gc_compaction_initial_threshold_kb": "16", "gc_compaction_ratio_percent": "50", # Do not generate image layers with create_image_layers diff --git a/test_runner/regress/test_ondemand_slru_download.py b/test_runner/regress/test_ondemand_slru_download.py index f0f12290cc..607a2921a9 100644 --- a/test_runner/regress/test_ondemand_slru_download.py +++ b/test_runner/regress/test_ondemand_slru_download.py @@ -16,7 +16,7 @@ def test_ondemand_download_pg_xact(neon_env_builder: NeonEnvBuilder, shard_count neon_env_builder.num_pageservers = shard_count tenant_conf = { - "lazy_slru_download": "true", + "lazy_slru_download": True, # set PITR interval to be small, so we can do GC "pitr_interval": "0 s", } @@ -82,7 +82,7 @@ def test_ondemand_download_replica(neon_env_builder: NeonEnvBuilder, shard_count neon_env_builder.num_pageservers = shard_count tenant_conf = { - "lazy_slru_download": "true", + "lazy_slru_download": True, } env = neon_env_builder.init_start( initial_tenant_conf=tenant_conf, initial_tenant_shard_count=shard_count @@ -141,7 +141,7 @@ def 
test_ondemand_download_after_wal_switch(neon_env_builder: NeonEnvBuilder): """ tenant_conf = { - "lazy_slru_download": "true", + "lazy_slru_download": True, } env = neon_env_builder.init_start(initial_tenant_conf=tenant_conf) From a56afee2692caeaa7a2d2323cd311a8a14d2a03a Mon Sep 17 00:00:00 2001 From: Mikhail Date: Wed, 23 Jul 2025 21:11:34 +0100 Subject: [PATCH 05/23] Accept primary compute spec in /promote, promotion corner cases testing (#12574) https://github.com/neondatabase/cloud/issues/19011 - Accept `ComputeSpec` in `/promote` instead of just passing safekeepers and LSN. Update API spec - Add corner case tests for promotion when promotion or perwarm fails (using failpoints) - Print root error for prewarm and promotion in status handlers --- compute_tools/src/compute_prewarm.rs | 34 ++++-- compute_tools/src/compute_promote.rs | 84 +++++++++---- compute_tools/src/http/openapi_spec.yaml | 30 ++--- compute_tools/src/http/routes/promote.rs | 10 +- control_plane/src/bin/neon_local.rs | 2 +- libs/compute_api/src/responses.rs | 7 +- test_runner/fixtures/endpoint/http.py | 13 +- test_runner/fixtures/neon_fixtures.py | 11 +- test_runner/regress/test_lfc_prewarm.py | 19 +++ test_runner/regress/test_replica_promotes.py | 118 ++++++++++++++++--- 10 files changed, 242 insertions(+), 86 deletions(-) diff --git a/compute_tools/src/compute_prewarm.rs b/compute_tools/src/compute_prewarm.rs index 07b4a596cc..97e62c1c80 100644 --- a/compute_tools/src/compute_prewarm.rs +++ b/compute_tools/src/compute_prewarm.rs @@ -90,6 +90,7 @@ impl ComputeNode { } /// If there is a prewarm request ongoing, return `false`, `true` otherwise. + /// Has a failpoint "compute-prewarm" pub fn prewarm_lfc(self: &Arc, from_endpoint: Option) -> bool { { let state = &mut self.state.lock().unwrap().lfc_prewarm_state; @@ -112,9 +113,8 @@ impl ComputeNode { Err(err) => { crate::metrics::LFC_PREWARM_ERRORS.inc(); error!(%err, "could not prewarm LFC"); - LfcPrewarmState::Failed { - error: err.to_string(), + error: format!("{err:#}"), } } }; @@ -135,16 +135,20 @@ impl ComputeNode { async fn prewarm_impl(&self, from_endpoint: Option) -> Result { let EndpointStoragePair { url, token } = self.endpoint_storage_pair(from_endpoint)?; + #[cfg(feature = "testing")] + fail::fail_point!("compute-prewarm", |_| { + bail!("prewarm configured to fail because of a failpoint") + }); + info!(%url, "requesting LFC state from endpoint storage"); let request = Client::new().get(&url).bearer_auth(token); let res = request.send().await.context("querying endpoint storage")?; - let status = res.status(); - match status { + match res.status() { StatusCode::OK => (), StatusCode::NOT_FOUND => { return Ok(false); } - _ => bail!("{status} querying endpoint storage"), + status => bail!("{status} querying endpoint storage"), } let mut uncompressed = Vec::new(); @@ -205,7 +209,7 @@ impl ComputeNode { crate::metrics::LFC_OFFLOAD_ERRORS.inc(); error!(%err, "could not offload LFC state to endpoint storage"); self.state.lock().unwrap().lfc_offload_state = LfcOffloadState::Failed { - error: err.to_string(), + error: format!("{err:#}"), }; } @@ -213,16 +217,22 @@ impl ComputeNode { let EndpointStoragePair { url, token } = self.endpoint_storage_pair(None)?; info!(%url, "requesting LFC state from Postgres"); - let mut compressed = Vec::new(); - ComputeNode::get_maintenance_client(&self.tokio_conn_conf) + let row = ComputeNode::get_maintenance_client(&self.tokio_conn_conf) .await .context("connecting to postgres")? 
.query_one("select neon.get_local_cache_state()", &[]) .await - .context("querying LFC state")? - .try_get::(0) - .context("deserializing LFC state") - .map(ZstdEncoder::new)? + .context("querying LFC state")?; + let state = row + .try_get::>(0) + .context("deserializing LFC state")?; + let Some(state) = state else { + info!(%url, "empty LFC state, not exporting"); + return Ok(()); + }; + + let mut compressed = Vec::new(); + ZstdEncoder::new(state) .read_to_end(&mut compressed) .await .context("compressing LFC state")?; diff --git a/compute_tools/src/compute_promote.rs b/compute_tools/src/compute_promote.rs index 42256faa22..a34368c531 100644 --- a/compute_tools/src/compute_promote.rs +++ b/compute_tools/src/compute_promote.rs @@ -1,11 +1,12 @@ use crate::compute::ComputeNode; use anyhow::{Context, Result, bail}; -use compute_api::{ - responses::{LfcPrewarmState, PromoteState, SafekeepersLsn}, - spec::ComputeMode, -}; +use compute_api::responses::{LfcPrewarmState, PromoteConfig, PromoteState}; +use compute_api::spec::ComputeMode; +use itertools::Itertools; +use std::collections::HashMap; use std::{sync::Arc, time::Duration}; use tokio::time::sleep; +use tracing::info; use utils::lsn::Lsn; impl ComputeNode { @@ -13,21 +14,22 @@ impl ComputeNode { /// and http client disconnects, this does not stop promotion, and subsequent /// calls block until promote finishes. /// Called by control plane on secondary after primary endpoint is terminated - pub async fn promote(self: &Arc, safekeepers_lsn: SafekeepersLsn) -> PromoteState { + /// Has a failpoint "compute-promotion" + pub async fn promote(self: &Arc, cfg: PromoteConfig) -> PromoteState { let cloned = self.clone(); + let promote_fn = async move || { + let Err(err) = cloned.promote_impl(cfg).await else { + return PromoteState::Completed; + }; + tracing::error!(%err, "promoting"); + PromoteState::Failed { + error: format!("{err:#}"), + } + }; + let start_promotion = || { let (tx, rx) = tokio::sync::watch::channel(PromoteState::NotPromoted); - tokio::spawn(async move { - tx.send(match cloned.promote_impl(safekeepers_lsn).await { - Ok(_) => PromoteState::Completed, - Err(err) => { - tracing::error!(%err, "promoting"); - PromoteState::Failed { - error: err.to_string(), - } - } - }) - }); + tokio::spawn(async move { tx.send(promote_fn().await) }); rx }; @@ -47,9 +49,7 @@ impl ComputeNode { task.borrow().clone() } - // Why do we have to supply safekeepers? 
- // For secondary we use primary_connection_conninfo so safekeepers field is empty - async fn promote_impl(&self, safekeepers_lsn: SafekeepersLsn) -> Result<()> { + async fn promote_impl(&self, mut cfg: PromoteConfig) -> Result<()> { { let state = self.state.lock().unwrap(); let mode = &state.pspec.as_ref().unwrap().spec.mode; @@ -73,7 +73,7 @@ impl ComputeNode { .await .context("connecting to postgres")?; - let primary_lsn = safekeepers_lsn.wal_flush_lsn; + let primary_lsn = cfg.wal_flush_lsn; let mut last_wal_replay_lsn: Lsn = Lsn::INVALID; const RETRIES: i32 = 20; for i in 0..=RETRIES { @@ -86,7 +86,7 @@ impl ComputeNode { if last_wal_replay_lsn >= primary_lsn { break; } - tracing::info!("Try {i}, replica lsn {last_wal_replay_lsn}, primary lsn {primary_lsn}"); + info!("Try {i}, replica lsn {last_wal_replay_lsn}, primary lsn {primary_lsn}"); sleep(Duration::from_secs(1)).await; } if last_wal_replay_lsn < primary_lsn { @@ -96,7 +96,7 @@ impl ComputeNode { // using $1 doesn't work with ALTER SYSTEM SET let safekeepers_sql = format!( "ALTER SYSTEM SET neon.safekeepers='{}'", - safekeepers_lsn.safekeepers + cfg.spec.safekeeper_connstrings.join(",") ); client .query(&safekeepers_sql, &[]) @@ -106,6 +106,12 @@ impl ComputeNode { .query("SELECT pg_reload_conf()", &[]) .await .context("reloading postgres config")?; + + #[cfg(feature = "testing")] + fail::fail_point!("compute-promotion", |_| { + bail!("promotion configured to fail because of a failpoint") + }); + let row = client .query_one("SELECT * FROM pg_promote()", &[]) .await @@ -125,8 +131,36 @@ impl ComputeNode { bail!("replica in read only mode after promotion"); } - let mut state = self.state.lock().unwrap(); - state.pspec.as_mut().unwrap().spec.mode = ComputeMode::Primary; - Ok(()) + { + let mut state = self.state.lock().unwrap(); + let spec = &mut state.pspec.as_mut().unwrap().spec; + spec.mode = ComputeMode::Primary; + let new_conf = cfg.spec.cluster.postgresql_conf.as_mut().unwrap(); + let existing_conf = spec.cluster.postgresql_conf.as_ref().unwrap(); + Self::merge_spec(new_conf, existing_conf); + } + info!("applied new spec, reconfiguring as primary"); + self.reconfigure() + } + + /// Merge old and new Postgres conf specs to apply on secondary. 
+ /// Change new spec's port and safekeepers since they are supplied + /// differenly + fn merge_spec(new_conf: &mut String, existing_conf: &str) { + let mut new_conf_set: HashMap<&str, &str> = new_conf + .split_terminator('\n') + .map(|e| e.split_once("=").expect("invalid item")) + .collect(); + new_conf_set.remove("neon.safekeepers"); + + let existing_conf_set: HashMap<&str, &str> = existing_conf + .split_terminator('\n') + .map(|e| e.split_once("=").expect("invalid item")) + .collect(); + new_conf_set.insert("port", existing_conf_set["port"]); + *new_conf = new_conf_set + .iter() + .map(|(k, v)| format!("{k}={v}")) + .join("\n"); } } diff --git a/compute_tools/src/http/openapi_spec.yaml b/compute_tools/src/http/openapi_spec.yaml index 3cf5ea7c51..ab729d62b5 100644 --- a/compute_tools/src/http/openapi_spec.yaml +++ b/compute_tools/src/http/openapi_spec.yaml @@ -96,7 +96,7 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/SafekeepersLsn" + $ref: "#/components/schemas/ComputeSchemaWithLsn" responses: 200: description: Promote succeeded or wasn't started @@ -297,14 +297,7 @@ paths: content: application/json: schema: - type: object - required: - - spec - properties: - spec: - # XXX: I don't want to explain current spec in the OpenAPI format, - # as it could be changed really soon. Consider doing it later. - type: object + $ref: "#/components/schemas/ComputeSchema" responses: 200: description: Compute configuration finished. @@ -591,18 +584,25 @@ components: type: string example: "1.0.0" - SafekeepersLsn: + ComputeSchema: type: object required: - - safekeepers + - spec + properties: + spec: + type: object + ComputeSchemaWithLsn: + type: object + required: + - spec - wal_flush_lsn properties: - safekeepers: - description: Primary replica safekeepers - type: string + spec: + $ref: "#/components/schemas/ComputeState" wal_flush_lsn: - description: Primary last WAL flush LSN type: string + description: "last WAL flush LSN" + example: "0/028F10D8" LfcPrewarmState: type: object diff --git a/compute_tools/src/http/routes/promote.rs b/compute_tools/src/http/routes/promote.rs index bc5f93b4da..7ca3464b63 100644 --- a/compute_tools/src/http/routes/promote.rs +++ b/compute_tools/src/http/routes/promote.rs @@ -1,14 +1,14 @@ use crate::http::JsonResponse; -use axum::Form; +use axum::extract::Json; use http::StatusCode; pub(in crate::http) async fn promote( compute: axum::extract::State>, - Form(safekeepers_lsn): Form, + Json(cfg): Json, ) -> axum::response::Response { - let state = compute.promote(safekeepers_lsn).await; - if let compute_api::responses::PromoteState::Failed { error } = state { - return JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, error); + let state = compute.promote(cfg).await; + if let compute_api::responses::PromoteState::Failed { error: _ } = state { + return JsonResponse::create_response(StatusCode::INTERNAL_SERVER_ERROR, state); } JsonResponse::success(StatusCode::OK, state) } diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index e036e9d44b..f68bc1ed48 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -1517,7 +1517,7 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res let endpoint = cplane .endpoints .get(endpoint_id.as_str()) - .ok_or_else(|| anyhow::anyhow!("endpoint {endpoint_id} not found"))?; + .ok_or_else(|| anyhow!("endpoint {endpoint_id} not found"))?; if !args.allow_multiple { cplane.check_conflicting_endpoints( diff --git 
a/libs/compute_api/src/responses.rs b/libs/compute_api/src/responses.rs index 5b8fc49750..2ef1e6aab8 100644 --- a/libs/compute_api/src/responses.rs +++ b/libs/compute_api/src/responses.rs @@ -108,11 +108,10 @@ pub enum PromoteState { Failed { error: String }, } -#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[derive(Deserialize, Default, Debug)] #[serde(rename_all = "snake_case")] -/// Result of /safekeepers_lsn -pub struct SafekeepersLsn { - pub safekeepers: String, +pub struct PromoteConfig { + pub spec: ComputeSpec, pub wal_flush_lsn: utils::lsn::Lsn, } diff --git a/test_runner/fixtures/endpoint/http.py b/test_runner/fixtures/endpoint/http.py index c43445e89d..64db2b1f17 100644 --- a/test_runner/fixtures/endpoint/http.py +++ b/test_runner/fixtures/endpoint/http.py @@ -87,9 +87,10 @@ class EndpointHttpClient(requests.Session): def prewarmed(): json = self.prewarm_lfc_status() status, err = json["status"], json.get("error") - assert status == "completed", f"{status}, {err=}" + assert status in ["failed", "completed", "skipped"], f"{status}, {err=}" wait_until(prewarmed, timeout=60) + assert self.prewarm_lfc_status()["status"] != "failed" def offload_lfc_status(self) -> dict[str, str]: res = self.get(self.offload_url) @@ -105,19 +106,19 @@ class EndpointHttpClient(requests.Session): def offloaded(): json = self.offload_lfc_status() status, err = json["status"], json.get("error") - assert status == "completed", f"{status}, {err=}" + assert status in ["failed", "completed"], f"{status}, {err=}" wait_until(offloaded) + assert self.offload_lfc_status()["status"] != "failed" - def promote(self, safekeepers_lsn: dict[str, Any], disconnect: bool = False): + def promote(self, promote_spec: dict[str, Any], disconnect: bool = False): url = f"http://localhost:{self.external_port}/promote" if disconnect: try: # send first request to start promote and disconnect - self.post(url, data=safekeepers_lsn, timeout=0.001) + self.post(url, json=promote_spec, timeout=0.001) except ReadTimeout: pass # wait on second request which returns on promotion finish - res = self.post(url, data=safekeepers_lsn) - res.raise_for_status() + res = self.post(url, json=promote_spec) json: dict[str, str] = res.json() return json diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 33a18e4394..e7763de0e7 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -4794,9 +4794,10 @@ class Endpoint(PgProtocol, LogUtils): m = re.search(r"=\s*(\S+)", line) assert m is not None, f"malformed config line {line}" size = m.group(1) - assert size_to_bytes(size) >= size_to_bytes("1MB"), ( - "LFC size cannot be set less than 1MB" - ) + if size_to_bytes(size) > 0: + assert size_to_bytes(size) >= size_to_bytes("1MB"), ( + "LFC size cannot be set less than 1MB" + ) lfc_path_escaped = str(lfc_path).replace("'", "''") config_lines = [ f"neon.file_cache_path = '{lfc_path_escaped}'", @@ -4951,6 +4952,10 @@ class Endpoint(PgProtocol, LogUtils): log.debug(json.dumps(dict(data_dict, **kwargs))) json.dump(dict(data_dict, **kwargs), file, indent=4) + def get_compute_spec(self) -> dict[str, Any]: + out = json.loads((Path(self.endpoint_path()) / "config.json").read_text())["spec"] + return cast("dict[str, Any]", out) + def respec_deep(self, **kwargs: Any) -> None: """ Update the endpoint.json file taking into account nested keys. 
diff --git a/test_runner/regress/test_lfc_prewarm.py b/test_runner/regress/test_lfc_prewarm.py index 0f0cf4cc6d..2bbe8c3e97 100644 --- a/test_runner/regress/test_lfc_prewarm.py +++ b/test_runner/regress/test_lfc_prewarm.py @@ -164,6 +164,25 @@ def test_lfc_prewarm(neon_simple_env: NeonEnv, method: PrewarmMethod): check_prewarmed(method, client, desired) +@pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping") +def test_lfc_prewarm_empty(neon_simple_env: NeonEnv): + """ + Test there are no errors when trying to offload or prewarm endpoint without cache using compute_ctl. + Endpoint without cache is simulated by turning off LFC manually, but in cloud/ setup this is + also reproduced on fresh endpoints + """ + env = neon_simple_env + ep = env.endpoints.create_start("main", config_lines=["neon.file_cache_size_limit=0"]) + client = ep.http_client() + conn = ep.connect() + cur = conn.cursor() + cur.execute("create schema neon; create extension neon with schema neon") + method = PrewarmMethod.COMPUTE_CTL + offload_lfc(method, client, cur) + prewarm_endpoint(method, client, cur, None) + assert client.prewarm_lfc_status()["status"] == "skipped" + + # autoprewarm isn't needed as we prewarm manually WORKLOAD_VALUES = METHOD_VALUES[:-1] WORKLOAD_IDS = METHOD_IDS[:-1] diff --git a/test_runner/regress/test_replica_promotes.py b/test_runner/regress/test_replica_promotes.py index 8d39ac123a..9415d6886c 100644 --- a/test_runner/regress/test_replica_promotes.py +++ b/test_runner/regress/test_replica_promotes.py @@ -90,6 +90,7 @@ def test_replica_promote(neon_simple_env: NeonEnv, method: PromoteMethod): secondary_cur.execute("select count(*) from t") assert secondary_cur.fetchone() == (100,) + primary_spec = primary.get_compute_spec() primary_endpoint_id = primary.endpoint_id stop_and_check_lsn(primary, expected_primary_lsn) @@ -99,10 +100,9 @@ def test_replica_promote(neon_simple_env: NeonEnv, method: PromoteMethod): if method == PromoteMethod.COMPUTE_CTL: client = secondary.http_client() client.prewarm_lfc(primary_endpoint_id) - # control plane knows safekeepers, simulate it by querying primary assert (lsn := primary.terminate_flush_lsn) - safekeepers_lsn = {"safekeepers": safekeepers, "wal_flush_lsn": lsn} - assert client.promote(safekeepers_lsn)["status"] == "completed" + promote_spec = {"spec": primary_spec, "wal_flush_lsn": str(lsn)} + assert client.promote(promote_spec)["status"] == "completed" else: promo_cur.execute(f"alter system set neon.safekeepers='{safekeepers}'") promo_cur.execute("select pg_reload_conf()") @@ -131,21 +131,35 @@ def test_replica_promote(neon_simple_env: NeonEnv, method: PromoteMethod): lsn_triple = get_lsn_triple(new_primary_cur) log.info(f"Secondary: LSN after workload is {lsn_triple}") - expected_promoted_lsn = Lsn(lsn_triple[2]) + expected_lsn = Lsn(lsn_triple[2]) with secondary.connect() as conn, conn.cursor() as new_primary_cur: new_primary_cur.execute("select payload from t") assert new_primary_cur.fetchall() == [(it,) for it in range(1, 201)] if method == PromoteMethod.COMPUTE_CTL: - # compute_ctl's /promote switches replica type to Primary so it syncs - # safekeepers on finish - stop_and_check_lsn(secondary, expected_promoted_lsn) + # compute_ctl's /promote switches replica type to Primary so it syncs safekeepers on finish + stop_and_check_lsn(secondary, expected_lsn) else: - # on testing postgres, we don't update replica type, secondaries don't - # sync so lsn should be None + # on testing postgres, we don't update replica type, secondaries don't sync so 
lsn should be None stop_and_check_lsn(secondary, None) + if method == PromoteMethod.COMPUTE_CTL: + secondary.stop() + # In production, compute ultimately receives new compute spec from cplane. + secondary.respec(mode="Primary") + secondary.start() + + with secondary.connect() as conn, conn.cursor() as new_primary_cur: + new_primary_cur.execute( + "INSERT INTO t (payload) SELECT generate_series(101, 200) RETURNING payload" + ) + assert new_primary_cur.fetchall() == [(it,) for it in range(101, 201)] + lsn_triple = get_lsn_triple(new_primary_cur) + log.info(f"Secondary: LSN after restart and workload is {lsn_triple}") + expected_lsn = Lsn(lsn_triple[2]) + stop_and_check_lsn(secondary, expected_lsn) + primary = env.endpoints.create_start(branch_name="main", endpoint_id="primary2") with primary.connect() as new_primary, new_primary.cursor() as new_primary_cur: @@ -154,10 +168,11 @@ def test_replica_promote(neon_simple_env: NeonEnv, method: PromoteMethod): log.info(f"New primary: Boot LSN is {lsn_triple}") new_primary_cur.execute("select count(*) from t") - assert new_primary_cur.fetchone() == (200,) + compute_ctl_count = 100 * (method == PromoteMethod.COMPUTE_CTL) + assert new_primary_cur.fetchone() == (200 + compute_ctl_count,) new_primary_cur.execute("INSERT INTO t (payload) SELECT generate_series(201, 300)") new_primary_cur.execute("select count(*) from t") - assert new_primary_cur.fetchone() == (300,) + assert new_primary_cur.fetchone() == (300 + compute_ctl_count,) stop_and_check_lsn(primary, expected_primary_lsn) @@ -175,18 +190,91 @@ def test_replica_promote_handler_disconnects(neon_simple_env: NeonEnv): cur.execute("create schema neon;create extension neon with schema neon") cur.execute("create table t(pk bigint GENERATED ALWAYS AS IDENTITY, payload integer)") cur.execute("INSERT INTO t(payload) SELECT generate_series(1, 100)") - cur.execute("show neon.safekeepers") - safekeepers = cur.fetchall()[0][0] primary.http_client().offload_lfc() + primary_spec = primary.get_compute_spec() primary_endpoint_id = primary.endpoint_id primary.stop(mode="immediate-terminate") assert (lsn := primary.terminate_flush_lsn) client = secondary.http_client() client.prewarm_lfc(primary_endpoint_id) - safekeepers_lsn = {"safekeepers": safekeepers, "wal_flush_lsn": lsn} - assert client.promote(safekeepers_lsn, disconnect=True)["status"] == "completed" + promote_spec = {"spec": primary_spec, "wal_flush_lsn": str(lsn)} + assert client.promote(promote_spec, disconnect=True)["status"] == "completed" + + with secondary.connect() as conn, conn.cursor() as cur: + cur.execute("select count(*) from t") + assert cur.fetchone() == (100,) + cur.execute("INSERT INTO t (payload) SELECT generate_series(101, 200) RETURNING payload") + cur.execute("select count(*) from t") + assert cur.fetchone() == (200,) + + +@pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping") +def test_replica_promote_fails(neon_simple_env: NeonEnv): + """ + Test that if a /promote route fails, we can safely start primary back + """ + env: NeonEnv = neon_simple_env + primary: Endpoint = env.endpoints.create_start(branch_name="main", endpoint_id="primary") + secondary: Endpoint = env.endpoints.new_replica_start(origin=primary, endpoint_id="secondary") + secondary.stop() + secondary.start(env={"FAILPOINTS": "compute-promotion=return(0)"}) + + with primary.connect() as conn, conn.cursor() as cur: + cur.execute("create schema neon;create extension neon with schema neon") + cur.execute("create table t(pk bigint GENERATED ALWAYS AS IDENTITY, 
payload integer)") + cur.execute("INSERT INTO t(payload) SELECT generate_series(1, 100)") + + primary.http_client().offload_lfc() + primary_spec = primary.get_compute_spec() + primary_endpoint_id = primary.endpoint_id + primary.stop(mode="immediate-terminate") + assert (lsn := primary.terminate_flush_lsn) + + client = secondary.http_client() + client.prewarm_lfc(primary_endpoint_id) + promote_spec = {"spec": primary_spec, "wal_flush_lsn": str(lsn)} + assert client.promote(promote_spec)["status"] == "failed" + secondary.stop() + + primary.start() + with primary.connect() as conn, conn.cursor() as cur: + cur.execute("select count(*) from t") + assert cur.fetchone() == (100,) + cur.execute("INSERT INTO t (payload) SELECT generate_series(101, 200) RETURNING payload") + cur.execute("select count(*) from t") + assert cur.fetchone() == (200,) + + +@pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping") +def test_replica_promote_prewarm_fails(neon_simple_env: NeonEnv): + """ + Test that if /lfc/prewarm route fails, we are able to promote + """ + env: NeonEnv = neon_simple_env + primary: Endpoint = env.endpoints.create_start(branch_name="main", endpoint_id="primary") + secondary: Endpoint = env.endpoints.new_replica_start(origin=primary, endpoint_id="secondary") + secondary.stop() + secondary.start(env={"FAILPOINTS": "compute-prewarm=return(0)"}) + + with primary.connect() as conn, conn.cursor() as cur: + cur.execute("create schema neon;create extension neon with schema neon") + cur.execute("create table t(pk bigint GENERATED ALWAYS AS IDENTITY, payload integer)") + cur.execute("INSERT INTO t(payload) SELECT generate_series(1, 100)") + + primary.http_client().offload_lfc() + primary_spec = primary.get_compute_spec() + primary_endpoint_id = primary.endpoint_id + primary.stop(mode="immediate-terminate") + assert (lsn := primary.terminate_flush_lsn) + + client = secondary.http_client() + with pytest.raises(AssertionError): + client.prewarm_lfc(primary_endpoint_id) + assert client.prewarm_lfc_status()["status"] == "failed" + promote_spec = {"spec": primary_spec, "wal_flush_lsn": str(lsn)} + assert client.promote(promote_spec)["status"] == "completed" with secondary.connect() as conn, conn.cursor() as cur: cur.execute("select count(*) from t") From 12e87d7a9fa7514bb66b02194a0cf64943bb2759 Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Wed, 23 Jul 2025 17:37:20 -0500 Subject: [PATCH 06/23] Add neon.lakebase_mode boolean GUC (#12714) This GUC will become useful for temporarily disabling Lakebase-specific features during the code merge. 
Signed-off-by: Tristan Partin --- pgxn/neon/neon.c | 11 +++++++++++ pgxn/neon/neon.h | 1 + 2 files changed, 12 insertions(+) diff --git a/pgxn/neon/neon.c b/pgxn/neon/neon.c index 5b9c7d600c..76f3cf2e87 100644 --- a/pgxn/neon/neon.c +++ b/pgxn/neon/neon.c @@ -48,6 +48,7 @@ PG_MODULE_MAGIC; void _PG_init(void); +bool lakebase_mode = false; static int running_xacts_overflow_policy; static bool monitor_query_exec_time = false; @@ -583,6 +584,16 @@ _PG_init(void) "neon_superuser", PGC_POSTMASTER, 0, NULL, NULL, NULL); + DefineCustomBoolVariable( + "neon.lakebase_mode", + "Is neon running in Lakebase?", + NULL, + &lakebase_mode, + false, + PGC_POSTMASTER, + 0, + NULL, NULL, NULL); + /* * Important: This must happen after other parts of the extension are * loaded, otherwise any settings to GUCs that were set before the diff --git a/pgxn/neon/neon.h b/pgxn/neon/neon.h index 20c850864a..e589d0cfba 100644 --- a/pgxn/neon/neon.h +++ b/pgxn/neon/neon.h @@ -21,6 +21,7 @@ extern int wal_acceptor_reconnect_timeout; extern int wal_acceptor_connection_timeout; extern int readahead_getpage_pull_timeout_ms; extern bool disable_wal_prev_lsn_checks; +extern bool lakebase_mode; extern bool AmPrewarmWorker; From 9b2e6f862acbc4e53707b34a0da2309b79aa5074 Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Wed, 23 Jul 2025 17:37:27 -0500 Subject: [PATCH 07/23] Set an upper limit on PG backpressure throttling (#12675) ## Problem Tenant split test revealed another bug with PG backpressure throttling that under some cases PS may never report its progress back to SK (e.g., observed when aborting tenant shard where the old shard needs to re-establish SK connection and re-ingest WALs from a much older LSN). In this case, PG may get stuck forever. ## Summary of changes As a general precaution that PS feedback mechanism may not always be reliable, this PR uses the previously introduced WAL write rate limit mechanism to slow down write rates instead of completely pausing it. The idea is to introduce a new `databricks_effective_max_wal_bytes_per_second`, which is set to `databricks_max_wal_mb_per_second` when no PS back pressure and is set to `10KB` when there is back pressure. This way, PG can still write to SK, though at a very low speed. The PR also fixes the problem that the current WAL rate limiting mechanism is too coarse grained and cannot enforce limits < 1MB. This is because it always resets the rate limiter after 1 second, even if PG could have written more data in the past second. The fix is to introduce a `batch_end_time_us` which records the expected end time of the current batch. For example, if PG writes 10MB of data in a single batch, and max WAL write rate is set as `1MB/s`, then `batch_end_time_us` will be set as 10 seconds later. ## How is this tested? Tweaked the existing test, and also did manual testing on dev. I set `max_replication_flush_lag` as 1GB, and loaded 500GB pgbench tables. It's expected to see PG gets throttled periodically because PS will accumulate 4GB of data before flushing. 
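
To make the throttling seen in the results below concrete, here is a minimal sketch of the batch accounting described in the summary of changes. It is a simplification under stated assumptions: the standalone struct and the `account_wal_bytes()` helper are illustrative names only, and the real code operates on shared-memory atomics inside `XLogBroadcastWalProposer()` in `walproposer_pg.c` (see the hunk below), with an additional cap of `kRateLimitMaxBatchUSecs` on the batch length that is omitted here.

```
/* Minimal sketch of batch-based WAL rate limiting (illustrative names). */
#include <stdbool.h>
#include <stdint.h>

#define USECS_PER_SEC 1000000ULL

typedef struct
{
    uint64_t max_bytes_per_second;  /* effective limit */
    uint64_t sent_bytes;            /* bytes sent in the current batch */
    uint64_t batch_start_time_us;
    uint64_t batch_end_time_us;
    bool     should_limit;          /* backends back off until batch_end_time_us */
} RateLimiterSketch;

/* Called after sending `bytes` of WAL at time `now_us`. */
static void
account_wal_bytes(RateLimiterSketch *lim, uint64_t bytes, uint64_t now_us)
{
    if (now_us >= lim->batch_end_time_us)
    {
        /* Start a new batch, tentatively one second long. */
        lim->sent_bytes = 0;
        lim->should_limit = false;
        lim->batch_start_time_us = now_us;
        lim->batch_end_time_us = now_us + USECS_PER_SEC;
    }

    lim->sent_bytes += bytes;
    if (lim->sent_bytes > lim->max_bytes_per_second)
    {
        /*
         * Overshot the per-second budget: stretch the batch so the average
         * rate over the whole batch matches the limit, e.g. 10MB written
         * against a 1MB/s limit turns the batch into 10 seconds.
         */
        uint64_t throttle_us = USECS_PER_SEC * lim->sent_bytes
            / (lim->max_bytes_per_second ? lim->max_bytes_per_second : 1);

        lim->should_limit = true;
        lim->batch_end_time_us = lim->batch_start_time_us + throttle_us;
    }
}
```
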
Results: when PG is throttled: ``` 9500000 of 3300000000 tuples (0%) done (elapsed 10.36 s, remaining 3587.62 s) 9600000 of 3300000000 tuples (0%) done (elapsed 124.07 s, remaining 42523.59 s) 9700000 of 3300000000 tuples (0%) done (elapsed 255.79 s, remaining 86763.97 s) 9800000 of 3300000000 tuples (0%) done (elapsed 315.89 s, remaining 106056.52 s) 9900000 of 3300000000 tuples (0%) done (elapsed 412.75 s, remaining 137170.58 s) ``` when PS just flushed: ``` 18100000 of 3300000000 tuples (0%) done (elapsed 433.80 s, remaining 78655.96 s) 18200000 of 3300000000 tuples (0%) done (elapsed 433.85 s, remaining 78231.71 s) 18300000 of 3300000000 tuples (0%) done (elapsed 433.90 s, remaining 77810.62 s) 18400000 of 3300000000 tuples (0%) done (elapsed 433.96 s, remaining 77395.86 s) 18500000 of 3300000000 tuples (0%) done (elapsed 434.03 s, remaining 76987.27 s) 18600000 of 3300000000 tuples (0%) done (elapsed 434.08 s, remaining 76579.59 s) 18700000 of 3300000000 tuples (0%) done (elapsed 434.13 s, remaining 76177.12 s) 18800000 of 3300000000 tuples (0%) done (elapsed 434.19 s, remaining 75779.45 s) 18900000 of 3300000000 tuples (0%) done (elapsed 434.84 s, remaining 75489.40 s) 19000000 of 3300000000 tuples (0%) done (elapsed 434.89 s, remaining 75097.90 s) 19100000 of 3300000000 tuples (0%) done (elapsed 434.94 s, remaining 74712.56 s) 19200000 of 3300000000 tuples (0%) done (elapsed 498.93 s, remaining 85254.20 s) 19300000 of 3300000000 tuples (0%) done (elapsed 498.97 s, remaining 84817.95 s) 19400000 of 3300000000 tuples (0%) done (elapsed 623.80 s, remaining 105486.76 s) 19500000 of 3300000000 tuples (0%) done (elapsed 745.86 s, remaining 125476.51 s) ``` Co-authored-by: Chen Luo --- libs/walproposer/src/api_bindings.rs | 4 +- pgxn/neon/walproposer.h | 15 ++- pgxn/neon/walproposer_pg.c | 161 ++++++++++++++++++------- test_runner/regress/test_pg_regress.py | 17 --- test_runner/regress/test_sharding.py | 130 +++++++++++++------- 5 files changed, 220 insertions(+), 107 deletions(-) diff --git a/libs/walproposer/src/api_bindings.rs b/libs/walproposer/src/api_bindings.rs index 825a137d0f..c3be1e1dae 100644 --- a/libs/walproposer/src/api_bindings.rs +++ b/libs/walproposer/src/api_bindings.rs @@ -429,9 +429,11 @@ pub fn empty_shmem() -> crate::bindings::WalproposerShmemState { }; let empty_wal_rate_limiter = crate::bindings::WalRateLimiter { + effective_max_wal_bytes_per_second: crate::bindings::pg_atomic_uint32 { value: 0 }, should_limit: crate::bindings::pg_atomic_uint32 { value: 0 }, sent_bytes: 0, - last_recorded_time_us: crate::bindings::pg_atomic_uint64 { value: 0 }, + batch_start_time_us: crate::bindings::pg_atomic_uint64 { value: 0 }, + batch_end_time_us: crate::bindings::pg_atomic_uint64 { value: 0 }, }; crate::bindings::WalproposerShmemState { diff --git a/pgxn/neon/walproposer.h b/pgxn/neon/walproposer.h index 19d23925a5..5507294c3b 100644 --- a/pgxn/neon/walproposer.h +++ b/pgxn/neon/walproposer.h @@ -389,12 +389,21 @@ typedef struct PageserverFeedback */ typedef struct WalRateLimiter { - /* If the value is 1, PG backends will hit backpressure. */ + /* The effective wal write rate. Could be changed dynamically + based on whether PG has backpressure or not.*/ + pg_atomic_uint32 effective_max_wal_bytes_per_second; + /* If the value is 1, PG backends will hit backpressure until the time has past batch_end_time_us. */ pg_atomic_uint32 should_limit; /* The number of bytes sent in the current second. */ uint64 sent_bytes; - /* The last recorded time in microsecond. 
*/ - pg_atomic_uint64 last_recorded_time_us; + /* The timestamp when the write starts in the current batch. A batch is a time interval (e.g., )that we + track and throttle writes. Most times a batch is 1s, but it could become larger if the PG overwrites the WALs + and we will adjust the batch accordingly to compensate (e.g., if PG writes 10MB at once and max WAL write rate + is 1MB/s, then the current batch will become 10s). */ + pg_atomic_uint64 batch_start_time_us; + /* The timestamp (in the future) that the current batch should end and accept more writes + (after should_limit is set to 1). */ + pg_atomic_uint64 batch_end_time_us; } WalRateLimiter; /* END_HADRON */ diff --git a/pgxn/neon/walproposer_pg.c b/pgxn/neon/walproposer_pg.c index d43d372c2e..874a1590ac 100644 --- a/pgxn/neon/walproposer_pg.c +++ b/pgxn/neon/walproposer_pg.c @@ -68,6 +68,14 @@ int safekeeper_proto_version = 3; char *safekeeper_conninfo_options = ""; /* BEGIN_HADRON */ int databricks_max_wal_mb_per_second = -1; +// during throttling, we will limit the effective WAL write rate to 10KB. +// PG can still push some WAL to SK, but at a very low rate. +int databricks_throttled_max_wal_bytes_per_second = 10 * 1024; +// The max sleep time of a batch. This is to make sure the rate limiter does not +// overshoot too much and block PG for a very long time. +// This is set as 5 minuetes for now. PG can send as much as 10MB of WALs to SK in one batch, +// so this effectively caps the write rate to ~30KB/s in the worst case. +static uint64 kRateLimitMaxBatchUSecs = 300 * USECS_PER_SEC; /* END_HADRON */ /* Set to true in the walproposer bgw. */ @@ -86,6 +94,7 @@ static HotStandbyFeedback agg_hs_feedback; static void nwp_register_gucs(void); static void assign_neon_safekeepers(const char *newval, void *extra); static uint64 backpressure_lag_impl(void); +static uint64 hadron_backpressure_lag_impl(void); static uint64 startup_backpressure_wrap(void); static bool backpressure_throttling_impl(void); static void walprop_register_bgworker(void); @@ -110,9 +119,22 @@ static void rm_safekeeper_event_set(Safekeeper *to_remove, bool is_sk); static void CheckGracefulShutdown(WalProposer *wp); -// HADRON +/* BEGIN_HADRON */ shardno_t get_num_shards(void); +static int positive_mb_to_bytes(int mb) +{ + if (mb <= 0) + { + return mb; + } + else + { + return mb * 1024 * 1024; + } +} +/* END_HADRON */ + static void init_walprop_config(bool syncSafekeepers) { @@ -260,6 +282,16 @@ nwp_register_gucs(void) PGC_SUSET, GUC_UNIT_MB, NULL, NULL, NULL); + + DefineCustomIntVariable( + "databricks.throttled_max_wal_bytes_per_second", + "The maximum WAL bytes per second when PG is being throttled.", + NULL, + &databricks_throttled_max_wal_bytes_per_second, + 10 * 1024, 0, INT_MAX, + PGC_SUSET, + GUC_UNIT_BYTE, + NULL, NULL, NULL); /* END_HADRON */ } @@ -398,19 +430,65 @@ assign_neon_safekeepers(const char *newval, void *extra) pfree(oldval); } -/* Check if we need to suspend inserts because of lagging replication. 
*/ -static uint64 -backpressure_lag_impl(void) +/* BEGIN_HADRON */ +static uint64 hadron_backpressure_lag_impl(void) { struct WalproposerShmemState* state = NULL; + uint64 lag = 0; - /* BEGIN_HADRON */ if(max_cluster_size < 0){ // if max cluster size is not set, then we don't apply backpressure because we're reconfiguring PG return 0; } - /* END_HADRON */ + lag = backpressure_lag_impl(); + state = GetWalpropShmemState(); + if ( state != NULL && databricks_max_wal_mb_per_second != -1 ) + { + int old_limit = pg_atomic_read_u32(&state->wal_rate_limiter.effective_max_wal_bytes_per_second); + int new_limit = (lag == 0)? positive_mb_to_bytes(databricks_max_wal_mb_per_second) : databricks_throttled_max_wal_bytes_per_second; + if( old_limit != new_limit ) + { + uint64 batch_start_time = pg_atomic_read_u64(&state->wal_rate_limiter.batch_start_time_us); + uint64 batch_end_time = pg_atomic_read_u64(&state->wal_rate_limiter.batch_end_time_us); + // the rate limit has changed, we need to reset the rate limiter's batch end time + pg_atomic_write_u32(&state->wal_rate_limiter.effective_max_wal_bytes_per_second, new_limit); + pg_atomic_write_u64(&state->wal_rate_limiter.batch_end_time_us, Min(batch_start_time + USECS_PER_SEC, batch_end_time)); + } + if( new_limit == -1 ) + { + return 0; + } + + if (pg_atomic_read_u32(&state->wal_rate_limiter.should_limit) == true) + { + TimestampTz now = GetCurrentTimestamp(); + struct WalRateLimiter *limiter = &state->wal_rate_limiter; + uint64 batch_end_time = pg_atomic_read_u64(&limiter->batch_end_time_us); + if ( now >= batch_end_time ) + { + /* + * The backend has past the batch end time and it's time to push more WALs. + * If the backends are pushing WALs too fast, the wal proposer will rate limit them again. + */ + uint32 expected = true; + pg_atomic_compare_exchange_u32(&state->wal_rate_limiter.should_limit, &expected, false); + return 0; + } + return Max(lag, 1); + } + // rate limiter decides to not throttle, then return 0. + return 0; + } + + return lag; +} +/* END_HADRON */ + +/* Check if we need to suspend inserts because of lagging replication. */ +static uint64 +backpressure_lag_impl(void) +{ if (max_replication_apply_lag > 0 || max_replication_flush_lag > 0 || max_replication_write_lag > 0) { XLogRecPtr writePtr; @@ -444,30 +522,6 @@ backpressure_lag_impl(void) return (myFlushLsn - applyPtr - max_replication_apply_lag * MB); } } - - /* BEGIN_HADRON */ - if (databricks_max_wal_mb_per_second == -1) { - return 0; - } - - state = GetWalpropShmemState(); - if (state != NULL && !!pg_atomic_read_u32(&state->wal_rate_limiter.should_limit)) - { - TimestampTz now = GetCurrentTimestamp(); - struct WalRateLimiter *limiter = &state->wal_rate_limiter; - uint64 last_recorded_time = pg_atomic_read_u64(&limiter->last_recorded_time_us); - if (now - last_recorded_time > USECS_PER_SEC) - { - /* - * The backend has past 1 second since the last recorded time and it's time to push more WALs. - * If the backends are pushing WALs too fast, the wal proposer will rate limit them again. 
- */ - uint32 expected = true; - pg_atomic_compare_exchange_u32(&state->wal_rate_limiter.should_limit, &expected, false); - } - return 1; - } - /* END_HADRON */ return 0; } @@ -482,9 +536,9 @@ startup_backpressure_wrap(void) if (AmStartupProcess() || !IsUnderPostmaster) return 0; - delay_backend_us = &backpressure_lag_impl; + delay_backend_us = &hadron_backpressure_lag_impl; - return backpressure_lag_impl(); + return hadron_backpressure_lag_impl(); } /* @@ -514,8 +568,10 @@ WalproposerShmemInit(void) pg_atomic_init_u64(&walprop_shared->backpressureThrottlingTime, 0); pg_atomic_init_u64(&walprop_shared->currentClusterSize, 0); /* BEGIN_HADRON */ + pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.effective_max_wal_bytes_per_second, -1); pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.should_limit, 0); - pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.last_recorded_time_us, 0); + pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.batch_start_time_us, 0); + pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.batch_end_time_us, 0); /* END_HADRON */ } } @@ -530,8 +586,10 @@ WalproposerShmemInit_SyncSafekeeper(void) pg_atomic_init_u64(&walprop_shared->mineLastElectedTerm, 0); pg_atomic_init_u64(&walprop_shared->backpressureThrottlingTime, 0); /* BEGIN_HADRON */ + pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.effective_max_wal_bytes_per_second, -1); pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.should_limit, 0); - pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.last_recorded_time_us, 0); + pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.batch_start_time_us, 0); + pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.batch_end_time_us, 0); /* END_HADRON */ } @@ -563,7 +621,7 @@ backpressure_throttling_impl(void) return retry; /* Calculate replicas lag */ - lag = backpressure_lag_impl(); + lag = hadron_backpressure_lag_impl(); if (lag == 0) return retry; @@ -659,7 +717,7 @@ record_pageserver_feedback(PageserverFeedback *ps_feedback, shardno_t num_shards SpinLockAcquire(&walprop_shared->mutex); - // Hadron: Update the num_shards from the source-of-truth (shard map) lazily when we receive + // Hadron: Update the num_shards from the source-of-truth (shard map) lazily when we receive // a new pageserver feedback. 
walprop_shared->num_shards = Max(walprop_shared->num_shards, num_shards); @@ -1479,6 +1537,7 @@ XLogBroadcastWalProposer(WalProposer *wp) XLogRecPtr endptr; struct WalproposerShmemState *state = NULL; TimestampTz now = 0; + int effective_max_wal_bytes_per_second = 0; /* Start from the last sent position */ startptr = sentPtr; @@ -1533,22 +1592,36 @@ XLogBroadcastWalProposer(WalProposer *wp) /* BEGIN_HADRON */ state = GetWalpropShmemState(); - if (databricks_max_wal_mb_per_second != -1 && state != NULL) + effective_max_wal_bytes_per_second = pg_atomic_read_u32(&state->wal_rate_limiter.effective_max_wal_bytes_per_second); + if (effective_max_wal_bytes_per_second != -1 && state != NULL) { - uint64 max_wal_bytes = (uint64) databricks_max_wal_mb_per_second * 1024 * 1024; struct WalRateLimiter *limiter = &state->wal_rate_limiter; - uint64 last_recorded_time = pg_atomic_read_u64(&limiter->last_recorded_time_us); - if (now - last_recorded_time > USECS_PER_SEC) + uint64 batch_end_time = pg_atomic_read_u64(&limiter->batch_end_time_us); + if ( now >= batch_end_time ) { - /* Reset the rate limiter */ + // Reset the rate limiter to start a new batch limiter->sent_bytes = 0; - pg_atomic_write_u64(&limiter->last_recorded_time_us, now); pg_atomic_write_u32(&limiter->should_limit, false); + pg_atomic_write_u64(&limiter->batch_start_time_us, now); + /* tentatively assign the batch end time as 1s from now. This could result in one of the following cases: + 1. If sent_bytes does not reach effective_max_wal_bytes_per_second in 1s, + then we will reset the current batch and clear sent_bytes. No throttling happens. + 2. Otherwise, we will recompute the end time (below) based on how many bytes are actually written, + and throttle PG until the batch end time. */ + pg_atomic_write_u64(&limiter->batch_end_time_us, now + USECS_PER_SEC); } limiter->sent_bytes += (endptr - startptr); - if (limiter->sent_bytes > max_wal_bytes) + if (limiter->sent_bytes > effective_max_wal_bytes_per_second) { + uint64_t batch_start_time = pg_atomic_read_u64(&limiter->batch_start_time_us); + uint64 throttle_usecs = USECS_PER_SEC * limiter->sent_bytes / Max(effective_max_wal_bytes_per_second, 1); + if (throttle_usecs > kRateLimitMaxBatchUSecs){ + elog(LOG, "throttle_usecs %lu is too large, limiting to %lu", throttle_usecs, kRateLimitMaxBatchUSecs); + throttle_usecs = kRateLimitMaxBatchUSecs; + } + pg_atomic_write_u32(&limiter->should_limit, true); + pg_atomic_write_u64(&limiter->batch_end_time_us, batch_start_time + throttle_usecs); } } /* END_HADRON */ @@ -2052,7 +2125,7 @@ walprop_pg_process_safekeeper_feedback(WalProposer *wp, Safekeeper *sk) /* Only one main shard sends non-zero currentClusterSize */ if (sk->appendResponse.ps_feedback.currentClusterSize > 0) SetNeonCurrentClusterSize(sk->appendResponse.ps_feedback.currentClusterSize); - + if (min_feedback.disk_consistent_lsn != standby_apply_lsn) { standby_apply_lsn = min_feedback.disk_consistent_lsn; diff --git a/test_runner/regress/test_pg_regress.py b/test_runner/regress/test_pg_regress.py index e7c7abf397..cc7f736239 100644 --- a/test_runner/regress/test_pg_regress.py +++ b/test_runner/regress/test_pg_regress.py @@ -395,23 +395,6 @@ def test_max_wal_rate(neon_simple_env: NeonEnv): tuples = endpoint.safe_psql("SELECT backpressure_throttling_time();") assert tuples[0][0] == 0, "Backpressure throttling detected" - # 0 MB/s max_wal_rate. WAL proposer can still push some WALs but will be super slow. 
- endpoint.safe_psql_many( - [ - "ALTER SYSTEM SET databricks.max_wal_mb_per_second = 0;", - "SELECT pg_reload_conf();", - ] - ) - - # Write ~10 KB data should hit backpressure. - with endpoint.cursor(dbname=DBNAME) as cur: - cur.execute("SET databricks.max_wal_mb_per_second = 0;") - for _ in range(0, 10): - cur.execute("INSERT INTO usertable SELECT random(), repeat('a', 1000);") - - tuples = endpoint.safe_psql("SELECT backpressure_throttling_time();") - assert tuples[0][0] > 0, "No backpressure throttling detected" - # 1 MB/s max_wal_rate. endpoint.safe_psql_many( [ diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index 2252c098c7..c2907d8a4f 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -1508,20 +1508,55 @@ def test_sharding_split_failures( env.storage_controller.consistency_check() -@pytest.mark.skip(reason="The backpressure change has not been merged yet.") +# HADRON +def test_create_tenant_after_split(neon_env_builder: NeonEnvBuilder): + """ + Tests creating a tenant and a timeline should fail after a tenant split. + """ + env = neon_env_builder.init_start(initial_tenant_shard_count=4) + + env.storage_controller.allowed_errors.extend( + [ + ".*already exists with a different shard count.*", + ] + ) + + ep = env.endpoints.create_start("main", tenant_id=env.initial_tenant) + ep.safe_psql("CREATE TABLE usertable ( YCSB_KEY INT, FIELD0 TEXT);") + ep.safe_psql("INSERT INTO usertable VALUES (1, 'test1');") + ep.safe_psql("INSERT INTO usertable VALUES (2, 'test2');") + ep.safe_psql("INSERT INTO usertable VALUES (3, 'test3');") + + # Split the tenant + + env.storage_controller.tenant_shard_split(env.initial_tenant, shard_count=8) + + with pytest.raises(RuntimeError): + env.create_tenant(env.initial_tenant, env.initial_timeline, shard_count=4) + + # run more queries + ep.safe_psql("SELECT * FROM usertable;") + ep.safe_psql("UPDATE usertable set FIELD0 = 'test4';") + + ep.stop_and_destroy() + + +# HADRON def test_back_pressure_during_split(neon_env_builder: NeonEnvBuilder): """ - Test backpressure can ignore new shards during tenant split so that if we abort the split, - PG can continue without being blocked. + Test backpressure works correctly during a shard split, especially after a split is aborted, + PG will not be stuck forever. """ - DBNAME = "regression" - - init_shard_count = 4 + init_shard_count = 1 neon_env_builder.num_pageservers = init_shard_count stripe_size = 32 env = neon_env_builder.init_start( - initial_tenant_shard_count=init_shard_count, initial_tenant_shard_stripe_size=stripe_size + initial_tenant_shard_count=init_shard_count, + initial_tenant_shard_stripe_size=stripe_size, + initial_tenant_conf={ + "checkpoint_distance": 1024 * 1024 * 1024, + }, ) env.storage_controller.allowed_errors.extend( @@ -1537,19 +1572,31 @@ def test_back_pressure_during_split(neon_env_builder: NeonEnvBuilder): "main", config_lines=[ "max_replication_write_lag = 1MB", - "databricks.max_wal_mb_per_second = 1", "neon.max_cluster_size = 10GB", + "databricks.max_wal_mb_per_second=100", ], ) - endpoint.respec(skip_pg_catalog_updates=False) # Needed for databricks_system to get created. 
+ endpoint.respec(skip_pg_catalog_updates=False) endpoint.start() - endpoint.safe_psql(f"CREATE DATABASE {DBNAME}") - - endpoint.safe_psql("CREATE TABLE usertable ( YCSB_KEY INT, FIELD0 TEXT);") + # generate 10MB of data + endpoint.safe_psql( + "CREATE TABLE usertable AS SELECT s AS KEY, repeat('a', 1000) as VALUE from generate_series(1, 10000) s;" + ) write_done = Event() - def write_data(write_done): + def get_write_lag(): + res = endpoint.safe_psql( + """ + SELECT + pg_wal_lsn_diff(pg_current_wal_flush_lsn(), received_lsn) as received_lsn_lag + FROM neon.backpressure_lsns(); + """, + log_query=False, + ) + return res[0][0] + + def write_data(write_done: Event): while not write_done.is_set(): endpoint.safe_psql( "INSERT INTO usertable SELECT random(), repeat('a', 1000);", log_query=False @@ -1560,35 +1607,39 @@ def test_back_pressure_during_split(neon_env_builder: NeonEnvBuilder): writer_thread.start() env.storage_controller.configure_failpoints(("shard-split-pre-complete", "return(1)")) + # sleep 10 seconds before re-activating the old shard when aborting the split. + # this is to add some backpressures to PG + env.pageservers[0].http_client().configure_failpoints( + ("attach-before-activate-sleep", "return(10000)"), + ) # split the tenant with pytest.raises(StorageControllerApiException): - env.storage_controller.tenant_shard_split(env.initial_tenant, shard_count=16) + env.storage_controller.tenant_shard_split(env.initial_tenant, shard_count=4) + + def check_tenant_status(): + status = ( + env.pageservers[0].http_client().tenant_status(TenantShardId(env.initial_tenant, 0, 1)) + ) + assert status["state"]["slug"] == "Active" + + wait_until(check_tenant_status) write_done.set() writer_thread.join() + log.info(f"current write lag: {get_write_lag()}") + # writing more data to page servers after split is aborted - for _i in range(5000): - endpoint.safe_psql( - "INSERT INTO usertable SELECT random(), repeat('a', 1000);", log_query=False - ) + with endpoint.cursor() as cur: + for _i in range(1000): + cur.execute("INSERT INTO usertable SELECT random(), repeat('a', 1000);") # wait until write lag becomes 0 def check_write_lag_is_zero(): - res = endpoint.safe_psql( - """ - SELECT - pg_wal_lsn_diff(pg_current_wal_flush_lsn(), received_lsn) as received_lsn_lag - FROM neon.backpressure_lsns(); - """, - dbname="databricks_system", - log_query=False, - ) - log.info(f"received_lsn_lag = {res[0][0]}") - assert res[0][0] == 0 + res = get_write_lag() + assert res == 0 wait_until(check_write_lag_is_zero) - endpoint.stop_and_destroy() # BEGIN_HADRON @@ -1674,7 +1725,6 @@ def test_shard_resolve_during_split_abort(neon_env_builder: NeonEnvBuilder): # HADRON -@pytest.mark.skip(reason="The backpressure change has not been merged yet.") def test_back_pressure_per_shard(neon_env_builder: NeonEnvBuilder): """ Tests back pressure knobs are enforced on the per shard basis instead of at the tenant level. @@ -1703,20 +1753,16 @@ def test_back_pressure_per_shard(neon_env_builder: NeonEnvBuilder): "neon.max_cluster_size = 10GB", ], ) - endpoint.respec(skip_pg_catalog_updates=False) # Needed for databricks_system to get created. 
+ endpoint.respec(skip_pg_catalog_updates=False) endpoint.start() - # generate 20MB of data + # generate 10MB of data endpoint.safe_psql( - "CREATE TABLE usertable AS SELECT s AS KEY, repeat('a', 1000) as VALUE from generate_series(1, 20000) s;" + "CREATE TABLE usertable AS SELECT s AS KEY, repeat('a', 1000) as VALUE from generate_series(1, 10000) s;" ) - res = endpoint.safe_psql( - "SELECT neon.backpressure_throttling_time() as throttling_time", dbname="databricks_system" - )[0] + res = endpoint.safe_psql("SELECT neon.backpressure_throttling_time() as throttling_time")[0] assert res[0] == 0, f"throttling_time should be 0, but got {res[0]}" - endpoint.stop() - # HADRON def test_shard_split_page_server_timeout(neon_env_builder: NeonEnvBuilder): @@ -1880,14 +1926,14 @@ def test_sharding_backpressure(neon_env_builder: NeonEnvBuilder): shards_info() for _write_iter in range(30): - # approximately 1MB of data - workload.write_rows(8000, upload=False) + # approximately 10MB of data + workload.write_rows(80000, upload=False) update_write_lsn() infos = shards_info() min_lsn = min(Lsn(info["last_record_lsn"]) for info in infos) max_lsn = max(Lsn(info["last_record_lsn"]) for info in infos) diff = max_lsn - min_lsn - assert diff < 2 * 1024 * 1024, f"LSN diff={diff}, expected diff < 2MB due to backpressure" + assert diff < 8 * 1024 * 1024, f"LSN diff={diff}, expected diff < 8MB due to backpressure" def test_sharding_unlogged_relation(neon_env_builder: NeonEnvBuilder): From 0e427fc117fed57e111450db3f652981c0c7b380 Mon Sep 17 00:00:00 2001 From: Ivan Efremov Date: Thu, 24 Jul 2025 11:23:07 +0300 Subject: [PATCH 08/23] Update proxy-bench workflow to use bare-metal script (#12703) Pass the params for run.sh in proxy-bench repo to use bare-metal config. Fix the paths and cleanup procedure. 
--- .github/workflows/proxy-benchmark.yml | 48 +++++++++++++++++++++------ 1 file changed, 38 insertions(+), 10 deletions(-) diff --git a/.github/workflows/proxy-benchmark.yml b/.github/workflows/proxy-benchmark.yml index 0ae93ce295..e48fe41b45 100644 --- a/.github/workflows/proxy-benchmark.yml +++ b/.github/workflows/proxy-benchmark.yml @@ -3,7 +3,7 @@ name: Periodic proxy performance test on unit-perf-aws-arm runners on: push: # TODO: remove after testing branches: - - test-proxy-bench # Runs on pushes to branches starting with test-proxy-bench + - test-proxy-bench # Runs on pushes to test-proxy-bench branch # schedule: # * is a special character in YAML so you have to quote this string # ┌───────────── minute (0 - 59) @@ -32,7 +32,7 @@ jobs: statuses: write contents: write pull-requests: write - runs-on: [self-hosted, unit-perf-aws-arm] + runs-on: [ self-hosted, unit-perf-aws-arm ] timeout-minutes: 60 # 1h timeout container: image: ghcr.io/neondatabase/build-tools:pinned-bookworm @@ -55,30 +55,58 @@ jobs: { echo "PROXY_BENCH_PATH=$PROXY_BENCH_PATH" echo "NEON_DIR=${RUNNER_TEMP}/neon" + echo "NEON_PROXY_PATH=${RUNNER_TEMP}/neon/bin/proxy" echo "TEST_OUTPUT=${PROXY_BENCH_PATH}/test_output" echo "" } >> "$GITHUB_ENV" - - name: Run proxy-bench - run: ${PROXY_BENCH_PATH}/run.sh + - name: Cache poetry deps + uses: actions/cache@v4 + with: + path: ~/.cache/pypoetry/virtualenvs + key: v2-${{ runner.os }}-${{ runner.arch }}-python-deps-bookworm-${{ hashFiles('poetry.lock') }} - - name: Ingest Bench Results # neon repo script + - name: Install Python deps + shell: bash -euxo pipefail {0} + run: ./scripts/pysync + + - name: show ulimits + shell: bash -euxo pipefail {0} + run: | + ulimit -a + + - name: Run proxy-bench + working-directory: ${{ env.PROXY_BENCH_PATH }} + run: ./run.sh --with-grafana --bare-metal + + - name: Ingest Bench Results if: always() + working-directory: ${{ env.NEON_DIR }} run: | mkdir -p $TEST_OUTPUT python $NEON_DIR/scripts/proxy_bench_results_ingest.py --out $TEST_OUTPUT - name: Push Metrics to Proxy perf database + shell: bash -euxo pipefail {0} if: always() env: PERF_TEST_RESULT_CONNSTR: "${{ secrets.PROXY_TEST_RESULT_CONNSTR }}" REPORT_FROM: $TEST_OUTPUT + working-directory: ${{ env.NEON_DIR }} run: $NEON_DIR/scripts/generate_and_push_perf_report.sh - - name: Docker cleanup - if: always() - run: docker compose down - - name: Notify Failure if: failure() - run: echo "Proxy bench job failed" && exit 1 \ No newline at end of file + run: echo "Proxy bench job failed" && exit 1 + + - name: Cleanup Test Resources + if: always() + shell: bash -euxo pipefail {0} + run: | + # Cleanup the test resources + if [[ -d "${TEST_OUTPUT}" ]]; then + rm -rf ${TEST_OUTPUT} + fi + if [[ -d "${PROXY_BENCH_PATH}/test_output" ]]; then + rm -rf ${PROXY_BENCH_PATH}/test_output + fi \ No newline at end of file From 9997661138c005f69c852ff2c4115fb1dc2c8d76 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Thu, 24 Jul 2025 11:30:02 +0100 Subject: [PATCH 09/23] [proxy/tokio-postgres] garbage collection for codec buffers (#12701) ## Problem A large insert or a large row will cause the codec to allocate a large buffer. The codec never shrinks the buffer however. LKB-2496 ## Summary of changes 1. Introduce a naive GC system for codec buffers 2. 
Try and reduce copies as much as possible --- libs/proxy/tokio-postgres2/src/client.rs | 19 ++-- libs/proxy/tokio-postgres2/src/codec.rs | 25 ++++- libs/proxy/tokio-postgres2/src/config.rs | 9 +- libs/proxy/tokio-postgres2/src/connect_raw.rs | 105 ++++++------------ libs/proxy/tokio-postgres2/src/connection.rs | 41 +++++-- 5 files changed, 100 insertions(+), 99 deletions(-) diff --git a/libs/proxy/tokio-postgres2/src/client.rs b/libs/proxy/tokio-postgres2/src/client.rs index f8aceb5263..f5aed010ef 100644 --- a/libs/proxy/tokio-postgres2/src/client.rs +++ b/libs/proxy/tokio-postgres2/src/client.rs @@ -15,6 +15,7 @@ use tokio::sync::mpsc; use crate::cancel_token::RawCancelToken; use crate::codec::{BackendMessages, FrontendMessage, RecordNotices}; use crate::config::{Host, SslMode}; +use crate::connection::gc_bytesmut; use crate::query::RowStream; use crate::simple_query::SimpleQueryStream; use crate::types::{Oid, Type}; @@ -95,20 +96,13 @@ impl InnerClient { Ok(PartialQuery(Some(self))) } - // pub fn send_with_sync(&mut self, f: F) -> Result<&mut Responses, Error> - // where - // F: FnOnce(&mut BytesMut) -> Result<(), Error>, - // { - // self.start()?.send_with_sync(f) - // } - pub fn send_simple_query(&mut self, query: &str) -> Result<&mut Responses, Error> { self.responses.waiting += 1; self.buffer.clear(); // simple queries do not need sync. frontend::query(query, &mut self.buffer).map_err(Error::encode)?; - let buf = self.buffer.split().freeze(); + let buf = self.buffer.split(); self.send_message(FrontendMessage::Raw(buf)) } @@ -125,7 +119,7 @@ impl Drop for PartialQuery<'_> { if let Some(client) = self.0.take() { client.buffer.clear(); frontend::sync(&mut client.buffer); - let buf = client.buffer.split().freeze(); + let buf = client.buffer.split(); let _ = client.send_message(FrontendMessage::Raw(buf)); } } @@ -141,7 +135,7 @@ impl<'a> PartialQuery<'a> { client.buffer.clear(); f(&mut client.buffer)?; frontend::flush(&mut client.buffer); - let buf = client.buffer.split().freeze(); + let buf = client.buffer.split(); client.send_message(FrontendMessage::Raw(buf)) } @@ -154,7 +148,7 @@ impl<'a> PartialQuery<'a> { client.buffer.clear(); f(&mut client.buffer)?; frontend::sync(&mut client.buffer); - let buf = client.buffer.split().freeze(); + let buf = client.buffer.split(); let _ = client.send_message(FrontendMessage::Raw(buf)); Ok(&mut self.0.take().unwrap().responses) @@ -317,6 +311,9 @@ impl Client { DISCARD SEQUENCES;", )?; + // Clean up memory usage. 
+ gc_bytesmut(&mut self.inner_mut().buffer); + Ok(()) } diff --git a/libs/proxy/tokio-postgres2/src/codec.rs b/libs/proxy/tokio-postgres2/src/codec.rs index 813faa0e35..71fe062fca 100644 --- a/libs/proxy/tokio-postgres2/src/codec.rs +++ b/libs/proxy/tokio-postgres2/src/codec.rs @@ -1,13 +1,13 @@ use std::io; -use bytes::{Bytes, BytesMut}; +use bytes::BytesMut; use fallible_iterator::FallibleIterator; use postgres_protocol2::message::backend; use tokio::sync::mpsc::UnboundedSender; use tokio_util::codec::{Decoder, Encoder}; pub enum FrontendMessage { - Raw(Bytes), + Raw(BytesMut), RecordNotices(RecordNotices), } @@ -17,7 +17,10 @@ pub struct RecordNotices { } pub enum BackendMessage { - Normal { messages: BackendMessages }, + Normal { + messages: BackendMessages, + ready: bool, + }, Async(backend::Message), } @@ -40,11 +43,18 @@ impl FallibleIterator for BackendMessages { pub struct PostgresCodec; -impl Encoder for PostgresCodec { +impl Encoder for PostgresCodec { type Error = io::Error; - fn encode(&mut self, item: Bytes, dst: &mut BytesMut) -> io::Result<()> { - dst.extend_from_slice(&item); + fn encode(&mut self, item: BytesMut, dst: &mut BytesMut) -> io::Result<()> { + // When it comes to request/response workflows, we usually flush the entire write + // buffer in order to wait for the response before we send a new request. + // Therefore we can avoid the copy and just replace the buffer. + if dst.is_empty() { + *dst = item; + } else { + dst.extend_from_slice(&item); + } Ok(()) } } @@ -56,6 +66,7 @@ impl Decoder for PostgresCodec { fn decode(&mut self, src: &mut BytesMut) -> Result, io::Error> { let mut idx = 0; + let mut ready = false; while let Some(header) = backend::Header::parse(&src[idx..])? { let len = header.len() as usize + 1; if src[idx..].len() < len { @@ -79,6 +90,7 @@ impl Decoder for PostgresCodec { idx += len; if header.tag() == backend::READY_FOR_QUERY_TAG { + ready = true; break; } } @@ -88,6 +100,7 @@ impl Decoder for PostgresCodec { } else { Ok(Some(BackendMessage::Normal { messages: BackendMessages(src.split_to(idx)), + ready, })) } } diff --git a/libs/proxy/tokio-postgres2/src/config.rs b/libs/proxy/tokio-postgres2/src/config.rs index c619f92d13..3579dd94a2 100644 --- a/libs/proxy/tokio-postgres2/src/config.rs +++ b/libs/proxy/tokio-postgres2/src/config.rs @@ -250,19 +250,20 @@ impl Config { { let stream = connect_tls(stream, self.ssl_mode, tls).await?; let mut stream = StartupStream::new(stream); - connect_raw::startup(&mut stream, self).await?; connect_raw::authenticate(&mut stream, self).await?; Ok(stream) } - pub async fn authenticate(&self, stream: &mut StartupStream) -> Result<(), Error> + pub fn authenticate( + &self, + stream: &mut StartupStream, + ) -> impl Future> where S: AsyncRead + AsyncWrite + Unpin, T: TlsStream + Unpin, { - connect_raw::startup(stream, self).await?; - connect_raw::authenticate(stream, self).await + connect_raw::authenticate(stream, self) } } diff --git a/libs/proxy/tokio-postgres2/src/connect_raw.rs b/libs/proxy/tokio-postgres2/src/connect_raw.rs index bc35cef339..17237eeef5 100644 --- a/libs/proxy/tokio-postgres2/src/connect_raw.rs +++ b/libs/proxy/tokio-postgres2/src/connect_raw.rs @@ -2,51 +2,28 @@ use std::io; use std::pin::Pin; use std::task::{Context, Poll, ready}; -use bytes::{Bytes, BytesMut}; +use bytes::BytesMut; use fallible_iterator::FallibleIterator; -use futures_util::{Sink, SinkExt, Stream, TryStreamExt}; +use futures_util::{SinkExt, Stream, TryStreamExt}; use postgres_protocol2::authentication::sasl; use 
postgres_protocol2::authentication::sasl::ScramSha256; use postgres_protocol2::message::backend::{AuthenticationSaslBody, Message}; use postgres_protocol2::message::frontend; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use tokio_util::codec::{Framed, FramedParts, FramedWrite}; +use tokio_util::codec::{Framed, FramedParts}; use crate::Error; use crate::codec::PostgresCodec; use crate::config::{self, AuthKeys, Config}; +use crate::connection::{GC_THRESHOLD, INITIAL_CAPACITY}; use crate::maybe_tls_stream::MaybeTlsStream; use crate::tls::TlsStream; pub struct StartupStream { - inner: FramedWrite, PostgresCodec>, + inner: Framed, PostgresCodec>, read_buf: BytesMut, } -impl Sink for StartupStream -where - S: AsyncRead + AsyncWrite + Unpin, - T: AsyncRead + AsyncWrite + Unpin, -{ - type Error = io::Error; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_ready(cx) - } - - fn start_send(mut self: Pin<&mut Self>, item: Bytes) -> io::Result<()> { - Pin::new(&mut self.inner).start_send(item) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_flush(cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_close(cx) - } -} - impl Stream for StartupStream where S: AsyncRead + AsyncWrite + Unpin, @@ -55,6 +32,8 @@ where type Item = io::Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + // We don't use `self.inner.poll_next()` as that might over-read into the read buffer. + // read 1 byte tag, 4 bytes length. let header = ready!(self.as_mut().poll_fill_buf_exact(cx, 5)?); @@ -121,36 +100,28 @@ where } pub fn into_framed(mut self) -> Framed, PostgresCodec> { - let write_buf = std::mem::take(self.inner.write_buffer_mut()); - let io = self.inner.into_inner(); - let mut parts = FramedParts::new(io, PostgresCodec); - parts.read_buf = self.read_buf; - parts.write_buf = write_buf; - Framed::from_parts(parts) + *self.inner.read_buffer_mut() = self.read_buf; + self.inner } pub fn new(io: MaybeTlsStream) -> Self { + let mut parts = FramedParts::new(io, PostgresCodec); + parts.write_buf = BytesMut::with_capacity(INITIAL_CAPACITY); + + let mut inner = Framed::from_parts(parts); + + // This is the default already, but nice to be explicit. + // We divide by two because writes will overshoot the boundary. + // We don't want constant overshoots to cause us to constantly re-shrink the buffer. + inner.set_backpressure_boundary(GC_THRESHOLD / 2); + Self { - inner: FramedWrite::new(io, PostgresCodec), - read_buf: BytesMut::new(), + inner, + read_buf: BytesMut::with_capacity(INITIAL_CAPACITY), } } } -pub(crate) async fn startup( - stream: &mut StartupStream, - config: &Config, -) -> Result<(), Error> -where - S: AsyncRead + AsyncWrite + Unpin, - T: AsyncRead + AsyncWrite + Unpin, -{ - let mut buf = BytesMut::new(); - frontend::startup_message(&config.server_params, &mut buf).map_err(Error::encode)?; - - stream.send(buf.freeze()).await.map_err(Error::io) -} - pub(crate) async fn authenticate( stream: &mut StartupStream, config: &Config, @@ -159,6 +130,10 @@ where S: AsyncRead + AsyncWrite + Unpin, T: TlsStream + Unpin, { + frontend::startup_message(&config.server_params, stream.inner.write_buffer_mut()) + .map_err(Error::encode)?; + + stream.inner.flush().await.map_err(Error::io)?; match stream.try_next().await.map_err(Error::io)? 
{ Some(Message::AuthenticationOk) => { can_skip_channel_binding(config)?; @@ -172,7 +147,8 @@ where .as_ref() .ok_or_else(|| Error::config("password missing".into()))?; - authenticate_password(stream, pass).await?; + frontend::password_message(pass, stream.inner.write_buffer_mut()) + .map_err(Error::encode)?; } Some(Message::AuthenticationSasl(body)) => { authenticate_sasl(stream, body, config).await?; @@ -191,6 +167,7 @@ where None => return Err(Error::closed()), } + stream.inner.flush().await.map_err(Error::io)?; match stream.try_next().await.map_err(Error::io)? { Some(Message::AuthenticationOk) => Ok(()), Some(Message::ErrorResponse(body)) => Err(Error::db(body)), @@ -208,20 +185,6 @@ fn can_skip_channel_binding(config: &Config) -> Result<(), Error> { } } -async fn authenticate_password( - stream: &mut StartupStream, - password: &[u8], -) -> Result<(), Error> -where - S: AsyncRead + AsyncWrite + Unpin, - T: AsyncRead + AsyncWrite + Unpin, -{ - let mut buf = BytesMut::new(); - frontend::password_message(password, &mut buf).map_err(Error::encode)?; - - stream.send(buf.freeze()).await.map_err(Error::io) -} - async fn authenticate_sasl( stream: &mut StartupStream, body: AuthenticationSaslBody, @@ -276,10 +239,10 @@ where return Err(Error::config("password or auth keys missing".into())); }; - let mut buf = BytesMut::new(); - frontend::sasl_initial_response(mechanism, scram.message(), &mut buf).map_err(Error::encode)?; - stream.send(buf.freeze()).await.map_err(Error::io)?; + frontend::sasl_initial_response(mechanism, scram.message(), stream.inner.write_buffer_mut()) + .map_err(Error::encode)?; + stream.inner.flush().await.map_err(Error::io)?; let body = match stream.try_next().await.map_err(Error::io)? { Some(Message::AuthenticationSaslContinue(body)) => body, Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), @@ -292,10 +255,10 @@ where .await .map_err(|e| Error::authentication(e.into()))?; - let mut buf = BytesMut::new(); - frontend::sasl_response(scram.message(), &mut buf).map_err(Error::encode)?; - stream.send(buf.freeze()).await.map_err(Error::io)?; + frontend::sasl_response(scram.message(), stream.inner.write_buffer_mut()) + .map_err(Error::encode)?; + stream.inner.flush().await.map_err(Error::io)?; let body = match stream.try_next().await.map_err(Error::io)? { Some(Message::AuthenticationSaslFinal(body)) => body, Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), diff --git a/libs/proxy/tokio-postgres2/src/connection.rs b/libs/proxy/tokio-postgres2/src/connection.rs index c43a22ffe7..bee4b3372d 100644 --- a/libs/proxy/tokio-postgres2/src/connection.rs +++ b/libs/proxy/tokio-postgres2/src/connection.rs @@ -44,6 +44,27 @@ pub struct Connection { state: State, } +pub const INITIAL_CAPACITY: usize = 2 * 1024; +pub const GC_THRESHOLD: usize = 16 * 1024; + +/// Gargabe collect the [`BytesMut`] if it has too much spare capacity. +pub fn gc_bytesmut(buf: &mut BytesMut) { + // We use a different mode to shrink the buf when above the threshold. + // When above the threshold, we only re-allocate when the buf has 2x spare capacity. + let reclaim = GC_THRESHOLD.checked_sub(buf.len()).unwrap_or(buf.len()); + + // `try_reclaim` tries to get the capacity from any shared `BytesMut`s, + // before then comparing the length against the capacity. + if buf.try_reclaim(reclaim) { + let capacity = usize::max(buf.len(), INITIAL_CAPACITY); + + // Allocate a new `BytesMut` so that we deallocate the old version. 
+ let mut new = BytesMut::with_capacity(capacity); + new.extend_from_slice(buf); + *buf = new; + } +} + pub enum Never {} impl Connection @@ -86,7 +107,14 @@ where continue; } BackendMessage::Async(_) => continue, - BackendMessage::Normal { messages } => messages, + BackendMessage::Normal { messages, ready } => { + // if we read a ReadyForQuery from postgres, let's try GC the read buffer. + if ready { + gc_bytesmut(self.stream.read_buffer_mut()); + } + + messages + } } } }; @@ -177,12 +205,7 @@ where // Send a terminate message to postgres Poll::Ready(None) => { trace!("poll_write: at eof, terminating"); - let mut request = BytesMut::new(); - frontend::terminate(&mut request); - - Pin::new(&mut self.stream) - .start_send(request.freeze()) - .map_err(Error::io)?; + frontend::terminate(self.stream.write_buffer_mut()); trace!("poll_write: sent eof, closing"); trace!("poll_write: done"); @@ -205,6 +228,10 @@ where { Poll::Ready(()) => { trace!("poll_flush: flushed"); + + // GC the write buffer if we managed to flush + gc_bytesmut(self.stream.write_buffer_mut()); + Poll::Ready(Ok(())) } Poll::Pending => { From e82021d6fe37df8fd5bddb16cbad74e39bd004b6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 24 Jul 2025 10:51:09 +0000 Subject: [PATCH 10/23] build(deps): bump the npm_and_yarn group across 1 directory with 2 updates (#12678) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build-tools/package-lock.json | 51 +++++++++++++---------------------- build-tools/package.json | 2 +- 2 files changed, 19 insertions(+), 34 deletions(-) diff --git a/build-tools/package-lock.json b/build-tools/package-lock.json index b2c44ed9b4..0d48345fd5 100644 --- a/build-tools/package-lock.json +++ b/build-tools/package-lock.json @@ -6,7 +6,7 @@ "": { "name": "build-tools", "devDependencies": { - "@redocly/cli": "1.34.4", + "@redocly/cli": "1.34.5", "@sourcemeta/jsonschema": "10.0.0" } }, @@ -472,9 +472,9 @@ } }, "node_modules/@redocly/cli": { - "version": "1.34.4", - "resolved": "https://registry.npmjs.org/@redocly/cli/-/cli-1.34.4.tgz", - "integrity": "sha512-seH/GgrjSB1EeOsgJ/4Ct6Jk2N7sh12POn/7G8UQFARMyUMJpe1oHtBwT2ndfp4EFCpgBAbZ/82Iw6dwczNxEA==", + "version": "1.34.5", + "resolved": "https://registry.npmjs.org/@redocly/cli/-/cli-1.34.5.tgz", + "integrity": "sha512-5IEwxs7SGP5KEXjBKLU8Ffdz9by/KqNSeBk6YUVQaGxMXK//uYlTJIPntgUXbo1KAGG2d2q2XF8y4iFz6qNeiw==", "dev": true, "license": "MIT", "dependencies": { @@ -484,14 +484,14 @@ "@opentelemetry/sdk-trace-node": "1.26.0", "@opentelemetry/semantic-conventions": "1.27.0", "@redocly/config": "^0.22.0", - "@redocly/openapi-core": "1.34.4", - "@redocly/respect-core": "1.34.4", + "@redocly/openapi-core": "1.34.5", + "@redocly/respect-core": "1.34.5", "abort-controller": "^3.0.0", "chokidar": "^3.5.1", "colorette": "^1.2.0", "core-js": "^3.32.1", "dotenv": "16.4.7", - "form-data": "^4.0.0", + "form-data": "^4.0.4", "get-port-please": "^3.0.1", "glob": "^7.1.6", "handlebars": "^4.7.6", @@ -522,9 +522,9 @@ "license": "MIT" }, "node_modules/@redocly/openapi-core": { - "version": "1.34.4", - "resolved": "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.34.4.tgz", - "integrity": "sha512-hf53xEgpXIgWl3b275PgZU3OTpYh1RoD2LHdIfQ1JzBNTWsiNKczTEsI/4Tmh2N1oq9YcphhSMyk3lDh85oDjg==", + "version": "1.34.5", + "resolved": "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.34.5.tgz", + "integrity": 
"sha512-0EbE8LRbkogtcCXU7liAyC00n9uNG9hJ+eMyHFdUsy9lB/WGqnEBgwjA9q2cyzAVcdTkQqTBBU1XePNnN3OijA==", "dev": true, "license": "MIT", "dependencies": { @@ -544,21 +544,21 @@ } }, "node_modules/@redocly/respect-core": { - "version": "1.34.4", - "resolved": "https://registry.npmjs.org/@redocly/respect-core/-/respect-core-1.34.4.tgz", - "integrity": "sha512-MitKyKyQpsizA4qCVv+MjXL4WltfhFQAoiKiAzrVR1Kusro3VhYb6yJuzoXjiJhR0ukLP5QOP19Vcs7qmj9dZg==", + "version": "1.34.5", + "resolved": "https://registry.npmjs.org/@redocly/respect-core/-/respect-core-1.34.5.tgz", + "integrity": "sha512-GheC/g/QFztPe9UA9LamooSplQuy9pe0Yr8XGTqkz0ahivLDl7svoy/LSQNn1QH3XGtLKwFYMfTwFR2TAYyh5Q==", "dev": true, "license": "MIT", "dependencies": { "@faker-js/faker": "^7.6.0", "@redocly/ajv": "8.11.2", - "@redocly/openapi-core": "1.34.4", + "@redocly/openapi-core": "1.34.5", "better-ajv-errors": "^1.2.0", "colorette": "^2.0.20", "concat-stream": "^2.0.0", "cookie": "^0.7.2", "dotenv": "16.4.7", - "form-data": "4.0.0", + "form-data": "^4.0.4", "jest-diff": "^29.3.1", "jest-matcher-utils": "^29.3.1", "js-yaml": "4.1.0", @@ -582,21 +582,6 @@ "dev": true, "license": "MIT" }, - "node_modules/@redocly/respect-core/node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "dev": true, - "license": "MIT", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, "node_modules/@sinclair/typebox": { "version": "0.27.8", "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", @@ -1345,9 +1330,9 @@ "license": "MIT" }, "node_modules/form-data": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.3.tgz", - "integrity": "sha512-qsITQPfmvMOSAdeyZ+12I1c+CKSstAFAwu+97zrnWAbIr5u8wfsExUzCesVLC8NgHuRUqNN4Zy6UPWUTRGslcA==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz", + "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==", "dev": true, "license": "MIT", "dependencies": { diff --git a/build-tools/package.json b/build-tools/package.json index 000969c672..2dc1359075 100644 --- a/build-tools/package.json +++ b/build-tools/package.json @@ -2,7 +2,7 @@ "name": "build-tools", "private": true, "devDependencies": { - "@redocly/cli": "1.34.4", + "@redocly/cli": "1.34.5", "@sourcemeta/jsonschema": "10.0.0" } } From ab14521ea5c94894f27ff3e939a891817077b219 Mon Sep 17 00:00:00 2001 From: Alexey Kondratov Date: Thu, 24 Jul 2025 13:52:31 +0200 Subject: [PATCH 11/23] fix(compute): Turn off database collector in postgres_exporter (#12684) ## Problem `postgres_exporter` has database collector enabled by default and it doesn't filter out invalid databases, see https://github.com/prometheus-community/postgres_exporter/blob/06a553c8166512c9d9c5ccf257b0f9bba8751dbc/collector/pg_database.go#L67 so if it hits one, it starts spamming logs ``` ERROR: [NEON_SMGR] [reqid d9700000018] could not read db size of db 705302 from page server at lsn 5/A2457EB0 ``` ## Summary of changes We don't use `pg_database_size_bytes` metric anyway, see https://github.com/neondatabase/flux-fleet/blob/5e19b3fd897667b70d9a7ad4aa06df0ca22b49ff/apps/base/compute-metrics/scrape-compute-pg-exporter-neon.yaml#L29 so just turn it off by passing 
`--no-collector.database`. --- compute/vm-image-spec-bookworm.yaml | 8 +++++++- compute/vm-image-spec-bullseye.yaml | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/compute/vm-image-spec-bookworm.yaml b/compute/vm-image-spec-bookworm.yaml index 267e4c83b5..5f27b6bf9d 100644 --- a/compute/vm-image-spec-bookworm.yaml +++ b/compute/vm-image-spec-bookworm.yaml @@ -26,7 +26,13 @@ commands: - name: postgres-exporter user: nobody sysvInitAction: respawn - shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml' + # Turn off database collector (`--no-collector.database`), we don't use `pg_database_size_bytes` metric anyway, see + # https://github.com/neondatabase/flux-fleet/blob/5e19b3fd897667b70d9a7ad4aa06df0ca22b49ff/apps/base/compute-metrics/scrape-compute-pg-exporter-neon.yaml#L29 + # but it's enabled by default and it doesn't filter out invalid databases, see + # https://github.com/prometheus-community/postgres_exporter/blob/06a553c8166512c9d9c5ccf257b0f9bba8751dbc/collector/pg_database.go#L67 + # so if it hits one, it starts spamming logs + # ERROR: [NEON_SMGR] [reqid d9700000018] could not read db size of db 705302 from page server at lsn 5/A2457EB0 + shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --no-collector.database --config.file=/etc/postgres_exporter.yml' - name: pgbouncer-exporter user: postgres sysvInitAction: respawn diff --git a/compute/vm-image-spec-bullseye.yaml b/compute/vm-image-spec-bullseye.yaml index 2b6e77b656..cf26ace72a 100644 --- a/compute/vm-image-spec-bullseye.yaml +++ b/compute/vm-image-spec-bullseye.yaml @@ -26,7 +26,13 @@ commands: - name: postgres-exporter user: nobody sysvInitAction: respawn - shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml' + # Turn off database collector (`--no-collector.database`), we don't use `pg_database_size_bytes` metric anyway, see + # https://github.com/neondatabase/flux-fleet/blob/5e19b3fd897667b70d9a7ad4aa06df0ca22b49ff/apps/base/compute-metrics/scrape-compute-pg-exporter-neon.yaml#L29 + # but it's enabled by default and it doesn't filter out invalid databases, see + # https://github.com/prometheus-community/postgres_exporter/blob/06a553c8166512c9d9c5ccf257b0f9bba8751dbc/collector/pg_database.go#L67 + # so if it hits one, it starts spamming logs + # ERROR: [NEON_SMGR] [reqid d9700000018] could not read db size of db 705302 from page server at lsn 5/A2457EB0 + shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --no-collector.database --config.file=/etc/postgres_exporter.yml' - name: pgbouncer-exporter user: postgres sysvInitAction: respawn From 8daebb6ed4022e4c984a3ab166850de87d6563f8 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Thu, 24 Jul 2025 13:37:04 +0100 Subject: [PATCH 12/23] [proxy] remove TokioMechanism and HyperMechanism (#12672) Another go at #12341. LKB-2497 We now only need 1 connect mechanism (and 1 more for testing) which saves us some code and complexity. 
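For orientation, the slimmed-down trait kept in `proxy/src/proxy/connect_compute.rs` now looks roughly like the sketch below (one associated type, no `#[async_trait]`, and `compute::ConnectionError` as the single error type); the authoritative definition is the one in the diff that follows:

```rust
// Sketch mirroring the change below: every mechanism returns the same
// ConnectionError, so the per-mechanism ConnectError/Error associated
// types and their conversions disappear.
pub(crate) trait ConnectMechanism {
    type Connection;

    async fn connect_once(
        &self,
        ctx: &RequestContext,
        node_info: &control_plane::CachedNodeInfo,
        config: &ComputeConfig,
    ) -> Result<Self::Connection, compute::ConnectionError>;
}
```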
We should be able to remove the final connect mechanism when we create a separate worker task for pglb->compute connections - either via QUIC streams or via in-memory channels. This also now ensures that connect_once always returns a ConnectionError type - something simple enough we can probably define a serialisation for in pglb. * I've abstracted connect_to_compute to always use TcpMechanism and the ProxyConfig. * I've abstracted connect_to_compute_and_auth to perform authentication, managing any retries for stale computes * I had to introduce a separate `managed` function for taking ownership of the compute connection into the Client/Connection pair --- libs/proxy/tokio-postgres2/src/connect.rs | 32 +- libs/proxy/tokio-postgres2/src/lib.rs | 2 +- proxy/src/compute/mod.rs | 19 +- proxy/src/compute/tls.rs | 17 +- proxy/src/console_redirect_proxy.rs | 14 +- proxy/src/control_plane/mod.rs | 11 - proxy/src/proxy/connect_auth.rs | 82 +++++ proxy/src/proxy/connect_compute.rs | 72 +++-- proxy/src/proxy/mod.rs | 70 +---- proxy/src/proxy/retry.rs | 27 +- proxy/src/proxy/tests/mod.rs | 77 ++--- proxy/src/serverless/backend.rs | 360 +++++----------------- 12 files changed, 315 insertions(+), 468 deletions(-) create mode 100644 proxy/src/proxy/connect_auth.rs diff --git a/libs/proxy/tokio-postgres2/src/connect.rs b/libs/proxy/tokio-postgres2/src/connect.rs index 41d95c5f84..ca6f69f049 100644 --- a/libs/proxy/tokio-postgres2/src/connect.rs +++ b/libs/proxy/tokio-postgres2/src/connect.rs @@ -7,7 +7,7 @@ use tokio::net::TcpStream; use tokio::sync::mpsc; use crate::client::SocketConfig; -use crate::config::Host; +use crate::config::{Host, SslMode}; use crate::connect_raw::StartupStream; use crate::connect_socket::connect_socket; use crate::tls::{MakeTlsConnect, TlsConnect}; @@ -45,14 +45,36 @@ where T: TlsConnect, { let socket = connect_socket(host_addr, host, port, config.connect_timeout).await?; - let mut stream = config.tls_and_authenticate(socket, tls).await?; + let stream = config.tls_and_authenticate(socket, tls).await?; + managed( + stream, + host_addr, + host.clone(), + port, + config.ssl_mode, + config.connect_timeout, + ) + .await +} + +pub async fn managed( + mut stream: StartupStream, + host_addr: Option, + host: Host, + port: u16, + ssl_mode: SslMode, + connect_timeout: Option, +) -> Result<(Client, Connection), Error> +where + TlsStream: AsyncRead + AsyncWrite + Unpin, +{ let (process_id, secret_key) = wait_until_ready(&mut stream).await?; let socket_config = SocketConfig { host_addr, - host: host.clone(), + host, port, - connect_timeout: config.connect_timeout, + connect_timeout, }; let (client_tx, conn_rx) = mpsc::unbounded_channel(); @@ -61,7 +83,7 @@ where client_tx, client_rx, socket_config, - config.ssl_mode, + ssl_mode, process_id, secret_key, ); diff --git a/libs/proxy/tokio-postgres2/src/lib.rs b/libs/proxy/tokio-postgres2/src/lib.rs index a858ddca39..da2665095c 100644 --- a/libs/proxy/tokio-postgres2/src/lib.rs +++ b/libs/proxy/tokio-postgres2/src/lib.rs @@ -48,7 +48,7 @@ mod cancel_token; mod client; mod codec; pub mod config; -mod connect; +pub mod connect; pub mod connect_raw; mod connect_socket; mod connect_tls; diff --git a/proxy/src/compute/mod.rs b/proxy/src/compute/mod.rs index 1e3631363e..ca784423ee 100644 --- a/proxy/src/compute/mod.rs +++ b/proxy/src/compute/mod.rs @@ -25,6 +25,7 @@ use crate::control_plane::messages::MetricsAuxInfo; use crate::error::{ReportableError, UserFacingError}; use crate::metrics::{Metrics, NumDbConnectionsGuard}; use 
crate::pqproto::StartupMessageParams; +use crate::proxy::connect_compute::TlsNegotiation; use crate::proxy::neon_option; use crate::types::Host; @@ -84,6 +85,14 @@ pub(crate) enum ConnectionError { #[error("error acquiring resource permit: {0}")] TooManyConnectionAttempts(#[from] ApiLockError), + + #[cfg(test)] + #[error("retryable: {retryable}, wakeable: {wakeable}, kind: {kind:?}")] + TestError { + retryable: bool, + wakeable: bool, + kind: crate::error::ErrorKind, + }, } impl UserFacingError for ConnectionError { @@ -94,6 +103,8 @@ impl UserFacingError for ConnectionError { "Failed to acquire permit to connect to the database. Too many database connection attempts are currently ongoing.".to_owned() } ConnectionError::TlsError(_) => COULD_NOT_CONNECT.to_owned(), + #[cfg(test)] + ConnectionError::TestError { .. } => self.to_string(), } } } @@ -104,6 +115,8 @@ impl ReportableError for ConnectionError { ConnectionError::TlsError(_) => crate::error::ErrorKind::Compute, ConnectionError::WakeComputeError(e) => e.get_error_kind(), ConnectionError::TooManyConnectionAttempts(e) => e.get_error_kind(), + #[cfg(test)] + ConnectionError::TestError { kind, .. } => *kind, } } } @@ -256,6 +269,7 @@ impl ConnectInfo { async fn connect_raw( &self, config: &ComputeConfig, + tls: TlsNegotiation, ) -> Result<(SocketAddr, MaybeTlsStream), TlsError> { let timeout = config.timeout; @@ -298,7 +312,7 @@ impl ConnectInfo { match connect_once(&*addrs).await { Ok((sockaddr, stream)) => Ok(( sockaddr, - tls::connect_tls(stream, self.ssl_mode, config, host).await?, + tls::connect_tls(stream, self.ssl_mode, config, host, tls).await?, )), Err(err) => { warn!("couldn't connect to compute node at {host}:{port}: {err}"); @@ -329,9 +343,10 @@ impl ConnectInfo { ctx: &RequestContext, aux: &MetricsAuxInfo, config: &ComputeConfig, + tls: TlsNegotiation, ) -> Result { let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute); - let (socket_addr, stream) = self.connect_raw(config).await?; + let (socket_addr, stream) = self.connect_raw(config, tls).await?; drop(pause); tracing::Span::current().record("compute_id", tracing::field::display(&aux.compute_id)); diff --git a/proxy/src/compute/tls.rs b/proxy/src/compute/tls.rs index 000d75fca5..cc1c0d1658 100644 --- a/proxy/src/compute/tls.rs +++ b/proxy/src/compute/tls.rs @@ -7,6 +7,7 @@ use thiserror::Error; use tokio::io::{AsyncRead, AsyncWrite}; use crate::pqproto::request_tls; +use crate::proxy::connect_compute::TlsNegotiation; use crate::proxy::retry::CouldRetry; #[derive(Debug, Error)] @@ -35,6 +36,7 @@ pub async fn connect_tls( mode: SslMode, tls: &T, host: &str, + negotiation: TlsNegotiation, ) -> Result, TlsError> where S: AsyncRead + AsyncWrite + Unpin + Send, @@ -49,12 +51,15 @@ where SslMode::Prefer | SslMode::Require => {} } - if !request_tls(&mut stream).await? { - if SslMode::Require == mode { - return Err(TlsError::Required); - } - - return Ok(MaybeTlsStream::Raw(stream)); + match negotiation { + // No TLS request needed + TlsNegotiation::Direct => {} + // TLS request successful + TlsNegotiation::Postgres if request_tls(&mut stream).await? 
=> {} + // TLS request failed but is required + TlsNegotiation::Postgres if SslMode::Require == mode => return Err(TlsError::Required), + // TLS request failed but is not required + TlsNegotiation::Postgres => return Ok(MaybeTlsStream::Raw(stream)), } Ok(MaybeTlsStream::Tls( diff --git a/proxy/src/console_redirect_proxy.rs b/proxy/src/console_redirect_proxy.rs index 639cd123e1..f947abebc0 100644 --- a/proxy/src/console_redirect_proxy.rs +++ b/proxy/src/console_redirect_proxy.rs @@ -16,8 +16,9 @@ use crate::pglb::ClientRequestError; use crate::pglb::handshake::{HandshakeData, handshake}; use crate::pglb::passthrough::ProxyPassthrough; use crate::protocol2::{ConnectHeader, ConnectionInfo, read_proxy_protocol}; -use crate::proxy::connect_compute::{TcpMechanism, connect_to_compute}; -use crate::proxy::{ErrorSource, forward_compute_params_to_client, send_client_greeting}; +use crate::proxy::{ + ErrorSource, connect_compute, forward_compute_params_to_client, send_client_greeting, +}; use crate::util::run_until_cancelled; pub async fn task_main( @@ -215,14 +216,11 @@ pub(crate) async fn handle_client( }; auth_info.set_startup_params(¶ms, true); - let mut node = connect_to_compute( + let mut node = connect_compute::connect_to_compute( ctx, - &TcpMechanism { - locks: &config.connect_compute_locks, - }, + config, &node_info, - config.wake_compute_retry_config, - &config.connect_to_compute, + connect_compute::TlsNegotiation::Postgres, ) .or_else(|e| async { Err(stream.throw_error(e, Some(ctx)).await) }) .await?; diff --git a/proxy/src/control_plane/mod.rs b/proxy/src/control_plane/mod.rs index 9bbd3f4fb7..5bfa24c92d 100644 --- a/proxy/src/control_plane/mod.rs +++ b/proxy/src/control_plane/mod.rs @@ -17,7 +17,6 @@ use crate::auth::backend::ComputeUserInfo; use crate::auth::backend::jwt::AuthRule; use crate::auth::{AuthError, IpPattern, check_peer_addr_is_in_list}; use crate::cache::{Cached, TimedLru}; -use crate::config::ComputeConfig; use crate::context::RequestContext; use crate::control_plane::messages::{ControlPlaneErrorMessage, MetricsAuxInfo}; use crate::intern::{AccountIdInt, EndpointIdInt, ProjectIdInt}; @@ -72,16 +71,6 @@ pub(crate) struct NodeInfo { pub(crate) aux: MetricsAuxInfo, } -impl NodeInfo { - pub(crate) async fn connect( - &self, - ctx: &RequestContext, - config: &ComputeConfig, - ) -> Result { - self.conn_info.connect(ctx, &self.aux, config).await - } -} - #[derive(Copy, Clone, Default, Debug)] pub(crate) struct AccessBlockerFlags { pub public_access_blocked: bool, diff --git a/proxy/src/proxy/connect_auth.rs b/proxy/src/proxy/connect_auth.rs new file mode 100644 index 0000000000..5a1d1ae314 --- /dev/null +++ b/proxy/src/proxy/connect_auth.rs @@ -0,0 +1,82 @@ +use thiserror::Error; + +use crate::auth::Backend; +use crate::auth::backend::ComputeUserInfo; +use crate::cache::Cache; +use crate::compute::{AuthInfo, ComputeConnection, ConnectionError, PostgresError}; +use crate::config::ProxyConfig; +use crate::context::RequestContext; +use crate::control_plane::client::ControlPlaneClient; +use crate::error::{ReportableError, UserFacingError}; +use crate::proxy::connect_compute::{TlsNegotiation, connect_to_compute}; +use crate::proxy::retry::ShouldRetryWakeCompute; + +#[derive(Debug, Error)] +pub enum AuthError { + #[error(transparent)] + Auth(#[from] PostgresError), + #[error(transparent)] + Connect(#[from] ConnectionError), +} + +impl UserFacingError for AuthError { + fn to_string_client(&self) -> String { + match self { + AuthError::Auth(postgres_error) => 
postgres_error.to_string_client(), + AuthError::Connect(connection_error) => connection_error.to_string_client(), + } + } +} + +impl ReportableError for AuthError { + fn get_error_kind(&self) -> crate::error::ErrorKind { + match self { + AuthError::Auth(postgres_error) => postgres_error.get_error_kind(), + AuthError::Connect(connection_error) => connection_error.get_error_kind(), + } + } +} + +/// Try to connect to the compute node, retrying if necessary. +#[tracing::instrument(skip_all)] +pub(crate) async fn connect_to_compute_and_auth( + ctx: &RequestContext, + config: &ProxyConfig, + user_info: &Backend<'_, ComputeUserInfo>, + auth_info: AuthInfo, + tls: TlsNegotiation, +) -> Result { + let mut attempt = 0; + + // NOTE: This is messy, but should hopefully be detangled with PGLB. + // We wanted to separate the concerns of **connect** to compute (a PGLB operation), + // from **authenticate** to compute (a NeonKeeper operation). + // + // This unfortunately removed retry handling for one error case where + // the compute was cached, and we connected, but the compute cache was actually stale + // and is associated with the wrong endpoint. We detect this when the **authentication** fails. + // As such, we retry once here if the `authenticate` function fails and the error is valid to retry. + loop { + attempt += 1; + let mut node = connect_to_compute(ctx, config, user_info, tls).await?; + + let res = auth_info.authenticate(ctx, &mut node).await; + match res { + Ok(()) => return Ok(node), + Err(e) => { + if attempt < 2 + && let Backend::ControlPlane(cplane, user_info) = user_info + && let ControlPlaneClient::ProxyV1(cplane_proxy_v1) = &**cplane + && e.should_retry_wake_compute() + { + tracing::warn!(error = ?e, "retrying wake compute"); + let key = user_info.endpoint_cache_key(); + cplane_proxy_v1.caches.node_info.invalidate(&key); + continue; + } + + return Err(e)?; + } + } + } +} diff --git a/proxy/src/proxy/connect_compute.rs b/proxy/src/proxy/connect_compute.rs index ce9774e3eb..1a4e5f77d2 100644 --- a/proxy/src/proxy/connect_compute.rs +++ b/proxy/src/proxy/connect_compute.rs @@ -1,18 +1,15 @@ -use async_trait::async_trait; use tokio::time; use tracing::{debug, info, warn}; use crate::compute::{self, COULD_NOT_CONNECT, ComputeConnection}; -use crate::config::{ComputeConfig, RetryConfig}; +use crate::config::{ComputeConfig, ProxyConfig, RetryConfig}; use crate::context::RequestContext; -use crate::control_plane::errors::WakeComputeError; use crate::control_plane::locks::ApiLocks; use crate::control_plane::{self, NodeInfo}; -use crate::error::ReportableError; use crate::metrics::{ ConnectOutcome, ConnectionFailureKind, Metrics, RetriesMetricGroup, RetryType, }; -use crate::proxy::retry::{CouldRetry, ShouldRetryWakeCompute, retry_after, should_retry}; +use crate::proxy::retry::{ShouldRetryWakeCompute, retry_after, should_retry}; use crate::proxy::wake_compute::{WakeComputeBackend, wake_compute}; use crate::types::Host; @@ -35,29 +32,32 @@ pub(crate) fn invalidate_cache(node_info: control_plane::CachedNodeInfo) -> Node node_info.invalidate() } -#[async_trait] pub(crate) trait ConnectMechanism { type Connection; - type ConnectError: ReportableError; - type Error: From; async fn connect_once( &self, ctx: &RequestContext, node_info: &control_plane::CachedNodeInfo, config: &ComputeConfig, - ) -> Result; + ) -> Result; } -pub(crate) struct TcpMechanism { +struct TcpMechanism<'a> { /// connect_to_compute concurrency lock - pub(crate) locks: &'static ApiLocks, + locks: &'a ApiLocks, + tls: 
TlsNegotiation, } -#[async_trait] -impl ConnectMechanism for TcpMechanism { +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum TlsNegotiation { + /// TLS is assumed + Direct, + /// We must ask for TLS using the postgres SSLRequest message + Postgres, +} + +impl ConnectMechanism for TcpMechanism<'_> { type Connection = ComputeConnection; - type ConnectError = compute::ConnectionError; - type Error = compute::ConnectionError; #[tracing::instrument(skip_all, fields( pid = tracing::field::Empty, @@ -68,25 +68,47 @@ impl ConnectMechanism for TcpMechanism { ctx: &RequestContext, node_info: &control_plane::CachedNodeInfo, config: &ComputeConfig, - ) -> Result { + ) -> Result { let permit = self.locks.get_permit(&node_info.conn_info.host).await?; - permit.release_result(node_info.connect(ctx, config).await) + + permit.release_result( + node_info + .conn_info + .connect(ctx, &node_info.aux, config, self.tls) + .await, + ) } } /// Try to connect to the compute node, retrying if necessary. #[tracing::instrument(skip_all)] -pub(crate) async fn connect_to_compute( +pub(crate) async fn connect_to_compute( + ctx: &RequestContext, + config: &ProxyConfig, + user_info: &B, + tls: TlsNegotiation, +) -> Result { + connect_to_compute_inner( + ctx, + &TcpMechanism { + locks: &config.connect_compute_locks, + tls, + }, + user_info, + config.wake_compute_retry_config, + &config.connect_to_compute, + ) + .await +} + +/// Try to connect to the compute node, retrying if necessary. +pub(crate) async fn connect_to_compute_inner( ctx: &RequestContext, mechanism: &M, user_info: &B, wake_compute_retry_config: RetryConfig, compute: &ComputeConfig, -) -> Result -where - M::ConnectError: CouldRetry + ShouldRetryWakeCompute + std::fmt::Debug, - M::Error: From, -{ +) -> Result { let mut num_retries = 0; let node_info = wake_compute(&mut num_retries, ctx, user_info, wake_compute_retry_config).await?; @@ -120,7 +142,7 @@ where }, num_retries.into(), ); - return Err(err.into()); + return Err(err); } node_info } else { @@ -161,7 +183,7 @@ where }, num_retries.into(), ); - return Err(e.into()); + return Err(e); } warn!(error = ?e, num_retries, retriable = true, COULD_NOT_CONNECT); diff --git a/proxy/src/proxy/mod.rs b/proxy/src/proxy/mod.rs index 053726505d..b42457cd95 100644 --- a/proxy/src/proxy/mod.rs +++ b/proxy/src/proxy/mod.rs @@ -1,6 +1,7 @@ #[cfg(test)] mod tests; +pub(crate) mod connect_auth; pub(crate) mod connect_compute; pub(crate) mod retry; pub(crate) mod wake_compute; @@ -23,17 +24,13 @@ use tokio::net::TcpStream; use tokio::sync::oneshot; use tracing::Instrument; -use crate::cache::Cache; use crate::cancellation::{CancelClosure, CancellationHandler}; use crate::compute::{ComputeConnection, PostgresError, RustlsStream}; use crate::config::ProxyConfig; use crate::context::RequestContext; -use crate::control_plane::client::ControlPlaneClient; pub use crate::pglb::copy_bidirectional::{ErrorSource, copy_bidirectional_client_compute}; use crate::pglb::{ClientMode, ClientRequestError}; use crate::pqproto::{BeMessage, CancelKeyData, StartupMessageParams}; -use crate::proxy::connect_compute::{TcpMechanism, connect_to_compute}; -use crate::proxy::retry::ShouldRetryWakeCompute; use crate::rate_limiter::EndpointRateLimiter; use crate::stream::{PqStream, Stream}; use crate::types::EndpointCacheKey; @@ -95,61 +92,24 @@ pub(crate) async fn handle_client( let mut auth_info = compute::AuthInfo::with_auth_keys(creds.keys); auth_info.set_startup_params(params, params_compat); - let mut node; - let mut attempt = 0; - let connect = 
TcpMechanism { - locks: &config.connect_compute_locks, - }; let backend = auth::Backend::ControlPlane(cplane, creds.info); - // NOTE: This is messy, but should hopefully be detangled with PGLB. - // We wanted to separate the concerns of **connect** to compute (a PGLB operation), - // from **authenticate** to compute (a NeonKeeper operation). - // - // This unfortunately removed retry handling for one error case where - // the compute was cached, and we connected, but the compute cache was actually stale - // and is associated with the wrong endpoint. We detect this when the **authentication** fails. - // As such, we retry once here if the `authenticate` function fails and the error is valid to retry. - loop { - attempt += 1; + // TODO: callback to pglb + let res = connect_auth::connect_to_compute_and_auth( + ctx, + config, + &backend, + auth_info, + connect_compute::TlsNegotiation::Postgres, + ) + .await; - // TODO: callback to pglb - let res = connect_to_compute( - ctx, - &connect, - &backend, - config.wake_compute_retry_config, - &config.connect_to_compute, - ) - .await; + let mut node = match res { + Ok(node) => node, + Err(e) => Err(client.throw_error(e, Some(ctx)).await)?, + }; - match res { - Ok(n) => node = n, - Err(e) => return Err(client.throw_error(e, Some(ctx)).await)?, - } - - let auth::Backend::ControlPlane(cplane, user_info) = &backend else { - unreachable!("ensured above"); - }; - - let res = auth_info.authenticate(ctx, &mut node).await; - match res { - Ok(()) => { - send_client_greeting(ctx, &config.greetings, client); - break; - } - Err(e) if attempt < 2 && e.should_retry_wake_compute() => { - tracing::warn!(error = ?e, "retrying wake compute"); - - #[allow(irrefutable_let_patterns)] - if let ControlPlaneClient::ProxyV1(cplane_proxy_v1) = &**cplane { - let key = user_info.endpoint_cache_key(); - cplane_proxy_v1.caches.node_info.invalidate(&key); - } - } - Err(e) => Err(client.throw_error(e, Some(ctx)).await)?, - } - } + send_client_greeting(ctx, &config.greetings, client); let auth::Backend::ControlPlane(_, user_info) = backend else { unreachable!("ensured above"); diff --git a/proxy/src/proxy/retry.rs b/proxy/src/proxy/retry.rs index b06c3be72c..876d252517 100644 --- a/proxy/src/proxy/retry.rs +++ b/proxy/src/proxy/retry.rs @@ -31,18 +31,6 @@ impl CouldRetry for io::Error { } } -impl CouldRetry for postgres_client::error::DbError { - fn could_retry(&self) -> bool { - use postgres_client::error::SqlState; - matches!( - self.code(), - &SqlState::CONNECTION_FAILURE - | &SqlState::CONNECTION_EXCEPTION - | &SqlState::CONNECTION_DOES_NOT_EXIST - | &SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, - ) - } -} impl ShouldRetryWakeCompute for postgres_client::error::DbError { fn should_retry_wake_compute(&self) -> bool { use postgres_client::error::SqlState; @@ -73,17 +61,6 @@ impl ShouldRetryWakeCompute for postgres_client::error::DbError { } } -impl CouldRetry for postgres_client::Error { - fn could_retry(&self) -> bool { - if let Some(io_err) = self.source().and_then(|x| x.downcast_ref()) { - io::Error::could_retry(io_err) - } else if let Some(db_err) = self.source().and_then(|x| x.downcast_ref()) { - postgres_client::error::DbError::could_retry(db_err) - } else { - false - } - } -} impl ShouldRetryWakeCompute for postgres_client::Error { fn should_retry_wake_compute(&self) -> bool { if let Some(db_err) = self.source().and_then(|x| x.downcast_ref()) { @@ -102,6 +79,8 @@ impl CouldRetry for compute::ConnectionError { compute::ConnectionError::TlsError(err) => 
err.could_retry(), compute::ConnectionError::WakeComputeError(err) => err.could_retry(), compute::ConnectionError::TooManyConnectionAttempts(_) => false, + #[cfg(test)] + compute::ConnectionError::TestError { retryable, .. } => *retryable, } } } @@ -110,6 +89,8 @@ impl ShouldRetryWakeCompute for compute::ConnectionError { match self { // the cache entry was not checked for validity compute::ConnectionError::TooManyConnectionAttempts(_) => false, + #[cfg(test)] + compute::ConnectionError::TestError { wakeable, .. } => *wakeable, _ => true, } } diff --git a/proxy/src/proxy/tests/mod.rs b/proxy/src/proxy/tests/mod.rs index f8bff450e1..d1084628b1 100644 --- a/proxy/src/proxy/tests/mod.rs +++ b/proxy/src/proxy/tests/mod.rs @@ -24,13 +24,13 @@ use crate::context::RequestContext; use crate::control_plane::client::{ControlPlaneClient, TestControlPlaneClient}; use crate::control_plane::messages::{ControlPlaneErrorMessage, Details, MetricsAuxInfo, Status}; use crate::control_plane::{self, CachedNodeInfo, NodeInfo, NodeInfoCache}; -use crate::error::{ErrorKind, ReportableError}; +use crate::error::ErrorKind; use crate::pglb::ERR_INSECURE_CONNECTION; use crate::pglb::handshake::{HandshakeData, handshake}; use crate::pqproto::BeMessage; use crate::proxy::NeonOptions; -use crate::proxy::connect_compute::{ConnectMechanism, connect_to_compute}; -use crate::proxy::retry::{ShouldRetryWakeCompute, retry_after}; +use crate::proxy::connect_compute::{ConnectMechanism, connect_to_compute_inner}; +use crate::proxy::retry::retry_after; use crate::stream::{PqStream, Stream}; use crate::tls::client_config::compute_client_config_with_certs; use crate::tls::server_config::CertResolver; @@ -430,71 +430,36 @@ impl TestConnectMechanism { #[derive(Debug)] struct TestConnection; -#[derive(Debug)] -struct TestConnectError { - retryable: bool, - wakeable: bool, - kind: crate::error::ErrorKind, -} - -impl ReportableError for TestConnectError { - fn get_error_kind(&self) -> crate::error::ErrorKind { - self.kind - } -} - -impl std::fmt::Display for TestConnectError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{self:?}") - } -} - -impl std::error::Error for TestConnectError {} - -impl CouldRetry for TestConnectError { - fn could_retry(&self) -> bool { - self.retryable - } -} -impl ShouldRetryWakeCompute for TestConnectError { - fn should_retry_wake_compute(&self) -> bool { - self.wakeable - } -} - -#[async_trait] impl ConnectMechanism for TestConnectMechanism { type Connection = TestConnection; - type ConnectError = TestConnectError; - type Error = anyhow::Error; async fn connect_once( &self, _ctx: &RequestContext, _node_info: &control_plane::CachedNodeInfo, _config: &ComputeConfig, - ) -> Result { + ) -> Result { let mut counter = self.counter.lock().unwrap(); let action = self.sequence[*counter]; *counter += 1; match action { ConnectAction::Connect => Ok(TestConnection), - ConnectAction::Retry => Err(TestConnectError { + ConnectAction::Retry => Err(compute::ConnectionError::TestError { retryable: true, wakeable: true, kind: ErrorKind::Compute, }), - ConnectAction::RetryNoWake => Err(TestConnectError { + ConnectAction::RetryNoWake => Err(compute::ConnectionError::TestError { retryable: true, wakeable: false, kind: ErrorKind::Compute, }), - ConnectAction::Fail => Err(TestConnectError { + ConnectAction::Fail => Err(compute::ConnectionError::TestError { retryable: false, wakeable: true, kind: ErrorKind::Compute, }), - ConnectAction::FailNoWake => Err(TestConnectError { + 
ConnectAction::FailNoWake => Err(compute::ConnectionError::TestError { retryable: false, wakeable: false, kind: ErrorKind::Compute, @@ -620,7 +585,7 @@ async fn connect_to_compute_success() { let mechanism = TestConnectMechanism::new(vec![Wake, Connect]); let user_info = helper_create_connect_info(&mechanism); let config = config(); - connect_to_compute(&ctx, &mechanism, &user_info, config.retry, &config) + connect_to_compute_inner(&ctx, &mechanism, &user_info, config.retry, &config) .await .unwrap(); mechanism.verify(); @@ -634,7 +599,7 @@ async fn connect_to_compute_retry() { let mechanism = TestConnectMechanism::new(vec![Wake, Retry, Wake, Connect]); let user_info = helper_create_connect_info(&mechanism); let config = config(); - connect_to_compute(&ctx, &mechanism, &user_info, config.retry, &config) + connect_to_compute_inner(&ctx, &mechanism, &user_info, config.retry, &config) .await .unwrap(); mechanism.verify(); @@ -649,7 +614,7 @@ async fn connect_to_compute_non_retry_1() { let mechanism = TestConnectMechanism::new(vec![Wake, Retry, Wake, Fail]); let user_info = helper_create_connect_info(&mechanism); let config = config(); - connect_to_compute(&ctx, &mechanism, &user_info, config.retry, &config) + connect_to_compute_inner(&ctx, &mechanism, &user_info, config.retry, &config) .await .unwrap_err(); mechanism.verify(); @@ -664,7 +629,7 @@ async fn connect_to_compute_non_retry_2() { let mechanism = TestConnectMechanism::new(vec![Wake, Fail, Wake, Connect]); let user_info = helper_create_connect_info(&mechanism); let config = config(); - connect_to_compute(&ctx, &mechanism, &user_info, config.retry, &config) + connect_to_compute_inner(&ctx, &mechanism, &user_info, config.retry, &config) .await .unwrap(); mechanism.verify(); @@ -686,7 +651,7 @@ async fn connect_to_compute_non_retry_3() { backoff_factor: 2.0, }; let config = config(); - connect_to_compute( + connect_to_compute_inner( &ctx, &mechanism, &user_info, @@ -707,7 +672,7 @@ async fn wake_retry() { let mechanism = TestConnectMechanism::new(vec![WakeRetry, Wake, Connect]); let user_info = helper_create_connect_info(&mechanism); let config = config(); - connect_to_compute(&ctx, &mechanism, &user_info, config.retry, &config) + connect_to_compute_inner(&ctx, &mechanism, &user_info, config.retry, &config) .await .unwrap(); mechanism.verify(); @@ -722,7 +687,7 @@ async fn wake_non_retry() { let mechanism = TestConnectMechanism::new(vec![WakeRetry, WakeFail]); let user_info = helper_create_connect_info(&mechanism); let config = config(); - connect_to_compute(&ctx, &mechanism, &user_info, config.retry, &config) + connect_to_compute_inner(&ctx, &mechanism, &user_info, config.retry, &config) .await .unwrap_err(); mechanism.verify(); @@ -741,7 +706,7 @@ async fn fail_but_wake_invalidates_cache() { let user = helper_create_connect_info(&mech); let cfg = config(); - connect_to_compute(&ctx, &mech, &user, cfg.retry, &cfg) + connect_to_compute_inner(&ctx, &mech, &user, cfg.retry, &cfg) .await .unwrap(); @@ -762,7 +727,7 @@ async fn fail_no_wake_skips_cache_invalidation() { let user = helper_create_connect_info(&mech); let cfg = config(); - connect_to_compute(&ctx, &mech, &user, cfg.retry, &cfg) + connect_to_compute_inner(&ctx, &mech, &user, cfg.retry, &cfg) .await .unwrap(); @@ -783,7 +748,7 @@ async fn retry_but_wake_invalidates_cache() { let user_info = helper_create_connect_info(&mechanism); let cfg = config(); - connect_to_compute(&ctx, &mechanism, &user_info, cfg.retry, &cfg) + connect_to_compute_inner(&ctx, &mechanism, &user_info, 
cfg.retry, &cfg) .await .unwrap(); mechanism.verify(); @@ -806,7 +771,7 @@ async fn retry_no_wake_skips_invalidation() { let user_info = helper_create_connect_info(&mechanism); let cfg = config(); - connect_to_compute(&ctx, &mechanism, &user_info, cfg.retry, &cfg) + connect_to_compute_inner(&ctx, &mechanism, &user_info, cfg.retry, &cfg) .await .unwrap_err(); mechanism.verify(); @@ -829,7 +794,7 @@ async fn retry_no_wake_error_fast() { let user_info = helper_create_connect_info(&mechanism); let cfg = config(); - connect_to_compute(&ctx, &mechanism, &user_info, cfg.retry, &cfg) + connect_to_compute_inner(&ctx, &mechanism, &user_info, cfg.retry, &cfg) .await .unwrap_err(); mechanism.verify(); @@ -852,7 +817,7 @@ async fn retry_cold_wake_skips_invalidation() { let user_info = helper_create_connect_info(&mechanism); let cfg = config(); - connect_to_compute(&ctx, &mechanism, &user_info, cfg.retry, &cfg) + connect_to_compute_inner(&ctx, &mechanism, &user_info, cfg.retry, &cfg) .await .unwrap(); mechanism.verify(); diff --git a/proxy/src/serverless/backend.rs b/proxy/src/serverless/backend.rs index 31df7eb9f1..0987b6927f 100644 --- a/proxy/src/serverless/backend.rs +++ b/proxy/src/serverless/backend.rs @@ -1,17 +1,11 @@ -use std::io; -use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use std::time::Duration; -use async_trait::async_trait; use ed25519_dalek::SigningKey; use hyper_util::rt::{TokioExecutor, TokioIo, TokioTimer}; use jose_jwk::jose_b64; -use postgres_client::config::SslMode; +use postgres_client::maybe_tls_stream::MaybeTlsStream; use rand_core::OsRng; -use rustls::pki_types::{DnsName, ServerName}; -use tokio::net::{TcpStream, lookup_host}; -use tokio_rustls::TlsConnector; use tracing::field::display; use tracing::{debug, info}; @@ -21,23 +15,22 @@ use super::conn_pool_lib::{Client, ConnInfo, EndpointConnPool, GlobalConnPool}; use super::http_conn_pool::{self, HttpConnPool, LocalProxyClient, poll_http2_client}; use super::local_conn_pool::{self, EXT_NAME, EXT_SCHEMA, EXT_VERSION, LocalConnPool}; use crate::auth::backend::local::StaticAuthRules; -use crate::auth::backend::{ComputeCredentialKeys, ComputeCredentials, ComputeUserInfo}; +use crate::auth::backend::{ComputeCredentials, ComputeUserInfo}; use crate::auth::{self, AuthError}; +use crate::compute; use crate::compute_ctl::{ ComputeCtlError, ExtensionInstallRequest, Privilege, SetRoleGrantsRequest, }; -use crate::config::{ComputeConfig, ProxyConfig}; +use crate::config::ProxyConfig; use crate::context::RequestContext; -use crate::control_plane::CachedNodeInfo; use crate::control_plane::client::ApiLockError; use crate::control_plane::errors::{GetAuthInfoError, WakeComputeError}; -use crate::control_plane::locks::ApiLocks; use crate::error::{ErrorKind, ReportableError, UserFacingError}; use crate::intern::EndpointIdInt; -use crate::proxy::connect_compute::ConnectMechanism; -use crate::proxy::retry::{CouldRetry, ShouldRetryWakeCompute}; +use crate::pqproto::StartupMessageParams; +use crate::proxy::{connect_auth, connect_compute}; use crate::rate_limiter::EndpointRateLimiter; -use crate::types::{EndpointId, Host, LOCAL_PROXY_SUFFIX}; +use crate::types::{EndpointId, LOCAL_PROXY_SUFFIX}; pub(crate) struct PoolingBackend { pub(crate) http_conn_pool: @@ -186,20 +179,42 @@ impl PoolingBackend { tracing::Span::current().record("conn_id", display(conn_id)); info!(%conn_id, "pool: opening a new connection '{conn_info}'"); let backend = self.auth_backend.as_ref().map(|()| keys.info); - crate::proxy::connect_compute::connect_to_compute( + + 
let mut params = StartupMessageParams::default(); + params.insert("database", &conn_info.dbname); + params.insert("user", &conn_info.user_info.user); + + let mut auth_info = compute::AuthInfo::with_auth_keys(keys.keys); + auth_info.set_startup_params(¶ms, true); + + let node = connect_auth::connect_to_compute_and_auth( ctx, - &TokioMechanism { - conn_id, - conn_info, - pool: self.pool.clone(), - locks: &self.config.connect_compute_locks, - keys: keys.keys, - }, + self.config, &backend, - self.config.wake_compute_retry_config, - &self.config.connect_to_compute, + auth_info, + connect_compute::TlsNegotiation::Postgres, ) - .await + .await?; + + let (client, connection) = postgres_client::connect::managed( + node.stream, + Some(node.socket_addr.ip()), + postgres_client::config::Host::Tcp(node.hostname.to_string()), + node.socket_addr.port(), + node.ssl_mode, + Some(self.config.connect_to_compute.timeout), + ) + .await?; + + Ok(poll_client( + self.pool.clone(), + ctx, + conn_info, + client, + connection, + conn_id, + node.aux, + )) } // Wake up the destination if needed @@ -228,19 +243,38 @@ impl PoolingBackend { )), options: conn_info.user_info.options.clone(), }); - crate::proxy::connect_compute::connect_to_compute( + + let node = connect_compute::connect_to_compute( ctx, - &HyperMechanism { - conn_id, - conn_info, - pool: self.http_conn_pool.clone(), - locks: &self.config.connect_compute_locks, - }, + self.config, &backend, - self.config.wake_compute_retry_config, - &self.config.connect_to_compute, + connect_compute::TlsNegotiation::Direct, ) - .await + .await?; + + let stream = match node.stream.into_framed().into_inner() { + MaybeTlsStream::Raw(s) => Box::pin(s) as AsyncRW, + MaybeTlsStream::Tls(s) => Box::pin(s) as AsyncRW, + }; + + let (client, connection) = hyper::client::conn::http2::Builder::new(TokioExecutor::new()) + .timer(TokioTimer::new()) + .keep_alive_interval(Duration::from_secs(20)) + .keep_alive_while_idle(true) + .keep_alive_timeout(Duration::from_secs(5)) + .handshake(TokioIo::new(stream)) + .await + .map_err(LocalProxyConnError::H2)?; + + Ok(poll_http2_client( + self.http_conn_pool.clone(), + ctx, + &conn_info, + client, + connection, + conn_id, + node.aux.clone(), + )) } /// Connect to postgres over localhost. 
@@ -380,6 +414,8 @@ fn create_random_jwk() -> (SigningKey, jose_jwk::Key) { pub(crate) enum HttpConnError { #[error("pooled connection closed at inconsistent state")] ConnectionClosedAbruptly(#[from] tokio::sync::watch::error::SendError), + #[error("could not connect to compute")] + ConnectError(#[from] compute::ConnectionError), #[error("could not connect to postgres in compute")] PostgresConnectionError(#[from] postgres_client::Error), #[error("could not connect to local-proxy in compute")] @@ -399,10 +435,19 @@ pub(crate) enum HttpConnError { TooManyConnectionAttempts(#[from] ApiLockError), } +impl From for HttpConnError { + fn from(value: connect_auth::AuthError) -> Self { + match value { + connect_auth::AuthError::Auth(compute::PostgresError::Postgres(error)) => { + Self::PostgresConnectionError(error) + } + connect_auth::AuthError::Connect(error) => Self::ConnectError(error), + } + } +} + #[derive(Debug, thiserror::Error)] pub(crate) enum LocalProxyConnError { - #[error("error with connection to local-proxy")] - Io(#[source] std::io::Error), #[error("could not establish h2 connection")] H2(#[from] hyper::Error), } @@ -410,6 +455,7 @@ pub(crate) enum LocalProxyConnError { impl ReportableError for HttpConnError { fn get_error_kind(&self) -> ErrorKind { match self { + HttpConnError::ConnectError(_) => ErrorKind::Compute, HttpConnError::ConnectionClosedAbruptly(_) => ErrorKind::Compute, HttpConnError::PostgresConnectionError(p) => { if p.as_db_error().is_some() { @@ -434,6 +480,7 @@ impl ReportableError for HttpConnError { impl UserFacingError for HttpConnError { fn to_string_client(&self) -> String { match self { + HttpConnError::ConnectError(p) => p.to_string_client(), HttpConnError::ConnectionClosedAbruptly(_) => self.to_string(), HttpConnError::PostgresConnectionError(p) => p.to_string(), HttpConnError::LocalProxyConnectionError(p) => p.to_string(), @@ -449,36 +496,9 @@ impl UserFacingError for HttpConnError { } } -impl CouldRetry for HttpConnError { - fn could_retry(&self) -> bool { - match self { - HttpConnError::PostgresConnectionError(e) => e.could_retry(), - HttpConnError::LocalProxyConnectionError(e) => e.could_retry(), - HttpConnError::ComputeCtl(_) => false, - HttpConnError::ConnectionClosedAbruptly(_) => false, - HttpConnError::JwtPayloadError(_) => false, - HttpConnError::GetAuthInfo(_) => false, - HttpConnError::AuthError(_) => false, - HttpConnError::WakeCompute(_) => false, - HttpConnError::TooManyConnectionAttempts(_) => false, - } - } -} -impl ShouldRetryWakeCompute for HttpConnError { - fn should_retry_wake_compute(&self) -> bool { - match self { - HttpConnError::PostgresConnectionError(e) => e.should_retry_wake_compute(), - // we never checked cache validity - HttpConnError::TooManyConnectionAttempts(_) => false, - _ => true, - } - } -} - impl ReportableError for LocalProxyConnError { fn get_error_kind(&self) -> ErrorKind { match self { - LocalProxyConnError::Io(_) => ErrorKind::Compute, LocalProxyConnError::H2(_) => ErrorKind::Compute, } } @@ -489,215 +509,3 @@ impl UserFacingError for LocalProxyConnError { "Could not establish HTTP connection to the database".to_string() } } - -impl CouldRetry for LocalProxyConnError { - fn could_retry(&self) -> bool { - match self { - LocalProxyConnError::Io(_) => false, - LocalProxyConnError::H2(_) => false, - } - } -} -impl ShouldRetryWakeCompute for LocalProxyConnError { - fn should_retry_wake_compute(&self) -> bool { - match self { - LocalProxyConnError::Io(_) => false, - LocalProxyConnError::H2(_) => false, - } - } -} - 
-struct TokioMechanism { - pool: Arc>>, - conn_info: ConnInfo, - conn_id: uuid::Uuid, - keys: ComputeCredentialKeys, - - /// connect_to_compute concurrency lock - locks: &'static ApiLocks, -} - -#[async_trait] -impl ConnectMechanism for TokioMechanism { - type Connection = Client; - type ConnectError = HttpConnError; - type Error = HttpConnError; - - async fn connect_once( - &self, - ctx: &RequestContext, - node_info: &CachedNodeInfo, - compute_config: &ComputeConfig, - ) -> Result { - let permit = self.locks.get_permit(&node_info.conn_info.host).await?; - - let mut config = node_info.conn_info.to_postgres_client_config(); - let config = config - .user(&self.conn_info.user_info.user) - .dbname(&self.conn_info.dbname) - .connect_timeout(compute_config.timeout); - - if let ComputeCredentialKeys::AuthKeys(auth_keys) = self.keys { - config.auth_keys(auth_keys); - } - - let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute); - let res = config.connect(compute_config).await; - drop(pause); - let (client, connection) = permit.release_result(res)?; - - tracing::Span::current().record("pid", tracing::field::display(client.get_process_id())); - tracing::Span::current().record( - "compute_id", - tracing::field::display(&node_info.aux.compute_id), - ); - - if let Some(query_id) = ctx.get_testodrome_id() { - info!("latency={}, query_id={}", ctx.get_proxy_latency(), query_id); - } - - Ok(poll_client( - self.pool.clone(), - ctx, - self.conn_info.clone(), - client, - connection, - self.conn_id, - node_info.aux.clone(), - )) - } -} - -struct HyperMechanism { - pool: Arc>>, - conn_info: ConnInfo, - conn_id: uuid::Uuid, - - /// connect_to_compute concurrency lock - locks: &'static ApiLocks, -} - -#[async_trait] -impl ConnectMechanism for HyperMechanism { - type Connection = http_conn_pool::Client; - type ConnectError = HttpConnError; - type Error = HttpConnError; - - async fn connect_once( - &self, - ctx: &RequestContext, - node_info: &CachedNodeInfo, - config: &ComputeConfig, - ) -> Result { - let host_addr = node_info.conn_info.host_addr; - let host = &node_info.conn_info.host; - let permit = self.locks.get_permit(host).await?; - - let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute); - - let tls = if node_info.conn_info.ssl_mode == SslMode::Disable { - None - } else { - Some(&config.tls) - }; - - let port = node_info.conn_info.port; - let res = connect_http2(host_addr, host, port, config.timeout, tls).await; - drop(pause); - let (client, connection) = permit.release_result(res)?; - - tracing::Span::current().record( - "compute_id", - tracing::field::display(&node_info.aux.compute_id), - ); - - if let Some(query_id) = ctx.get_testodrome_id() { - info!("latency={}, query_id={}", ctx.get_proxy_latency(), query_id); - } - - Ok(poll_http2_client( - self.pool.clone(), - ctx, - &self.conn_info, - client, - connection, - self.conn_id, - node_info.aux.clone(), - )) - } -} - -async fn connect_http2( - host_addr: Option, - host: &str, - port: u16, - timeout: Duration, - tls: Option<&Arc>, -) -> Result< - ( - http_conn_pool::LocalProxyClient, - http_conn_pool::LocalProxyConnection, - ), - LocalProxyConnError, -> { - let addrs = match host_addr { - Some(addr) => vec![SocketAddr::new(addr, port)], - None => lookup_host((host, port)) - .await - .map_err(LocalProxyConnError::Io)? 
- .collect(), - }; - let mut last_err = None; - - let mut addrs = addrs.into_iter(); - let stream = loop { - let Some(addr) = addrs.next() else { - return Err(last_err.unwrap_or_else(|| { - LocalProxyConnError::Io(io::Error::new( - io::ErrorKind::InvalidInput, - "could not resolve any addresses", - )) - })); - }; - - match tokio::time::timeout(timeout, TcpStream::connect(addr)).await { - Ok(Ok(stream)) => { - stream.set_nodelay(true).map_err(LocalProxyConnError::Io)?; - break stream; - } - Ok(Err(e)) => { - last_err = Some(LocalProxyConnError::Io(e)); - } - Err(e) => { - last_err = Some(LocalProxyConnError::Io(io::Error::new( - io::ErrorKind::TimedOut, - e, - ))); - } - } - }; - - let stream = if let Some(tls) = tls { - let host = DnsName::try_from(host) - .map_err(io::Error::other) - .map_err(LocalProxyConnError::Io)? - .to_owned(); - let stream = TlsConnector::from(tls.clone()) - .connect(ServerName::DnsName(host), stream) - .await - .map_err(LocalProxyConnError::Io)?; - Box::pin(stream) as AsyncRW - } else { - Box::pin(stream) as AsyncRW - }; - - let (client, connection) = hyper::client::conn::http2::Builder::new(TokioExecutor::new()) - .timer(TokioTimer::new()) - .keep_alive_interval(Duration::from_secs(20)) - .keep_alive_while_idle(true) - .keep_alive_timeout(Duration::from_secs(5)) - .handshake(TokioIo::new(stream)) - .await?; - - Ok((client, connection)) -} From 643448b1a23f48bee3e0c9c679ad9b787e46b73d Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Thu, 24 Jul 2025 16:00:22 +0200 Subject: [PATCH 13/23] test_hot_standby_gc: work around standby_horizon-related flakiness/raciness uncovered by #12431 (#12704) PR #12431 set initial lease deadline = 0s for tests. This turned test_hot_standby_gc flaky because it now runs GC: it started failing with `tried to request a page version that was garbage collected` because the replica reads below applied gc cutoff. The leading theory is that, we run the timeline_gc() before the first standby_horizon push arrives at PS. That is definitively a thing that can happen with the current standby_horizon mechanism, and it's now tracked as such in https://databricks.atlassian.net/browse/LKB-2499. We don't have logs to confirm this theory though, but regardless, try the fix in this PR and see if it stabilizes things. Refs - flaky test issue: https://databricks.atlassian.net/browse/LKB-2465 ## Problem ## Summary of changes --- test_runner/regress/test_hot_standby.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/test_runner/regress/test_hot_standby.py b/test_runner/regress/test_hot_standby.py index 1ff61ce8dc..a329a5e842 100644 --- a/test_runner/regress/test_hot_standby.py +++ b/test_runner/regress/test_hot_standby.py @@ -133,6 +133,9 @@ def test_hot_standby_gc(neon_env_builder: NeonEnvBuilder, pause_apply: bool): tenant_conf = { # set PITR interval to be small, so we can do GC "pitr_interval": "0 s", + # we want to control gc and checkpoint frequency precisely + "gc_period": "0s", + "compaction_period": "0s", } env = neon_env_builder.init_start(initial_tenant_conf=tenant_conf) timeline_id = env.initial_timeline @@ -186,6 +189,23 @@ def test_hot_standby_gc(neon_env_builder: NeonEnvBuilder, pause_apply: bool): client = pageserver.http_client() client.timeline_checkpoint(tenant_shard_id, timeline_id) client.timeline_compact(tenant_shard_id, timeline_id) + # Wait for standby horizon to get propagated. + # This shouldn't be necessary, but the current mechanism for + # standby_horizon propagation is imperfect. 
Detailed + # description in https://databricks.atlassian.net/browse/LKB-2499 + while True: + val = client.get_metric_value( + "pageserver_standby_horizon", + { + "tenant_id": str(tenant_shard_id.tenant_id), + "shard_id": str(tenant_shard_id.shard_index), + "timeline_id": str(timeline_id), + }, + ) + log.info("waiting for next standby_horizon push from safekeeper, {val=}") + if val != 0: + break + time.sleep(0.1) client.timeline_gc(tenant_shard_id, timeline_id, 0) # Re-execute the query. The GetPage requests that this From 90cd5a5be85100dd27a782d0863db19bba498ef9 Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Thu, 24 Jul 2025 09:26:21 -0500 Subject: [PATCH 14/23] [BRC-1778] Add mechanism to `compute_ctl` to pull a new config (#12711) ## Problem We have been dealing with a number of issues with the SC compute notification mechanism. Various race conditions exist in the PG/HCC/cplane/PS distributed system, and relying on the SC to send notifications to the compute node to notify it of PS changes is not robust. We decided to pursue a more robust option where the compute node itself discovers whether it may be pointing to the incorrect PSs and proactively reconfigure itself if issues are suspected. ## Summary of changes To support this self-healing reconfiguration mechanism several pieces are needed. This PR adds a mechanism to `compute_ctl` called "refresh configuration", where the compute node reaches out to the control plane to pull a new config and reconfigure PG using the new config, instead of listening for a notification message containing a config to arrive from the control plane. Main changes to compute_ctl: 1. The `compute_ctl` state machine now has a new State, `RefreshConfigurationPending`. The compute node may enter this state upon receiving a signal that it may be using the incorrect page servers. 2. Upon entering the `RefreshConfigurationPending` state, the background configurator thread in `compute_ctl` wakes up, pulls a new config from the control plane, and reconfigures PG (with `pg_ctl reload`) according to the new config. 3. The compute node may enter the new `RefreshConfigurationPending` state from `Running` or `Failed` states. If the configurator managed to configure the compute node successfully, it will enter the `Running` state, otherwise, it stays in `RefreshConfigurationPending` and the configurator thread will wait for the next notification if an incorrect config is still suspected. 4. Added various plumbing in `compute_ctl` data structures to allow the configurator thread to perform the config fetch. The "incorrect config suspected" notification is delivered using a HTTP endpoint, `/refresh_configuration`, on `compute_ctl`. This endpoint is currently not called by anyone other than the tests. In a follow up PR I will set up some code in the PG extension/libpagestore to call this HTTP endpoint whenever PG suspects that it is pointing to the wrong page servers. ## How is this tested? Modified `test_runner/regress/test_change_pageserver.py` to add a scenario where we use the new `/refresh_configuration` mechanism instead of the existing `/configure` mechanism (which requires us sending a full config to compute_ctl) to have the compute node reload and reconfigure its pageservers. I took one shortcut to reduce the scope of this change when it comes to testing: the compute node uses a local config file instead of pulling a config over the network from the HCC. 
This simplifies the test setup in the following ways: * The existing test framework is set up to use local config files for compute nodes only, so it's convenient if I just stick with it. * The HCC today generates a compute config with production settings (e.g., assuming 4 CPUs, 16GB RAM, with local file caches), which is probably not suitable in tests. We may need to add another test-only endpoint config to the control plane to make this work. The config-fetch part of the code is relatively straightforward (and well-covered in both production and the KIND test) so it is probably fine to replace it with loading from the local config file for these integration tests. In addition to making sure that the tests pass, I also manually inspected the logs to make sure that the compute node is indeed reloading the config using the new mechanism instead of going down the old `/configure` path (it turns out the test has bugs which causes compute `/configure` messages to be sent despite the test intending to disable/blackhole them). ```test 2024-09-24T18:53:29.573650Z INFO http request{otel.name=/refresh_configuration http.method=POST}: serving /refresh_configuration POST request 2024-09-24T18:53:29.573689Z INFO configurator_main_loop: compute node suspects its configuration is out of date, now refreshing configuration 2024-09-24T18:53:29.573706Z INFO configurator_main_loop: reloading config.json from path: /workspaces/hadron/test_output/test_change_pageserver_using_refresh[release-pg16]/repo/endpoints/ep-1/spec.json PG:2024-09-24 18:53:29.574 GMT [52799] LOG: received SIGHUP, reloading configuration files PG:2024-09-24 18:53:29.575 GMT [52799] LOG: parameter "neon.extension_server_port" cannot be changed without restarting the server PG:2024-09-24 18:53:29.575 GMT [52799] LOG: parameter "neon.pageserver_connstring" changed to "postgresql://no_user@localhost:15008" ... 
``` Co-authored-by: William Huang --- compute_tools/README.md | 6 + compute_tools/src/bin/compute_ctl.rs | 3 + compute_tools/src/compute.rs | 12 +- compute_tools/src/configurator.rs | 104 ++++++++++++++++-- .../src/http/routes/refresh_configuration.rs | 20 ++-- compute_tools/src/http/server.rs | 4 +- control_plane/src/bin/neon_local.rs | 65 +++++++++++ control_plane/src/endpoint.rs | 53 ++++++++- libs/compute_api/src/responses.rs | 5 + test_runner/fixtures/neon_cli.py | 25 +++++ test_runner/fixtures/neon_fixtures.py | 8 ++ test_runner/regress/test_change_pageserver.py | 42 ++++++- 12 files changed, 315 insertions(+), 32 deletions(-) diff --git a/compute_tools/README.md b/compute_tools/README.md index 49f1368f0e..446b441c18 100644 --- a/compute_tools/README.md +++ b/compute_tools/README.md @@ -52,8 +52,14 @@ stateDiagram-v2 Init --> Running : Started Postgres Running --> TerminationPendingFast : Requested termination Running --> TerminationPendingImmediate : Requested termination + Running --> ConfigurationPending : Received a /configure request with spec + Running --> RefreshConfigurationPending : Received a /refresh_configuration request, compute node will pull a new spec and reconfigure + RefreshConfigurationPending --> Running : Compute has been re-configured TerminationPendingFast --> Terminated compute with 30s delay for cplane to inspect status TerminationPendingImmediate --> Terminated : Terminated compute immediately + Running --> TerminationPending : Requested termination + TerminationPending --> Terminated : Terminated compute + Failed --> RefreshConfigurationPending : Received a /refresh_configuration request Failed --> [*] : Compute exited Terminated --> [*] : Compute exited ``` diff --git a/compute_tools/src/bin/compute_ctl.rs b/compute_tools/src/bin/compute_ctl.rs index ee8a504429..83a2e6dc68 100644 --- a/compute_tools/src/bin/compute_ctl.rs +++ b/compute_tools/src/bin/compute_ctl.rs @@ -235,6 +235,9 @@ fn main() -> Result<()> { pg_isready_bin: get_pg_isready_bin(&cli.pgbin), instance_id: std::env::var("INSTANCE_ID").ok(), lakebase_mode: cli.lakebase_mode, + build_tag: BUILD_TAG.to_string(), + control_plane_uri: cli.control_plane_uri, + config_path_test_only: cli.config, }, config, )?; diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs index 56bf7b8632..e3ac887e9c 100644 --- a/compute_tools/src/compute.rs +++ b/compute_tools/src/compute.rs @@ -21,6 +21,7 @@ use postgres::NoTls; use postgres::error::SqlState; use remote_storage::{DownloadError, RemotePath}; use std::collections::{HashMap, HashSet}; +use std::ffi::OsString; use std::os::unix::fs::{PermissionsExt, symlink}; use std::path::Path; use std::process::{Command, Stdio}; @@ -120,6 +121,10 @@ pub struct ComputeNodeParams { // Path to the `pg_isready` binary. 
pub pg_isready_bin: String, pub lakebase_mode: bool, + + pub build_tag: String, + pub control_plane_uri: Option, + pub config_path_test_only: Option, } type TaskHandle = Mutex>>; @@ -1796,12 +1801,12 @@ impl ComputeNode { let states_allowing_configuration_refresh = [ ComputeStatus::Running, ComputeStatus::Failed, - // ComputeStatus::RefreshConfigurationPending, + ComputeStatus::RefreshConfigurationPending, ]; - let state = self.state.lock().expect("state lock poisoned"); + let mut state = self.state.lock().expect("state lock poisoned"); if states_allowing_configuration_refresh.contains(&state.status) { - // state.status = ComputeStatus::RefreshConfigurationPending; + state.status = ComputeStatus::RefreshConfigurationPending; self.state_changed.notify_all(); Ok(()) } else if state.status == ComputeStatus::Init { @@ -1988,6 +1993,7 @@ impl ComputeNode { // wait ComputeStatus::Init | ComputeStatus::Configuration + | ComputeStatus::RefreshConfigurationPending | ComputeStatus::Empty => { state = self.state_changed.wait(state).unwrap(); } diff --git a/compute_tools/src/configurator.rs b/compute_tools/src/configurator.rs index d97bd37285..864335fd2c 100644 --- a/compute_tools/src/configurator.rs +++ b/compute_tools/src/configurator.rs @@ -1,10 +1,12 @@ -use std::sync::Arc; +use std::fs::File; use std::thread; +use std::{path::Path, sync::Arc}; -use compute_api::responses::ComputeStatus; +use compute_api::responses::{ComputeConfig, ComputeStatus}; use tracing::{error, info, instrument}; -use crate::compute::ComputeNode; +use crate::compute::{ComputeNode, ParsedSpec}; +use crate::spec::get_config_from_control_plane; #[instrument(skip_all)] fn configurator_main_loop(compute: &Arc) { @@ -12,12 +14,22 @@ fn configurator_main_loop(compute: &Arc) { loop { let mut state = compute.state.lock().unwrap(); - // We have to re-check the status after re-acquiring the lock because it could be that - // the status has changed while we were waiting for the lock, and we might not need to - // wait on the condition variable. Otherwise, we might end up in some soft-/deadlock, i.e. - // we are waiting for a condition variable that will never be signaled. - if state.status != ComputeStatus::ConfigurationPending { - state = compute.state_changed.wait(state).unwrap(); + if compute.params.lakebase_mode { + while state.status != ComputeStatus::ConfigurationPending + && state.status != ComputeStatus::RefreshConfigurationPending + && state.status != ComputeStatus::Failed + { + info!("configurator: compute status: {:?}, sleeping", state.status); + state = compute.state_changed.wait(state).unwrap(); + } + } else { + // We have to re-check the status after re-acquiring the lock because it could be that + // the status has changed while we were waiting for the lock, and we might not need to + // wait on the condition variable. Otherwise, we might end up in some soft-/deadlock, i.e. + // we are waiting for a condition variable that will never be signaled. 
+ if state.status != ComputeStatus::ConfigurationPending { + state = compute.state_changed.wait(state).unwrap(); + } } // Re-check the status after waking up @@ -38,6 +50,80 @@ fn configurator_main_loop(compute: &Arc) { // std::thread::sleep(std::time::Duration::from_millis(10000)); compute.set_status(new_status); + } else if state.status == ComputeStatus::RefreshConfigurationPending { + info!( + "compute node suspects its configuration is out of date, now refreshing configuration" + ); + // Drop the lock guard here to avoid holding the lock while downloading spec from the control plane / HCC. + // This is the only thread that can move compute_ctl out of the `RefreshConfigurationPending` state, so it + // is safe to drop the lock like this. + drop(state); + + let spec = if let Some(config_path) = &compute.params.config_path_test_only { + // This path is only to make testing easier. In production we always get the spec from the HCC. + info!( + "reloading config.json from path: {}", + config_path.to_string_lossy() + ); + let path = Path::new(config_path); + if let Ok(file) = File::open(path) { + match serde_json::from_reader::(file) { + Ok(config) => config.spec, + Err(e) => { + error!("could not parse spec file: {}", e); + None + } + } + } else { + error!( + "could not open config file at path: {}", + config_path.to_string_lossy() + ); + None + } + } else if let Some(control_plane_uri) = &compute.params.control_plane_uri { + match get_config_from_control_plane(control_plane_uri, &compute.params.compute_id) { + Ok(config) => config.spec, + Err(e) => { + error!("could not get config from control plane: {}", e); + None + } + } + } else { + None + }; + + if let Some(spec) = spec { + if let Ok(pspec) = ParsedSpec::try_from(spec) { + { + let mut state = compute.state.lock().unwrap(); + // Defensive programming to make sure this thread is indeed the only one that can move the compute + // node out of the `RefreshConfigurationPending` state. Would be nice if we can encode this invariant + // into the type system. + assert_eq!(state.status, ComputeStatus::RefreshConfigurationPending); + // state.pspec is consumed by compute.reconfigure() below. Note that compute.reconfigure() will acquire + // the compute.state lock again so we need to have the lock guard go out of scope here. We could add a + // "locked" variant of compute.reconfigure() that takes the lock guard as an argument to make this cleaner, + // but it's not worth forking the codebase too much for this minor point alone right now. + state.pspec = Some(pspec); + } + match compute.reconfigure() { + Ok(_) => { + info!("Refresh configuration: compute node configured"); + compute.set_status(ComputeStatus::Running); + } + Err(e) => { + error!( + "Refresh configuration: could not configure compute node: {}", + e + ); + // Leave the compute node in the `RefreshConfigurationPending` state if the configuration + // was not successful. It should be okay to treat this situation the same as if the loop + // hasn't executed yet as long as the detection side keeps notifying. 
+ } + } + } + } } else if state.status == ComputeStatus::Failed { info!("compute node is now in Failed state, exiting"); break; diff --git a/compute_tools/src/http/routes/refresh_configuration.rs b/compute_tools/src/http/routes/refresh_configuration.rs index d00f5a285a..512abaa0a6 100644 --- a/compute_tools/src/http/routes/refresh_configuration.rs +++ b/compute_tools/src/http/routes/refresh_configuration.rs @@ -7,28 +7,22 @@ use axum::{ response::{IntoResponse, Response}, }; use http::StatusCode; -use tracing::debug; use crate::compute::ComputeNode; // use crate::hadron_metrics::POSTGRES_PAGESTREAM_REQUEST_ERRORS; use crate::http::JsonResponse; -// The /refresh_configuration POST method is used to nudge compute_ctl to pull a new spec -// from the HCC and attempt to reconfigure Postgres with the new spec. The method does not wait -// for the reconfiguration to complete. Rather, it simply delivers a signal that will cause -// configuration to be reloaded in a best effort manner. Invocation of this method does not -// guarantee that a reconfiguration will occur. The caller should consider keep sending this -// request while it believes that the compute configuration is out of date. +/// The /refresh_configuration POST method is used to nudge compute_ctl to pull a new spec +/// from the HCC and attempt to reconfigure Postgres with the new spec. The method does not wait +/// for the reconfiguration to complete. Rather, it simply delivers a signal that will cause +/// configuration to be reloaded in a best effort manner. Invocation of this method does not +/// guarantee that a reconfiguration will occur. The caller should consider keep sending this +/// request while it believes that the compute configuration is out of date. pub(in crate::http) async fn refresh_configuration( State(compute): State>, ) -> Response { - debug!("serving /refresh_configuration POST request"); - // POSTGRES_PAGESTREAM_REQUEST_ERRORS.inc(); match compute.signal_refresh_configuration().await { Ok(_) => StatusCode::OK.into_response(), - Err(e) => { - tracing::error!("error handling /refresh_configuration request: {}", e); - JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e) - } + Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e), } } diff --git a/compute_tools/src/http/server.rs b/compute_tools/src/http/server.rs index 9901a6de07..2fd3121f4f 100644 --- a/compute_tools/src/http/server.rs +++ b/compute_tools/src/http/server.rs @@ -23,11 +23,11 @@ use super::{ middleware::authorize::Authorize, routes::{ check_writability, configure, database_schema, dbs_and_roles, extension_server, extensions, - grants, insights, lfc, metrics, metrics_json, promote, status, terminate, + grants, hadron_liveness_probe, insights, lfc, metrics, metrics_json, promote, + refresh_configuration, status, terminate, }, }; use crate::compute::ComputeNode; -use crate::http::routes::{hadron_liveness_probe, refresh_configuration}; /// `compute_ctl` has two servers: internal and external. 
The internal server /// binds to the loopback interface and handles communication from clients on diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index f68bc1ed48..c126835066 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -560,7 +560,9 @@ enum EndpointCmd { Create(EndpointCreateCmdArgs), Start(EndpointStartCmdArgs), Reconfigure(EndpointReconfigureCmdArgs), + RefreshConfiguration(EndpointRefreshConfigurationArgs), Stop(EndpointStopCmdArgs), + UpdatePageservers(EndpointUpdatePageserversCmdArgs), GenerateJwt(EndpointGenerateJwtCmdArgs), } @@ -721,6 +723,13 @@ struct EndpointReconfigureCmdArgs { safekeepers: Option, } +#[derive(clap::Args)] +#[clap(about = "Refresh the endpoint's configuration by forcing it reload it's spec")] +struct EndpointRefreshConfigurationArgs { + #[clap(help = "Postgres endpoint id")] + endpoint_id: String, +} + #[derive(clap::Args)] #[clap(about = "Stop an endpoint")] struct EndpointStopCmdArgs { @@ -738,6 +747,16 @@ struct EndpointStopCmdArgs { mode: EndpointTerminateMode, } +#[derive(clap::Args)] +#[clap(about = "Update the pageservers in the spec file of the compute endpoint")] +struct EndpointUpdatePageserversCmdArgs { + #[clap(help = "Postgres endpoint id")] + endpoint_id: String, + + #[clap(short = 'p', long, help = "Specified pageserver id")] + pageserver_id: Option, +} + #[derive(clap::Args)] #[clap(about = "Generate a JWT for an endpoint")] struct EndpointGenerateJwtCmdArgs { @@ -1625,6 +1644,44 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res println!("Starting existing endpoint {endpoint_id}..."); endpoint.start(args).await?; } + EndpointCmd::UpdatePageservers(args) => { + let endpoint_id = &args.endpoint_id; + let endpoint = cplane + .endpoints + .get(endpoint_id.as_str()) + .with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?; + let pageservers = match args.pageserver_id { + Some(pageserver_id) => { + let pageserver = + PageServerNode::from_env(env, env.get_pageserver_conf(pageserver_id)?); + + vec![( + PageserverProtocol::Libpq, + pageserver.pg_connection_config.host().clone(), + pageserver.pg_connection_config.port(), + )] + } + None => { + let storage_controller = StorageController::from_env(env); + storage_controller + .tenant_locate(endpoint.tenant_id) + .await? 
+ .shards + .into_iter() + .map(|shard| { + ( + PageserverProtocol::Libpq, + Host::parse(&shard.listen_pg_addr) + .expect("Storage controller reported malformed host"), + shard.listen_pg_port, + ) + }) + .collect::>() + } + }; + + endpoint.update_pageservers_in_config(pageservers).await?; + } EndpointCmd::Reconfigure(args) => { let endpoint_id = &args.endpoint_id; let endpoint = cplane @@ -1678,6 +1735,14 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res .reconfigure(Some(pageservers), None, safekeepers, None) .await?; } + EndpointCmd::RefreshConfiguration(args) => { + let endpoint_id = &args.endpoint_id; + let endpoint = cplane + .endpoints + .get(endpoint_id.as_str()) + .with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?; + endpoint.refresh_configuration().await?; + } EndpointCmd::Stop(args) => { let endpoint_id = &args.endpoint_id; let endpoint = cplane diff --git a/control_plane/src/endpoint.rs b/control_plane/src/endpoint.rs index 4c569d7005..20dcf85562 100644 --- a/control_plane/src/endpoint.rs +++ b/control_plane/src/endpoint.rs @@ -937,7 +937,8 @@ impl Endpoint { | ComputeStatus::Configuration | ComputeStatus::TerminationPendingFast | ComputeStatus::TerminationPendingImmediate - | ComputeStatus::Terminated => { + | ComputeStatus::Terminated + | ComputeStatus::RefreshConfigurationPending => { bail!("unexpected compute status: {:?}", state.status) } } @@ -960,6 +961,29 @@ impl Endpoint { Ok(()) } + // Update the pageservers in the spec file of the endpoint. This is useful to test the spec refresh scenario. + pub async fn update_pageservers_in_config( + &self, + pageservers: Vec<(PageserverProtocol, Host, u16)>, + ) -> Result<()> { + let config_path = self.endpoint_path().join("config.json"); + let mut config: ComputeConfig = { + let file = std::fs::File::open(&config_path)?; + serde_json::from_reader(file)? 
+ }; + + let pageserver_connstring = Self::build_pageserver_connstr(&pageservers); + assert!(!pageserver_connstring.is_empty()); + let mut spec = config.spec.unwrap(); + spec.pageserver_connstring = Some(pageserver_connstring); + config.spec = Some(spec); + + let file = std::fs::File::create(&config_path)?; + serde_json::to_writer_pretty(file, &config)?; + + Ok(()) + } + // Call the /status HTTP API pub async fn get_status(&self) -> Result { let client = reqwest::Client::new(); @@ -1125,6 +1149,33 @@ impl Endpoint { Ok(response) } + pub async fn refresh_configuration(&self) -> Result<()> { + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(30)) + .build() + .unwrap(); + let response = client + .post(format!( + "http://{}:{}/refresh_configuration", + self.internal_http_address.ip(), + self.internal_http_address.port() + )) + .send() + .await?; + + let status = response.status(); + if !(status.is_client_error() || status.is_server_error()) { + Ok(()) + } else { + let url = response.url().to_owned(); + let msg = match response.text().await { + Ok(err_body) => format!("Error: {err_body}"), + Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url), + }; + Err(anyhow::anyhow!(msg)) + } + } + pub fn connstr(&self, user: &str, db_name: &str) -> String { format!( "postgresql://{}@{}:{}/{}", diff --git a/libs/compute_api/src/responses.rs b/libs/compute_api/src/responses.rs index 2ef1e6aab8..7efd94c76a 100644 --- a/libs/compute_api/src/responses.rs +++ b/libs/compute_api/src/responses.rs @@ -172,6 +172,8 @@ pub enum ComputeStatus { TerminationPendingImmediate, // Terminated Postgres Terminated, + // A spec refresh is being requested + RefreshConfigurationPending, } #[derive(Deserialize, Serialize)] @@ -193,6 +195,9 @@ impl Display for ComputeStatus { f.write_str("termination-pending-immediate") } ComputeStatus::Terminated => f.write_str("terminated"), + ComputeStatus::RefreshConfigurationPending => { + f.write_str("refresh-configuration-pending") + } } } } diff --git a/test_runner/fixtures/neon_cli.py b/test_runner/fixtures/neon_cli.py index 1931d6aaa5..d7634f24a4 100644 --- a/test_runner/fixtures/neon_cli.py +++ b/test_runner/fixtures/neon_cli.py @@ -633,6 +633,15 @@ class NeonLocalCli(AbstractNeonCli): args.extend(["--safekeepers", (",".join(map(str, safekeepers)))]) return self.raw_cli(args, check_return_code=check_return_code) + def endpoint_refresh_configuration( + self, + endpoint_id: str, + ) -> subprocess.CompletedProcess[str]: + args = ["endpoint", "refresh-configuration", endpoint_id] + res = self.raw_cli(args) + res.check_returncode() + return res + def endpoint_stop( self, endpoint_id: str, @@ -657,6 +666,22 @@ class NeonLocalCli(AbstractNeonCli): lsn: Lsn | None = None if lsn_str == "null" else Lsn(lsn_str) return lsn, proc + def endpoint_update_pageservers( + self, + endpoint_id: str, + pageserver_id: int | None = None, + ) -> subprocess.CompletedProcess[str]: + args = [ + "endpoint", + "update-pageservers", + endpoint_id, + ] + if pageserver_id is not None: + args.extend(["--pageserver-id", str(pageserver_id)]) + res = self.raw_cli(args) + res.check_returncode() + return res + def mappings_map_branch( self, name: str, tenant_id: TenantId, timeline_id: TimelineId ) -> subprocess.CompletedProcess[str]: diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index e7763de0e7..e02b3b12f8 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -4940,6 +4940,10 @@ class 
Endpoint(PgProtocol, LogUtils): self.endpoint_id, self.tenant_id, pageserver_id, self.active_safekeepers ) + def refresh_configuration(self): + assert self.endpoint_id is not None + self.env.neon_cli.endpoint_refresh_configuration(self.endpoint_id) + def respec(self, **kwargs: Any) -> None: """Update the endpoint.json file used by control_plane.""" # Read config @@ -4986,6 +4990,10 @@ class Endpoint(PgProtocol, LogUtils): log.debug("Updating compute config to: %s", json.dumps(config, indent=4)) json.dump(config, file, indent=4) + def update_pageservers_in_config(self, pageserver_id: int | None = None): + assert self.endpoint_id is not None + self.env.neon_cli.endpoint_update_pageservers(self.endpoint_id, pageserver_id) + def wait_for_migrations(self, wait_for: int = NUM_COMPUTE_MIGRATIONS) -> None: """ Wait for all compute migrations to be ran. Remember that migrations only diff --git a/test_runner/regress/test_change_pageserver.py b/test_runner/regress/test_change_pageserver.py index b004db310c..bcdccac14e 100644 --- a/test_runner/regress/test_change_pageserver.py +++ b/test_runner/regress/test_change_pageserver.py @@ -3,14 +3,35 @@ from __future__ import annotations import asyncio from typing import TYPE_CHECKING +import pytest from fixtures.log_helper import log +from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.remote_storage import RemoteStorageKind if TYPE_CHECKING: - from fixtures.neon_fixtures import NeonEnvBuilder + from fixtures.neon_fixtures import Endpoint, NeonEnvBuilder -def test_change_pageserver(neon_env_builder: NeonEnvBuilder): +def reconfigure_endpoint(endpoint: Endpoint, pageserver_id: int, use_explicit_reconfigure: bool): + # It's important that we always update config.json before issuing any reconfigure requests + # to make sure that PG-initiated config refresh doesn't mess things up by reverting to the old config. + endpoint.update_pageservers_in_config(pageserver_id=pageserver_id) + + # PG will eventually automatically refresh its configuration if it detects connectivity issues with pageservers. + # We also allow the test to explicitly request a reconfigure so that the test can be sure that the + # endpoint is running with the latest configuration. + # + # Note that explicit reconfiguration is not required for the system to function or for this test to pass. + # It is kept for reference as this is how this test used to work before the capability of initiating + # configuration refreshes was added to compute nodes. + if use_explicit_reconfigure: + endpoint.reconfigure(pageserver_id=pageserver_id) + + +@pytest.mark.parametrize("use_explicit_reconfigure_for_failover", [False, True]) +def test_change_pageserver( + neon_env_builder: NeonEnvBuilder, use_explicit_reconfigure_for_failover: bool +): """ A relatively low level test of reconfiguring a compute's pageserver at runtime. Usually this is all done via the storage controller, but this test will disable the storage controller's compute @@ -72,7 +93,10 @@ def test_change_pageserver(neon_env_builder: NeonEnvBuilder): execute("SELECT count(*) FROM foo") assert fetchone() == (100000,) - endpoint.reconfigure(pageserver_id=alt_pageserver_id) + # Reconfigure the endpoint to use the alt pageserver. We issue an explicit reconfigure request here + # regardless of test mode as this is testing the externally driven reconfiguration scenario, not the + # compute-initiated reconfiguration scenario upon detecting failures. 
+ reconfigure_endpoint(endpoint, pageserver_id=alt_pageserver_id, use_explicit_reconfigure=True) # Verify that the neon.pageserver_connstring GUC is set to the correct thing execute("SELECT setting FROM pg_settings WHERE name='neon.pageserver_connstring'") @@ -100,6 +124,12 @@ def test_change_pageserver(neon_env_builder: NeonEnvBuilder): env.storage_controller.node_configure(env.pageservers[1].id, {"availability": "Offline"}) env.storage_controller.reconcile_until_idle() + reconfigure_endpoint( + endpoint, + pageserver_id=env.pageservers[0].id, + use_explicit_reconfigure=use_explicit_reconfigure_for_failover, + ) + endpoint.reconfigure(pageserver_id=env.pageservers[0].id) execute("SELECT count(*) FROM foo") @@ -116,7 +146,11 @@ def test_change_pageserver(neon_env_builder: NeonEnvBuilder): await asyncio.sleep( 1 ) # Sleep for 1 second just to make sure we actually started our count(*) query - endpoint.reconfigure(pageserver_id=env.pageservers[1].id) + reconfigure_endpoint( + endpoint, + pageserver_id=env.pageservers[1].id, + use_explicit_reconfigure=use_explicit_reconfigure_for_failover, + ) def execute_count(): execute("SELECT count(*) FROM FOO") From 67ad420e26b9a03d9758e9c03b924d6333a61ecd Mon Sep 17 00:00:00 2001 From: John Spray Date: Thu, 24 Jul 2025 07:42:39 -0700 Subject: [PATCH 15/23] tests: turn down error rate in test_compute_pageserver_connection_stress (#12721) ## Problem Compute retries are finite (e.g. 5x in a basebackup) -- with a 50% failure rate we have pretty good chance of exceeding that and the test failing. Fixes: https://databricks.atlassian.net/browse/LKB-2278 ## Summary of changes - Turn connection error rate down to 20% Co-authored-by: John Spray --- test_runner/regress/test_bad_connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_runner/regress/test_bad_connection.py b/test_runner/regress/test_bad_connection.py index d31c0c95d3..3c30296e6e 100644 --- a/test_runner/regress/test_bad_connection.py +++ b/test_runner/regress/test_bad_connection.py @@ -26,7 +26,7 @@ def test_compute_pageserver_connection_stress(neon_env_builder: NeonEnvBuilder): # Enable failpoint before starting everything else up so that we exercise the retry # on fetching basebackup pageserver_http = env.pageserver.http_client() - pageserver_http.configure_failpoints(("simulated-bad-compute-connection", "50%return(15)")) + pageserver_http.configure_failpoints(("simulated-bad-compute-connection", "20%return(15)")) env.create_branch("test_compute_pageserver_connection_stress") endpoint = env.endpoints.create_start("test_compute_pageserver_connection_stress") From d79308822596afdf359de3f7b050ad77f9f7cfd4 Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Thu, 24 Jul 2025 16:48:35 +0200 Subject: [PATCH 16/23] pgxn: set `MACOSX_DEPLOYMENT_TARGET` (#12723) ## Problem Compiling `neon-pg-ext-v17` results in these linker warnings for `libcommunicator.a`: ``` $ make -j`nproc` -s neon-pg-ext-v17 Installing PostgreSQL v17 headers Compiling PostgreSQL v17 Compiling neon-specific Postgres extensions for v17 ld: warning: object file (/Users/erik.grinaker/Projects/neon/target/debug/libcommunicator.a[1159](25ac62e5b3c53843-curve25519.o)) was built for newer 'macOS' version (15.5) than being linked (15.0) ld: warning: object file (/Users/erik.grinaker/Projects/neon/target/debug/libcommunicator.a[1160](0bbbd18bda93c05b-aes_nohw.o)) was built for newer 'macOS' version (15.5) than being linked (15.0) ld: warning: object file 
(/Users/erik.grinaker/Projects/neon/target/debug/libcommunicator.a[1161](00c879ee3285a50d-montgomery.o)) was built for newer 'macOS' version (15.5) than being linked (15.0) [...] ``` ## Summary of changes Set `MACOSX_DEPLOYMENT_TARGET` to the current local SDK version (15.5 in this case), which links against object files for that version. --- pgxn/neon/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pgxn/neon/Makefile b/pgxn/neon/Makefile index 34cabaca62..958ca5c378 100644 --- a/pgxn/neon/Makefile +++ b/pgxn/neon/Makefile @@ -33,6 +33,10 @@ SHLIB_LINK = -lcurl UNAME_S := $(shell uname -s) ifeq ($(UNAME_S), Darwin) SHLIB_LINK += -framework Security -framework CoreFoundation -framework SystemConfiguration + + # Link against object files for the current macOS version, to avoid spurious linker warnings. + MACOSX_DEPLOYMENT_TARGET := $(shell xcrun --sdk macosx --show-sdk-version) + export MACOSX_DEPLOYMENT_TARGET endif EXTENSION = neon From 94b41b531b31dec2d065d1c892748f4ea93fc384 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 24 Jul 2025 17:14:47 +0100 Subject: [PATCH 17/23] storecon: Fix panic due to race with chaos migration on staging (#12727) ## Problem * Fixes LKB-743 We get regular assertion failures on staging caused by a race with chaos injector. If chaos injector decides to migrate a tenant shard between the background optimisation planning and applying optimisations then we attempt to migrate and already migrated shard and hit an assertion failure. ## Summary of changes @VladLazar fixed a variant of this issue by adding`validate_optimization` recently, however it didn't validate the specific property this other assertion requires. Fix is just to update it to cover all the expected properties. --- storage_controller/src/tenant_shard.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/storage_controller/src/tenant_shard.rs b/storage_controller/src/tenant_shard.rs index f60378470e..3eb54d714d 100644 --- a/storage_controller/src/tenant_shard.rs +++ b/storage_controller/src/tenant_shard.rs @@ -249,6 +249,10 @@ impl IntentState { } pub(crate) fn push_secondary(&mut self, scheduler: &mut Scheduler, new_secondary: NodeId) { + // Every assertion here should probably have a corresponding check in + // `validate_optimization` unless it is an invariant that should never be violated. Note + // that the lock is not held between planning optimizations and applying them so you have to + // assume any valid state transition of the intent state may have occurred assert!(!self.secondary.contains(&new_secondary)); assert!(self.attached != Some(new_secondary)); scheduler.update_node_ref_counts( @@ -1335,8 +1339,9 @@ impl TenantShard { true } - /// Check that the desired modifications to the intent state are compatible with - /// the current intent state + /// Check that the desired modifications to the intent state are compatible with the current + /// intent state. Note that the lock is not held between planning optimizations and applying + /// them so any valid state transition of the intent state may have occurred. 
fn validate_optimization(&self, optimization: &ScheduleOptimization) -> bool { match optimization.action { ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment { @@ -1352,6 +1357,9 @@ impl TenantShard { }) => { // It's legal to remove a secondary that is not present in the intent state !self.intent.secondary.contains(&new_node_id) + // Ensure the secondary hasn't already been promoted to attached by a concurrent + // optimization/migration. + && self.intent.attached != Some(new_node_id) } ScheduleOptimizationAction::CreateSecondary(new_node_id) => { !self.intent.secondary.contains(&new_node_id) From f391186aa7865adec0fd88cb7990a3a0ad73c60e Mon Sep 17 00:00:00 2001 From: Peter Bendel Date: Thu, 24 Jul 2025 18:26:54 +0200 Subject: [PATCH 18/23] TPC-C like periodic benchmark using benchbase (#12665) ## Problem We don't have a well-documented, periodic benchmark for TPC-C like OLTP workload. ## Summary of changes # Benchbase TPC-C-like Performance Results Runs TPC-C-like benchmarks on Neon databases using [Benchbase](https://github.com/cmu-db/benchbase). Docker images are built [here](https://github.com/neondatabase-labs/benchbase-docker-images) We run the benchmarks at different scale factors aligned with different compute sizes we offer to customers. For each scale factor, we determine a max rate (see Throughput in warmup phase) and then run the benchmark at a target rate of approx. 70 % of the max rate. We use different warehouse sizes which determine the working set size - it is optimized for LFC size of the respected pricing tier. Usually we should get LFC hit rates above 70 % for this setup and quite good, consistent (non-flaky) latencies. ## Expected performance as of first testing this | Tier | CU | Warehouses | Terminals | Max TPS | LFC size | Working set size | LFC hit rate | Median latency | p95 latency | |------------|------------|---------------|-----------|---------|----------|------------------|--------------|----------------|-------------| | free | 0.25-2 | 50 - 5 GB | 150 | 800 | 5 GB | 6.3 GB | 95 % | 170 ms | 600 ms | | serverless | 2-8 | 500 - 50 GB | 230 | 2000 | 26 GB | ?? GB | 91 % | 50 ms | 200 ms | | business | 2-16 | 1000 - 100 GB | 330 | 2900 | 51 GB | 50 GB | 72 % | 40 ms | 180 ms | Each run - first loads the database (not shown in the dashboard). - Then we run a warmup phase for 20 minutes to warm up the database and the LFC at unlimited target rate (max rate) (highest throughput but flaky latencies). The warmup phase can be used to determine the max rate and adjust it in the github workflow in case Neon is faster in the future. - Then we run the benchmark at a target rate of approx. 70 % of the max rate for 1 hour (expecting consistent latencies and throughput). ## Important notes on implementation: - we want to eventually publish the process how to reproduce these benchmarks - thus we want to reduce all dependencies necessary to run the benchmark, the only thing needed are - docker - the docker images referenced above for benchbase - python >= 3.9 to run some config generation steps and create diagrams - to reduce dependencies we deliberatly do NOT use some of our python fixture test infrastructure to make the dependency chain really small - so pls don't add a review comment "should reuse fixture xy" - we also upload all generator python scripts, generated bash shell scripts and configs as well as raw results to S3 bucket that we later want to publish once this benchmark is reviewed and approved. 
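For reference, the workload sizing described above reduces to a little arithmetic. The sketch below mirrors the constants defined in `generate_workload_size.py` (added in this PR); the example numbers are the business-tier row from the table above, and the helper name `size_workload` is only for illustration.

```python
import math

# Constants as defined in generate_workload_size.py (see the diff below).
BASE_TERMINALS = 130
TERMINALS_PER_WAREHOUSE = 0.2
OPTIMAL_RATE_FACTOR = 0.7  # benchmark phase targets ~70 % of the measured max rate


def size_workload(warehouses: int, max_rate: int) -> tuple[int, int]:
    """Return (terminals, target_rate) for a given scale factor (illustrative helper)."""
    terminals = math.ceil(BASE_TERMINALS + warehouses * TERMINALS_PER_WAREHOUSE)
    target_rate = math.ceil(max_rate * OPTIMAL_RATE_FACTOR)
    return terminals, target_rate


# Business tier from the table above: 1000 warehouses, measured max rate ~2900 TPS.
print(size_workload(1000, 2900))  # (330, 2030) -> 330 terminals, 2030 TPS target
```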
--- .github/workflows/benchbase_tpcc.yml | 384 ++++++++++++ .../generate_diagrams.py | 152 +++++ .../generate_workload_size.py | 339 ++++++++++ .../upload_results_to_perf_test_results.py | 591 ++++++++++++++++++ 4 files changed, 1466 insertions(+) create mode 100644 .github/workflows/benchbase_tpcc.yml create mode 100644 test_runner/performance/benchbase_tpc_c_helpers/generate_diagrams.py create mode 100644 test_runner/performance/benchbase_tpc_c_helpers/generate_workload_size.py create mode 100644 test_runner/performance/benchbase_tpc_c_helpers/upload_results_to_perf_test_results.py diff --git a/.github/workflows/benchbase_tpcc.yml b/.github/workflows/benchbase_tpcc.yml new file mode 100644 index 0000000000..3a36a97bb1 --- /dev/null +++ b/.github/workflows/benchbase_tpcc.yml @@ -0,0 +1,384 @@ +name: TPC-C like benchmark using benchbase + +on: + schedule: + # * is a special character in YAML so you have to quote this string + # ┌───────────── minute (0 - 59) + # │ ┌───────────── hour (0 - 23) + # │ │ ┌───────────── day of the month (1 - 31) + # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) + # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) + - cron: '0 6 * * *' # run once a day at 6 AM UTC + workflow_dispatch: # adds ability to run this manually + +defaults: + run: + shell: bash -euxo pipefail {0} + +concurrency: + # Allow only one workflow globally because we do not want to be too noisy in production environment + group: benchbase-tpcc-workflow + cancel-in-progress: false + +permissions: + contents: read + +jobs: + benchbase-tpcc: + strategy: + fail-fast: false # allow other variants to continue even if one fails + matrix: + include: + - warehouses: 50 # defines number of warehouses and is used to compute number of terminals + max_rate: 800 # measured max TPS at scale factor based on experiments. 
Adjust if performance is better/worse + min_cu: 0.25 # simulate free tier plan (0.25 -2 CU) + max_cu: 2 + - warehouses: 500 # serverless plan (2-8 CU) + max_rate: 2000 + min_cu: 2 + max_cu: 8 + - warehouses: 1000 # business plan (2-16 CU) + max_rate: 2900 + min_cu: 2 + max_cu: 16 + max-parallel: 1 # we want to run each workload size sequentially to avoid noisy neighbors + permissions: + contents: write + statuses: write + id-token: write # aws-actions/configure-aws-credentials + env: + PG_CONFIG: /tmp/neon/pg_install/v17/bin/pg_config + PSQL: /tmp/neon/pg_install/v17/bin/psql + PG_17_LIB_PATH: /tmp/neon/pg_install/v17/lib + POSTGRES_VERSION: 17 + runs-on: [ self-hosted, us-east-2, x64 ] + timeout-minutes: 1440 + + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Configure AWS credentials # necessary to download artefacts + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-region: eu-central-1 + role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }} + role-duration-seconds: 18000 # 5 hours is currently max associated with IAM role + + - name: Download Neon artifact + uses: ./.github/actions/download + with: + name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact + path: /tmp/neon/ + prefix: latest + aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }} + + - name: Create Neon Project + id: create-neon-project-tpcc + uses: ./.github/actions/neon-project-create + with: + region_id: aws-us-east-2 + postgres_version: ${{ env.POSTGRES_VERSION }} + compute_units: '[${{ matrix.min_cu }}, ${{ matrix.max_cu }}]' + api_key: ${{ secrets.NEON_PRODUCTION_API_KEY_4_BENCHMARKS }} + api_host: console.neon.tech # production (!) + + - name: Initialize Neon project + env: + BENCHMARK_TPCC_CONNSTR: ${{ steps.create-neon-project-tpcc.outputs.dsn }} + PROJECT_ID: ${{ steps.create-neon-project-tpcc.outputs.project_id }} + run: | + echo "Initializing Neon project with project_id: ${PROJECT_ID}" + export LD_LIBRARY_PATH=${PG_17_LIB_PATH} + + # Retry logic for psql connection with 1 minute sleep between attempts + for attempt in {1..3}; do + echo "Attempt ${attempt}/3: Creating extensions in Neon project" + if ${PSQL} "${BENCHMARK_TPCC_CONNSTR}" -c "CREATE EXTENSION IF NOT EXISTS neon; CREATE EXTENSION IF NOT EXISTS neon_utils;"; then + echo "Successfully created extensions" + break + else + echo "Failed to create extensions on attempt ${attempt}" + if [ ${attempt} -lt 3 ]; then + echo "Waiting 60 seconds before retry..." 
+ sleep 60 + else + echo "All attempts failed, exiting" + exit 1 + fi + fi + done + + echo "BENCHMARK_TPCC_CONNSTR=${BENCHMARK_TPCC_CONNSTR}" >> $GITHUB_ENV + + - name: Generate BenchBase workload configuration + env: + WAREHOUSES: ${{ matrix.warehouses }} + MAX_RATE: ${{ matrix.max_rate }} + run: | + echo "Generating BenchBase configs for warehouses: ${WAREHOUSES}, max_rate: ${MAX_RATE}" + + # Extract hostname and password from connection string + # Format: postgresql://username:password@hostname/database?params (no port for Neon) + HOSTNAME=$(echo "${BENCHMARK_TPCC_CONNSTR}" | sed -n 's|.*://[^:]*:[^@]*@\([^/]*\)/.*|\1|p') + PASSWORD=$(echo "${BENCHMARK_TPCC_CONNSTR}" | sed -n 's|.*://[^:]*:\([^@]*\)@.*|\1|p') + + echo "Extracted hostname: ${HOSTNAME}" + + # Use runner temp (NVMe) as working directory + cd "${RUNNER_TEMP}" + + # Copy the generator script + cp "${GITHUB_WORKSPACE}/test_runner/performance/benchbase_tpc_c_helpers/generate_workload_size.py" . + + # Generate configs and scripts + python3 generate_workload_size.py \ + --warehouses ${WAREHOUSES} \ + --max-rate ${MAX_RATE} \ + --hostname ${HOSTNAME} \ + --password ${PASSWORD} \ + --runner-arch ${{ runner.arch }} + + # Fix path mismatch: move generated configs and scripts to expected locations + mv ../configs ./configs + mv ../scripts ./scripts + + - name: Prepare database (load data) + env: + WAREHOUSES: ${{ matrix.warehouses }} + run: | + cd "${RUNNER_TEMP}" + + echo "Loading ${WAREHOUSES} warehouses into database..." + + # Run the loader script and capture output to log file while preserving stdout/stderr + ./scripts/load_${WAREHOUSES}_warehouses.sh 2>&1 | tee "load_${WAREHOUSES}_warehouses.log" + + echo "Database loading completed" + + - name: Run TPC-C benchmark (warmup phase, then benchmark at 70% of configuredmax TPS) + env: + WAREHOUSES: ${{ matrix.warehouses }} + run: | + cd "${RUNNER_TEMP}" + + echo "Running TPC-C benchmark with ${WAREHOUSES} warehouses..." + + # Run the optimal rate benchmark + ./scripts/execute_${WAREHOUSES}_warehouses_opt_rate.sh + + echo "Benchmark execution completed" + + - name: Run TPC-C benchmark (warmup phase, then ramp down TPS and up again in 5 minute intervals) + + env: + WAREHOUSES: ${{ matrix.warehouses }} + run: | + cd "${RUNNER_TEMP}" + + echo "Running TPC-C ramp-down-up with ${WAREHOUSES} warehouses..." + + # Run the optimal rate benchmark + ./scripts/execute_${WAREHOUSES}_warehouses_ramp_up.sh + + echo "Benchmark execution completed" + + - name: Process results (upload to test results database and generate diagrams) + env: + WAREHOUSES: ${{ matrix.warehouses }} + MIN_CU: ${{ matrix.min_cu }} + MAX_CU: ${{ matrix.max_cu }} + PROJECT_ID: ${{ steps.create-neon-project-tpcc.outputs.project_id }} + REVISION: ${{ github.sha }} + PERF_DB_CONNSTR: ${{ secrets.PERF_TEST_RESULT_CONNSTR }} + run: | + cd "${RUNNER_TEMP}" + + echo "Creating temporary Python environment for results processing..." + + # Create temporary virtual environment + python3 -m venv temp_results_env + source temp_results_env/bin/activate + + # Install required packages in virtual environment + pip install matplotlib pandas psycopg2-binary + + echo "Copying results processing scripts..." + + # Copy both processing scripts + cp "${GITHUB_WORKSPACE}/test_runner/performance/benchbase_tpc_c_helpers/generate_diagrams.py" . + cp "${GITHUB_WORKSPACE}/test_runner/performance/benchbase_tpc_c_helpers/upload_results_to_perf_test_results.py" . + + echo "Processing load phase metrics..." + + # Find and process load log + LOAD_LOG=$(find . 
-name "load_${WAREHOUSES}_warehouses.log" -type f | head -1) + if [ -n "$LOAD_LOG" ]; then + echo "Processing load metrics from: $LOAD_LOG" + python upload_results_to_perf_test_results.py \ + --load-log "$LOAD_LOG" \ + --run-type "load" \ + --warehouses "${WAREHOUSES}" \ + --min-cu "${MIN_CU}" \ + --max-cu "${MAX_CU}" \ + --project-id "${PROJECT_ID}" \ + --revision "${REVISION}" \ + --connection-string "${PERF_DB_CONNSTR}" + else + echo "Warning: Load log file not found: load_${WAREHOUSES}_warehouses.log" + fi + + echo "Processing warmup results for optimal rate..." + + # Find and process warmup results + WARMUP_CSV=$(find results_warmup -name "*.results.csv" -type f | head -1) + WARMUP_JSON=$(find results_warmup -name "*.summary.json" -type f | head -1) + + if [ -n "$WARMUP_CSV" ] && [ -n "$WARMUP_JSON" ]; then + echo "Generating warmup diagram from: $WARMUP_CSV" + python generate_diagrams.py \ + --input-csv "$WARMUP_CSV" \ + --output-svg "warmup_${WAREHOUSES}_warehouses_performance.svg" \ + --title-suffix "Warmup at max TPS" + + echo "Uploading warmup metrics from: $WARMUP_JSON" + python upload_results_to_perf_test_results.py \ + --summary-json "$WARMUP_JSON" \ + --results-csv "$WARMUP_CSV" \ + --run-type "warmup" \ + --min-cu "${MIN_CU}" \ + --max-cu "${MAX_CU}" \ + --project-id "${PROJECT_ID}" \ + --revision "${REVISION}" \ + --connection-string "${PERF_DB_CONNSTR}" + else + echo "Warning: Missing warmup results files (CSV: $WARMUP_CSV, JSON: $WARMUP_JSON)" + fi + + echo "Processing optimal rate results..." + + # Find and process optimal rate results + OPTRATE_CSV=$(find results_opt_rate -name "*.results.csv" -type f | head -1) + OPTRATE_JSON=$(find results_opt_rate -name "*.summary.json" -type f | head -1) + + if [ -n "$OPTRATE_CSV" ] && [ -n "$OPTRATE_JSON" ]; then + echo "Generating optimal rate diagram from: $OPTRATE_CSV" + python generate_diagrams.py \ + --input-csv "$OPTRATE_CSV" \ + --output-svg "benchmark_${WAREHOUSES}_warehouses_performance.svg" \ + --title-suffix "70% of max TPS" + + echo "Uploading optimal rate metrics from: $OPTRATE_JSON" + python upload_results_to_perf_test_results.py \ + --summary-json "$OPTRATE_JSON" \ + --results-csv "$OPTRATE_CSV" \ + --run-type "opt-rate" \ + --min-cu "${MIN_CU}" \ + --max-cu "${MAX_CU}" \ + --project-id "${PROJECT_ID}" \ + --revision "${REVISION}" \ + --connection-string "${PERF_DB_CONNSTR}" + else + echo "Warning: Missing optimal rate results files (CSV: $OPTRATE_CSV, JSON: $OPTRATE_JSON)" + fi + + echo "Processing warmup 2 results for ramp down/up phase..." + + # Find and process warmup results + WARMUP_CSV=$(find results_warmup -name "*.results.csv" -type f | tail -1) + WARMUP_JSON=$(find results_warmup -name "*.summary.json" -type f | tail -1) + + if [ -n "$WARMUP_CSV" ] && [ -n "$WARMUP_JSON" ]; then + echo "Generating warmup diagram from: $WARMUP_CSV" + python generate_diagrams.py \ + --input-csv "$WARMUP_CSV" \ + --output-svg "warmup_2_${WAREHOUSES}_warehouses_performance.svg" \ + --title-suffix "Warmup at max TPS" + + echo "Uploading warmup metrics from: $WARMUP_JSON" + python upload_results_to_perf_test_results.py \ + --summary-json "$WARMUP_JSON" \ + --results-csv "$WARMUP_CSV" \ + --run-type "warmup" \ + --min-cu "${MIN_CU}" \ + --max-cu "${MAX_CU}" \ + --project-id "${PROJECT_ID}" \ + --revision "${REVISION}" \ + --connection-string "${PERF_DB_CONNSTR}" + else + echo "Warning: Missing warmup results files (CSV: $WARMUP_CSV, JSON: $WARMUP_JSON)" + fi + + echo "Processing ramp results..." 
+ + # Find and process ramp results + RAMPUP_CSV=$(find results_ramp_up -name "*.results.csv" -type f | head -1) + RAMPUP_JSON=$(find results_ramp_up -name "*.summary.json" -type f | head -1) + + if [ -n "$RAMPUP_CSV" ] && [ -n "$RAMPUP_JSON" ]; then + echo "Generating ramp diagram from: $RAMPUP_CSV" + python generate_diagrams.py \ + --input-csv "$RAMPUP_CSV" \ + --output-svg "ramp_${WAREHOUSES}_warehouses_performance.svg" \ + --title-suffix "ramp TPS down and up in 5 minute intervals" + + echo "Uploading ramp metrics from: $RAMPUP_JSON" + python upload_results_to_perf_test_results.py \ + --summary-json "$RAMPUP_JSON" \ + --results-csv "$RAMPUP_CSV" \ + --run-type "ramp-up" \ + --min-cu "${MIN_CU}" \ + --max-cu "${MAX_CU}" \ + --project-id "${PROJECT_ID}" \ + --revision "${REVISION}" \ + --connection-string "${PERF_DB_CONNSTR}" + else + echo "Warning: Missing ramp results files (CSV: $RAMPUP_CSV, JSON: $RAMPUP_JSON)" + fi + + # Deactivate and clean up virtual environment + deactivate + rm -rf temp_results_env + rm upload_results_to_perf_test_results.py + + echo "Results processing completed and environment cleaned up" + + - name: Set date for upload + id: set-date + run: echo "date=$(date +%Y-%m-%d)" >> $GITHUB_OUTPUT + + - name: Configure AWS credentials # necessary to upload results + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-region: us-east-2 + role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }} + role-duration-seconds: 900 # 900 is minimum value + + - name: Upload benchmark results to S3 + env: + S3_BUCKET: neon-public-benchmark-results + S3_PREFIX: benchbase-tpc-c/${{ steps.set-date.outputs.date }}/${{ github.run_id }}/${{ matrix.warehouses }}-warehouses + run: | + echo "Redacting passwords from configuration files before upload..." + + # Mask all passwords in XML config files + find "${RUNNER_TEMP}/configs" -name "*.xml" -type f -exec sed -i 's|[^<]*|redacted|g' {} \; + + echo "Uploading benchmark results to s3://${S3_BUCKET}/${S3_PREFIX}/" + + # Upload the entire benchmark directory recursively + aws s3 cp --only-show-errors --recursive "${RUNNER_TEMP}" s3://${S3_BUCKET}/${S3_PREFIX}/ + + echo "Upload completed" + + - name: Delete Neon Project + if: ${{ always() }} + uses: ./.github/actions/neon-project-delete + with: + project_id: ${{ steps.create-neon-project-tpcc.outputs.project_id }} + api_key: ${{ secrets.NEON_PRODUCTION_API_KEY_4_BENCHMARKS }} + api_host: console.neon.tech # production (!) \ No newline at end of file diff --git a/test_runner/performance/benchbase_tpc_c_helpers/generate_diagrams.py b/test_runner/performance/benchbase_tpc_c_helpers/generate_diagrams.py new file mode 100644 index 0000000000..cf41a4ff59 --- /dev/null +++ b/test_runner/performance/benchbase_tpc_c_helpers/generate_diagrams.py @@ -0,0 +1,152 @@ +#!/usr/bin/env python3 +""" +Generate TPS and latency charts from BenchBase TPC-C results CSV files. + +This script reads a CSV file containing BenchBase results and generates two charts: +1. TPS (requests per second) over time +2. P95 and P99 latencies over time + +Both charts are combined in a single SVG file. 
+""" + +import argparse +import sys +from pathlib import Path + +import matplotlib.pyplot as plt # type: ignore[import-not-found] +import pandas as pd # type: ignore[import-untyped] + + +def load_results_csv(csv_file_path): + """Load BenchBase results CSV file into a pandas DataFrame.""" + try: + df = pd.read_csv(csv_file_path) + + # Validate required columns exist + required_columns = [ + "Time (seconds)", + "Throughput (requests/second)", + "95th Percentile Latency (millisecond)", + "99th Percentile Latency (millisecond)", + ] + + missing_columns = [col for col in required_columns if col not in df.columns] + if missing_columns: + print(f"Error: Missing required columns: {missing_columns}") + sys.exit(1) + + return df + + except FileNotFoundError: + print(f"Error: CSV file not found: {csv_file_path}") + sys.exit(1) + except pd.errors.EmptyDataError: + print(f"Error: CSV file is empty: {csv_file_path}") + sys.exit(1) + except Exception as e: + print(f"Error reading CSV file: {e}") + sys.exit(1) + + +def generate_charts(df, input_filename, output_svg_path, title_suffix=None): + """Generate combined TPS and latency charts and save as SVG.""" + + # Get the filename without extension for chart titles + file_label = Path(input_filename).stem + + # Build title ending with optional suffix + if title_suffix: + title_ending = f"{title_suffix} - {file_label}" + else: + title_ending = file_label + + # Create figure with two subplots + fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10)) + + # Chart 1: Time vs TPS + ax1.plot( + df["Time (seconds)"], + df["Throughput (requests/second)"], + linewidth=1, + color="blue", + alpha=0.7, + ) + ax1.set_xlabel("Time (seconds)") + ax1.set_ylabel("TPS (Requests Per Second)") + ax1.set_title(f"Benchbase TPC-C Like Throughput (TPS) - {title_ending}") + ax1.grid(True, alpha=0.3) + ax1.set_xlim(0, df["Time (seconds)"].max()) + + # Chart 2: Time vs P95 and P99 Latencies + ax2.plot( + df["Time (seconds)"], + df["95th Percentile Latency (millisecond)"], + linewidth=1, + color="orange", + alpha=0.7, + label="Latency P95", + ) + ax2.plot( + df["Time (seconds)"], + df["99th Percentile Latency (millisecond)"], + linewidth=1, + color="red", + alpha=0.7, + label="Latency P99", + ) + ax2.set_xlabel("Time (seconds)") + ax2.set_ylabel("Latency (ms)") + ax2.set_title(f"Benchbase TPC-C Like Latency - {title_ending}") + ax2.grid(True, alpha=0.3) + ax2.set_xlim(0, df["Time (seconds)"].max()) + ax2.legend() + + plt.tight_layout() + + # Save as SVG + try: + plt.savefig(output_svg_path, format="svg", dpi=300, bbox_inches="tight") + print(f"Charts saved to: {output_svg_path}") + except Exception as e: + print(f"Error saving SVG file: {e}") + sys.exit(1) + + +def main(): + """Main function to parse arguments and generate charts.""" + parser = argparse.ArgumentParser( + description="Generate TPS and latency charts from BenchBase TPC-C results CSV" + ) + parser.add_argument( + "--input-csv", type=str, required=True, help="Path to the input CSV results file" + ) + parser.add_argument( + "--output-svg", type=str, required=True, help="Path for the output SVG chart file" + ) + parser.add_argument( + "--title-suffix", + type=str, + required=False, + help="Optional suffix to add to chart titles (e.g., 'Warmup', 'Benchmark Phase')", + ) + + args = parser.parse_args() + + # Validate input file exists + if not Path(args.input_csv).exists(): + print(f"Error: Input CSV file does not exist: {args.input_csv}") + sys.exit(1) + + # Create output directory if it doesn't exist + output_path = 
Path(args.output_svg) + output_path.parent.mkdir(parents=True, exist_ok=True) + + # Load data and generate charts + df = load_results_csv(args.input_csv) + generate_charts(df, args.input_csv, args.output_svg, args.title_suffix) + + print(f"Successfully generated charts from {len(df)} data points") + + +if __name__ == "__main__": + main() diff --git a/test_runner/performance/benchbase_tpc_c_helpers/generate_workload_size.py b/test_runner/performance/benchbase_tpc_c_helpers/generate_workload_size.py new file mode 100644 index 0000000000..1549c74b87 --- /dev/null +++ b/test_runner/performance/benchbase_tpc_c_helpers/generate_workload_size.py @@ -0,0 +1,339 @@ +import argparse +import html +import math +import os +import sys +from pathlib import Path + +CONFIGS_DIR = Path("../configs") +SCRIPTS_DIR = Path("../scripts") + +# Constants +## TODO increase times after testing +WARMUP_TIME_SECONDS = 1200 # 20 minutes +BENCHMARK_TIME_SECONDS = 3600 # 1 hour +RAMP_STEP_TIME_SECONDS = 300 # 5 minutes +BASE_TERMINALS = 130 +TERMINALS_PER_WAREHOUSE = 0.2 +OPTIMAL_RATE_FACTOR = 0.7 # 70% of max rate +BATCH_SIZE = 1000 +LOADER_THREADS = 4 +TRANSACTION_WEIGHTS = "45,43,4,4,4" # NewOrder, Payment, OrderStatus, Delivery, StockLevel +# Ramp-up rate multipliers +RAMP_RATE_FACTORS = [1.5, 1.1, 0.9, 0.7, 0.6, 0.4, 0.6, 0.7, 0.9, 1.1] + +# Templates for XML configs +WARMUP_XML = """ + + POSTGRES + org.postgresql.Driver + jdbc:postgresql://{hostname}/neondb?sslmode=require&ApplicationName=tpcc&reWriteBatchedInserts=true + neondb_owner + {password} + true + TRANSACTION_READ_COMMITTED + {batch_size} + {warehouses} + 0 + {terminals} + + + + {transaction_weights} + unlimited + POISSON + ZIPFIAN + + + + NewOrder + Payment + OrderStatus + Delivery + StockLevel + + +""" + +MAX_RATE_XML = """ + + POSTGRES + org.postgresql.Driver + jdbc:postgresql://{hostname}/neondb?sslmode=require&ApplicationName=tpcc&reWriteBatchedInserts=true + neondb_owner + {password} + true + TRANSACTION_READ_COMMITTED + {batch_size} + {warehouses} + 0 + {terminals} + + + + {transaction_weights} + unlimited + POISSON + ZIPFIAN + + + + NewOrder + Payment + OrderStatus + Delivery + StockLevel + + +""" + +OPT_RATE_XML = """ + + POSTGRES + org.postgresql.Driver + jdbc:postgresql://{hostname}/neondb?sslmode=require&ApplicationName=tpcc&reWriteBatchedInserts=true + neondb_owner + {password} + true + TRANSACTION_READ_COMMITTED + {batch_size} + {warehouses} + 0 + {terminals} + + + + {opt_rate} + {transaction_weights} + POISSON + ZIPFIAN + + + + NewOrder + Payment + OrderStatus + Delivery + StockLevel + + +""" + +RAMP_UP_XML = """ + + POSTGRES + org.postgresql.Driver + jdbc:postgresql://{hostname}/neondb?sslmode=require&ApplicationName=tpcc&reWriteBatchedInserts=true + neondb_owner + {password} + true + TRANSACTION_READ_COMMITTED + {batch_size} + {warehouses} + 0 + {terminals} + +{works} + + + NewOrder + Payment + OrderStatus + Delivery + StockLevel + + +""" + +WORK_TEMPLATE = f""" \n \n {{rate}}\n {TRANSACTION_WEIGHTS}\n POISSON\n ZIPFIAN\n \n""" + +# Templates for shell scripts +EXECUTE_SCRIPT = """# Create results directories +mkdir -p results_warmup +mkdir -p results_{suffix} +chmod 777 results_warmup results_{suffix} + +# Run warmup phase +docker run --network=host --rm \ + -v $(pwd)/configs:/configs \ + -v $(pwd)/results_warmup:/results \ + {docker_image}\ + -b tpcc \ + -c /configs/execute_{warehouses}_warehouses_warmup.xml \ + -d /results \ + --create=false --load=false --execute=true + +# Run benchmark phase +docker run --network=host --rm \ + -v 
$(pwd)/configs:/configs \ + -v $(pwd)/results_{suffix}:/results \ + {docker_image}\ + -b tpcc \ + -c /configs/execute_{warehouses}_warehouses_{suffix}.xml \ + -d /results \ + --create=false --load=false --execute=true\n""" + +LOAD_XML = """ + + POSTGRES + org.postgresql.Driver + jdbc:postgresql://{hostname}/neondb?sslmode=require&ApplicationName=tpcc&reWriteBatchedInserts=true + neondb_owner + {password} + true + TRANSACTION_READ_COMMITTED + {batch_size} + {warehouses} + {loader_threads} + +""" + +LOAD_SCRIPT = """# Create results directory for loading +mkdir -p results_load +chmod 777 results_load + +docker run --network=host --rm \ + -v $(pwd)/configs:/configs \ + -v $(pwd)/results_load:/results \ + {docker_image}\ + -b tpcc \ + -c /configs/load_{warehouses}_warehouses.xml \ + -d /results \ + --create=true --load=true --execute=false\n""" + + +def write_file(path, content): + path.parent.mkdir(parents=True, exist_ok=True) + try: + with open(path, "w") as f: + f.write(content) + except OSError as e: + print(f"Error writing {path}: {e}") + sys.exit(1) + # If it's a shell script, set executable permission + if str(path).endswith(".sh"): + os.chmod(path, 0o755) + + +def escape_xml_password(password): + """Escape XML special characters in password.""" + return html.escape(password, quote=True) + + +def get_docker_arch_tag(runner_arch): + """Map GitHub Actions runner.arch to Docker image architecture tag.""" + arch_mapping = {"X64": "amd64", "ARM64": "arm64"} + return arch_mapping.get(runner_arch, "amd64") # Default to amd64 + + +def main(): + parser = argparse.ArgumentParser(description="Generate BenchBase workload configs and scripts.") + parser.add_argument("--warehouses", type=int, required=True, help="Number of warehouses") + parser.add_argument("--max-rate", type=int, required=True, help="Max rate (TPS)") + parser.add_argument("--hostname", type=str, required=True, help="Database hostname") + parser.add_argument("--password", type=str, required=True, help="Database password") + parser.add_argument( + "--runner-arch", type=str, required=True, help="GitHub Actions runner architecture" + ) + args = parser.parse_args() + + warehouses = args.warehouses + max_rate = args.max_rate + hostname = args.hostname + password = args.password + runner_arch = args.runner_arch + + # Escape password for safe XML insertion + escaped_password = escape_xml_password(password) + + # Get the appropriate Docker architecture tag + docker_arch = get_docker_arch_tag(runner_arch) + docker_image = f"ghcr.io/neondatabase-labs/benchbase-postgres:latest-{docker_arch}" + + opt_rate = math.ceil(max_rate * OPTIMAL_RATE_FACTOR) + # Calculate terminals as next rounded integer of 40% of warehouses + terminals = math.ceil(BASE_TERMINALS + warehouses * TERMINALS_PER_WAREHOUSE) + ramp_rates = [math.ceil(max_rate * factor) for factor in RAMP_RATE_FACTORS] + + # Write configs + write_file( + CONFIGS_DIR / f"execute_{warehouses}_warehouses_warmup.xml", + WARMUP_XML.format( + warehouses=warehouses, + hostname=hostname, + password=escaped_password, + terminals=terminals, + batch_size=BATCH_SIZE, + warmup_time=WARMUP_TIME_SECONDS, + transaction_weights=TRANSACTION_WEIGHTS, + ), + ) + write_file( + CONFIGS_DIR / f"execute_{warehouses}_warehouses_max_rate.xml", + MAX_RATE_XML.format( + warehouses=warehouses, + hostname=hostname, + password=escaped_password, + terminals=terminals, + batch_size=BATCH_SIZE, + benchmark_time=BENCHMARK_TIME_SECONDS, + transaction_weights=TRANSACTION_WEIGHTS, + ), + ) + write_file( + CONFIGS_DIR / 
f"execute_{warehouses}_warehouses_opt_rate.xml", + OPT_RATE_XML.format( + warehouses=warehouses, + opt_rate=opt_rate, + hostname=hostname, + password=escaped_password, + terminals=terminals, + batch_size=BATCH_SIZE, + benchmark_time=BENCHMARK_TIME_SECONDS, + transaction_weights=TRANSACTION_WEIGHTS, + ), + ) + + ramp_works = "".join([WORK_TEMPLATE.format(rate=rate) for rate in ramp_rates]) + write_file( + CONFIGS_DIR / f"execute_{warehouses}_warehouses_ramp_up.xml", + RAMP_UP_XML.format( + warehouses=warehouses, + works=ramp_works, + hostname=hostname, + password=escaped_password, + terminals=terminals, + batch_size=BATCH_SIZE, + ), + ) + + # Loader config + write_file( + CONFIGS_DIR / f"load_{warehouses}_warehouses.xml", + LOAD_XML.format( + warehouses=warehouses, + hostname=hostname, + password=escaped_password, + batch_size=BATCH_SIZE, + loader_threads=LOADER_THREADS, + ), + ) + + # Write scripts + for suffix in ["max_rate", "opt_rate", "ramp_up"]: + script = EXECUTE_SCRIPT.format( + warehouses=warehouses, suffix=suffix, docker_image=docker_image + ) + write_file(SCRIPTS_DIR / f"execute_{warehouses}_warehouses_{suffix}.sh", script) + + # Loader script + write_file( + SCRIPTS_DIR / f"load_{warehouses}_warehouses.sh", + LOAD_SCRIPT.format(warehouses=warehouses, docker_image=docker_image), + ) + + print(f"Generated configs and scripts for {warehouses} warehouses and max rate {max_rate}.") + + +if __name__ == "__main__": + main() diff --git a/test_runner/performance/benchbase_tpc_c_helpers/upload_results_to_perf_test_results.py b/test_runner/performance/benchbase_tpc_c_helpers/upload_results_to_perf_test_results.py new file mode 100644 index 0000000000..3706d14fd4 --- /dev/null +++ b/test_runner/performance/benchbase_tpc_c_helpers/upload_results_to_perf_test_results.py @@ -0,0 +1,591 @@ +#!/usr/bin/env python3 +# ruff: noqa +# we exclude the file from ruff because on the github runner we have python 3.9 and ruff +# is running with newer python 3.12 which suggests changes incompatible with python 3.9 +""" +Upload BenchBase TPC-C results from summary.json and results.csv files to perf_test_results database. + +This script extracts metrics from BenchBase *.summary.json and *.results.csv files and uploads them +to a PostgreSQL database table for performance tracking and analysis. 
+""" + +import argparse +import json +import re +import sys +from datetime import datetime, timezone +from pathlib import Path + +import pandas as pd # type: ignore[import-untyped] +import psycopg2 + + +def load_summary_json(json_file_path): + """Load summary.json file and return parsed data.""" + try: + with open(json_file_path) as f: + return json.load(f) + except FileNotFoundError: + print(f"Error: Summary JSON file not found: {json_file_path}") + sys.exit(1) + except json.JSONDecodeError as e: + print(f"Error: Invalid JSON in file {json_file_path}: {e}") + sys.exit(1) + except Exception as e: + print(f"Error loading JSON file {json_file_path}: {e}") + sys.exit(1) + + +def get_metric_info(metric_name): + """Get metric unit and report type for a given metric name.""" + metrics_config = { + "Throughput": {"unit": "req/s", "report_type": "higher_is_better"}, + "Goodput": {"unit": "req/s", "report_type": "higher_is_better"}, + "Measured Requests": {"unit": "requests", "report_type": "higher_is_better"}, + "95th Percentile Latency": {"unit": "µs", "report_type": "lower_is_better"}, + "Maximum Latency": {"unit": "µs", "report_type": "lower_is_better"}, + "Median Latency": {"unit": "µs", "report_type": "lower_is_better"}, + "Minimum Latency": {"unit": "µs", "report_type": "lower_is_better"}, + "25th Percentile Latency": {"unit": "µs", "report_type": "lower_is_better"}, + "90th Percentile Latency": {"unit": "µs", "report_type": "lower_is_better"}, + "99th Percentile Latency": {"unit": "µs", "report_type": "lower_is_better"}, + "75th Percentile Latency": {"unit": "µs", "report_type": "lower_is_better"}, + "Average Latency": {"unit": "µs", "report_type": "lower_is_better"}, + } + + return metrics_config.get(metric_name, {"unit": "", "report_type": "higher_is_better"}) + + +def extract_metrics(summary_data): + """Extract relevant metrics from summary JSON data.""" + metrics = [] + + # Direct top-level metrics + direct_metrics = { + "Throughput (requests/second)": "Throughput", + "Goodput (requests/second)": "Goodput", + "Measured Requests": "Measured Requests", + } + + for json_key, clean_name in direct_metrics.items(): + if json_key in summary_data: + metrics.append((clean_name, summary_data[json_key])) + + # Latency metrics from nested "Latency Distribution" object + if "Latency Distribution" in summary_data: + latency_data = summary_data["Latency Distribution"] + latency_metrics = { + "95th Percentile Latency (microseconds)": "95th Percentile Latency", + "Maximum Latency (microseconds)": "Maximum Latency", + "Median Latency (microseconds)": "Median Latency", + "Minimum Latency (microseconds)": "Minimum Latency", + "25th Percentile Latency (microseconds)": "25th Percentile Latency", + "90th Percentile Latency (microseconds)": "90th Percentile Latency", + "99th Percentile Latency (microseconds)": "99th Percentile Latency", + "75th Percentile Latency (microseconds)": "75th Percentile Latency", + "Average Latency (microseconds)": "Average Latency", + } + + for json_key, clean_name in latency_metrics.items(): + if json_key in latency_data: + metrics.append((clean_name, latency_data[json_key])) + + return metrics + + +def build_labels(summary_data, project_id): + """Build labels JSON object from summary data and project info.""" + labels = {} + + # Extract required label keys from summary data + label_keys = [ + "DBMS Type", + "DBMS Version", + "Benchmark Type", + "Final State", + "isolation", + "scalefactor", + "terminals", + ] + + for key in label_keys: + if key in summary_data: + labels[key] = 
summary_data[key] + + # Add project_id from workflow + labels["project_id"] = project_id + + return labels + + +def build_suit_name(scalefactor, terminals, run_type, min_cu, max_cu): + """Build the suit name according to specification.""" + return f"benchbase-tpc-c-{scalefactor}-{terminals}-{run_type}-{min_cu}-{max_cu}" + + +def convert_timestamp_to_utc(timestamp_ms): + """Convert millisecond timestamp to PostgreSQL-compatible UTC timestamp.""" + try: + dt = datetime.fromtimestamp(timestamp_ms / 1000.0, tz=timezone.utc) + return dt.isoformat() + except (ValueError, TypeError) as e: + print(f"Warning: Could not convert timestamp {timestamp_ms}: {e}") + return datetime.now(timezone.utc).isoformat() + + +def insert_metrics(conn, metrics_data): + """Insert metrics data into the perf_test_results table.""" + insert_query = """ + INSERT INTO perf_test_results + (suit, revision, platform, metric_name, metric_value, metric_unit, + metric_report_type, recorded_at_timestamp, labels) + VALUES (%(suit)s, %(revision)s, %(platform)s, %(metric_name)s, %(metric_value)s, + %(metric_unit)s, %(metric_report_type)s, %(recorded_at_timestamp)s, %(labels)s) + """ + + try: + with conn.cursor() as cursor: + cursor.executemany(insert_query, metrics_data) + conn.commit() + print(f"Successfully inserted {len(metrics_data)} metrics into perf_test_results") + + # Log some sample data for verification + if metrics_data: + print( + f"Sample metric: {metrics_data[0]['metric_name']} = {metrics_data[0]['metric_value']} {metrics_data[0]['metric_unit']}" + ) + + except Exception as e: + print(f"Error inserting metrics into database: {e}") + sys.exit(1) + + +def create_benchbase_results_details_table(conn): + """Create benchbase_results_details table if it doesn't exist.""" + create_table_query = """ + CREATE TABLE IF NOT EXISTS benchbase_results_details ( + id BIGSERIAL PRIMARY KEY, + suit TEXT, + revision CHAR(40), + platform TEXT, + recorded_at_timestamp TIMESTAMP WITH TIME ZONE, + requests_per_second NUMERIC, + average_latency_ms NUMERIC, + minimum_latency_ms NUMERIC, + p25_latency_ms NUMERIC, + median_latency_ms NUMERIC, + p75_latency_ms NUMERIC, + p90_latency_ms NUMERIC, + p95_latency_ms NUMERIC, + p99_latency_ms NUMERIC, + maximum_latency_ms NUMERIC + ); + + CREATE INDEX IF NOT EXISTS benchbase_results_details_recorded_at_timestamp_idx + ON benchbase_results_details USING BRIN (recorded_at_timestamp); + CREATE INDEX IF NOT EXISTS benchbase_results_details_suit_idx + ON benchbase_results_details USING BTREE (suit text_pattern_ops); + """ + + try: + with conn.cursor() as cursor: + cursor.execute(create_table_query) + conn.commit() + print("Successfully created/verified benchbase_results_details table") + except Exception as e: + print(f"Error creating benchbase_results_details table: {e}") + sys.exit(1) + + +def process_csv_results(csv_file_path, start_timestamp_ms, suit, revision, platform): + """Process CSV results and return data for database insertion.""" + try: + # Read CSV file + df = pd.read_csv(csv_file_path) + + # Validate required columns exist + required_columns = [ + "Time (seconds)", + "Throughput (requests/second)", + "Average Latency (millisecond)", + "Minimum Latency (millisecond)", + "25th Percentile Latency (millisecond)", + "Median Latency (millisecond)", + "75th Percentile Latency (millisecond)", + "90th Percentile Latency (millisecond)", + "95th Percentile Latency (millisecond)", + "99th Percentile Latency (millisecond)", + "Maximum Latency (millisecond)", + ] + + missing_columns = [col for col in 
required_columns if col not in df.columns] + if missing_columns: + print(f"Error: Missing required columns in CSV: {missing_columns}") + return [] + + csv_data = [] + + for _, row in df.iterrows(): + # Calculate timestamp: start_timestamp_ms + (time_seconds * 1000) + time_seconds = row["Time (seconds)"] + row_timestamp_ms = start_timestamp_ms + (time_seconds * 1000) + + # Convert to UTC timestamp + row_timestamp = datetime.fromtimestamp( + row_timestamp_ms / 1000.0, tz=timezone.utc + ).isoformat() + + csv_row = { + "suit": suit, + "revision": revision, + "platform": platform, + "recorded_at_timestamp": row_timestamp, + "requests_per_second": float(row["Throughput (requests/second)"]), + "average_latency_ms": float(row["Average Latency (millisecond)"]), + "minimum_latency_ms": float(row["Minimum Latency (millisecond)"]), + "p25_latency_ms": float(row["25th Percentile Latency (millisecond)"]), + "median_latency_ms": float(row["Median Latency (millisecond)"]), + "p75_latency_ms": float(row["75th Percentile Latency (millisecond)"]), + "p90_latency_ms": float(row["90th Percentile Latency (millisecond)"]), + "p95_latency_ms": float(row["95th Percentile Latency (millisecond)"]), + "p99_latency_ms": float(row["99th Percentile Latency (millisecond)"]), + "maximum_latency_ms": float(row["Maximum Latency (millisecond)"]), + } + csv_data.append(csv_row) + + print(f"Processed {len(csv_data)} rows from CSV file") + return csv_data + + except FileNotFoundError: + print(f"Error: CSV file not found: {csv_file_path}") + return [] + except Exception as e: + print(f"Error processing CSV file {csv_file_path}: {e}") + return [] + + +def insert_csv_results(conn, csv_data): + """Insert CSV results into benchbase_results_details table.""" + if not csv_data: + print("No CSV data to insert") + return + + insert_query = """ + INSERT INTO benchbase_results_details + (suit, revision, platform, recorded_at_timestamp, requests_per_second, + average_latency_ms, minimum_latency_ms, p25_latency_ms, median_latency_ms, + p75_latency_ms, p90_latency_ms, p95_latency_ms, p99_latency_ms, maximum_latency_ms) + VALUES (%(suit)s, %(revision)s, %(platform)s, %(recorded_at_timestamp)s, %(requests_per_second)s, + %(average_latency_ms)s, %(minimum_latency_ms)s, %(p25_latency_ms)s, %(median_latency_ms)s, + %(p75_latency_ms)s, %(p90_latency_ms)s, %(p95_latency_ms)s, %(p99_latency_ms)s, %(maximum_latency_ms)s) + """ + + try: + with conn.cursor() as cursor: + cursor.executemany(insert_query, csv_data) + conn.commit() + print( + f"Successfully inserted {len(csv_data)} detailed results into benchbase_results_details" + ) + + # Log some sample data for verification + sample = csv_data[0] + print( + f"Sample detail: {sample['requests_per_second']} req/s at {sample['recorded_at_timestamp']}" + ) + + except Exception as e: + print(f"Error inserting CSV results into database: {e}") + sys.exit(1) + + +def parse_load_log(log_file_path, scalefactor): + """Parse load log file and extract load metrics.""" + try: + with open(log_file_path) as f: + log_content = f.read() + + # Regex patterns to match the timestamp lines + loading_pattern = r"\[INFO \] (\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}),\d{3}.*Loading data into TPCC database" + finished_pattern = r"\[INFO \] (\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}),\d{3}.*Finished loading data into TPCC database" + + loading_match = re.search(loading_pattern, log_content) + finished_match = re.search(finished_pattern, log_content) + + if not loading_match or not finished_match: + print(f"Warning: Could not find loading 
timestamps in log file {log_file_path}") + return None + + # Parse timestamps + loading_time = datetime.strptime(loading_match.group(1), "%Y-%m-%d %H:%M:%S") + finished_time = datetime.strptime(finished_match.group(1), "%Y-%m-%d %H:%M:%S") + + # Calculate duration in seconds + duration_seconds = (finished_time - loading_time).total_seconds() + + # Calculate throughput: scalefactor/warehouses: 10 warehouses is approx. 1 GB of data + load_throughput = (scalefactor * 1024 / 10.0) / duration_seconds + + # Convert end time to UTC timestamp for database + finished_time_utc = finished_time.replace(tzinfo=timezone.utc).isoformat() + + print(f"Load metrics: Duration={duration_seconds}s, Throughput={load_throughput:.2f} MB/s") + + return { + "duration_seconds": duration_seconds, + "throughput_mb_per_sec": load_throughput, + "end_timestamp": finished_time_utc, + } + + except FileNotFoundError: + print(f"Warning: Load log file not found: {log_file_path}") + return None + except Exception as e: + print(f"Error parsing load log file {log_file_path}: {e}") + return None + + +def insert_load_metrics(conn, load_metrics, suit, revision, platform, labels_json): + """Insert load metrics into perf_test_results table.""" + if not load_metrics: + print("No load metrics to insert") + return + + load_metrics_data = [ + { + "suit": suit, + "revision": revision, + "platform": platform, + "metric_name": "load_duration_seconds", + "metric_value": load_metrics["duration_seconds"], + "metric_unit": "seconds", + "metric_report_type": "lower_is_better", + "recorded_at_timestamp": load_metrics["end_timestamp"], + "labels": labels_json, + }, + { + "suit": suit, + "revision": revision, + "platform": platform, + "metric_name": "load_throughput", + "metric_value": load_metrics["throughput_mb_per_sec"], + "metric_unit": "MB/second", + "metric_report_type": "higher_is_better", + "recorded_at_timestamp": load_metrics["end_timestamp"], + "labels": labels_json, + }, + ] + + insert_query = """ + INSERT INTO perf_test_results + (suit, revision, platform, metric_name, metric_value, metric_unit, + metric_report_type, recorded_at_timestamp, labels) + VALUES (%(suit)s, %(revision)s, %(platform)s, %(metric_name)s, %(metric_value)s, + %(metric_unit)s, %(metric_report_type)s, %(recorded_at_timestamp)s, %(labels)s) + """ + + try: + with conn.cursor() as cursor: + cursor.executemany(insert_query, load_metrics_data) + conn.commit() + print(f"Successfully inserted {len(load_metrics_data)} load metrics into perf_test_results") + + except Exception as e: + print(f"Error inserting load metrics into database: {e}") + sys.exit(1) + + +def main(): + """Main function to parse arguments and upload results.""" + parser = argparse.ArgumentParser( + description="Upload BenchBase TPC-C results to perf_test_results database" + ) + parser.add_argument( + "--summary-json", type=str, required=False, help="Path to the summary.json file" + ) + parser.add_argument( + "--run-type", + type=str, + required=True, + choices=["warmup", "opt-rate", "ramp-up", "load"], + help="Type of benchmark run", + ) + parser.add_argument("--min-cu", type=float, required=True, help="Minimum compute units") + parser.add_argument("--max-cu", type=float, required=True, help="Maximum compute units") + parser.add_argument("--project-id", type=str, required=True, help="Neon project ID") + parser.add_argument( + "--revision", type=str, required=True, help="Git commit hash (40 characters)" + ) + parser.add_argument( + "--connection-string", type=str, required=True, help="PostgreSQL 
connection string" + ) + parser.add_argument( + "--results-csv", + type=str, + required=False, + help="Path to the results.csv file for detailed metrics upload", + ) + parser.add_argument( + "--load-log", + type=str, + required=False, + help="Path to the load log file for load phase metrics", + ) + parser.add_argument( + "--warehouses", + type=int, + required=False, + help="Number of warehouses (scalefactor) for load metrics calculation", + ) + + args = parser.parse_args() + + # Validate inputs + if args.summary_json and not Path(args.summary_json).exists(): + print(f"Error: Summary JSON file does not exist: {args.summary_json}") + sys.exit(1) + + if not args.summary_json and not args.load_log: + print("Error: Either summary JSON or load log file must be provided") + sys.exit(1) + + if len(args.revision) != 40: + print(f"Warning: Revision should be 40 characters, got {len(args.revision)}") + + # Load and process summary data if provided + summary_data = None + metrics = [] + + if args.summary_json: + summary_data = load_summary_json(args.summary_json) + metrics = extract_metrics(summary_data) + if not metrics: + print("Warning: No metrics found in summary JSON") + + # Build common data for all metrics + if summary_data: + scalefactor = summary_data.get("scalefactor", "unknown") + terminals = summary_data.get("terminals", "unknown") + labels = build_labels(summary_data, args.project_id) + else: + # For load-only processing, use warehouses argument as scalefactor + scalefactor = args.warehouses if args.warehouses else "unknown" + terminals = "unknown" + labels = {"project_id": args.project_id} + + suit = build_suit_name(scalefactor, terminals, args.run_type, args.min_cu, args.max_cu) + platform = f"prod-us-east-2-{args.project_id}" + + # Convert timestamp - only needed for summary metrics and CSV processing + current_timestamp_ms = None + start_timestamp_ms = None + recorded_at = None + + if summary_data: + current_timestamp_ms = summary_data.get("Current Timestamp (milliseconds)") + start_timestamp_ms = summary_data.get("Start timestamp (milliseconds)") + + if current_timestamp_ms: + recorded_at = convert_timestamp_to_utc(current_timestamp_ms) + else: + print("Warning: No timestamp found in JSON, using current time") + recorded_at = datetime.now(timezone.utc).isoformat() + + if not start_timestamp_ms: + print("Warning: No start timestamp found in JSON, CSV upload may be incorrect") + start_timestamp_ms = ( + current_timestamp_ms or datetime.now(timezone.utc).timestamp() * 1000 + ) + + # Print Grafana dashboard link for cross-service endpoint debugging + if start_timestamp_ms and current_timestamp_ms: + grafana_url = ( + f"https://neonprod.grafana.net/d/cdya0okb81zwga/cross-service-endpoint-debugging" + f"?orgId=1&from={int(start_timestamp_ms)}&to={int(current_timestamp_ms)}" + f"&timezone=utc&var-env=prod&var-input_project_id={args.project_id}" + ) + print(f'Cross service endpoint dashboard for "{args.run_type}" phase: {grafana_url}') + + # Prepare metrics data for database insertion (only if we have summary metrics) + metrics_data = [] + if metrics and recorded_at: + for metric_name, metric_value in metrics: + metric_info = get_metric_info(metric_name) + + row = { + "suit": suit, + "revision": args.revision, + "platform": platform, + "metric_name": metric_name, + "metric_value": float(metric_value), # Ensure numeric type + "metric_unit": metric_info["unit"], + "metric_report_type": metric_info["report_type"], + "recorded_at_timestamp": recorded_at, + "labels": json.dumps(labels), # Convert 
to JSON string for JSONB column + } + metrics_data.append(row) + + print(f"Prepared {len(metrics_data)} summary metrics for upload to database") + print(f"Suit: {suit}") + print(f"Platform: {platform}") + + # Connect to database and insert metrics + try: + conn = psycopg2.connect(args.connection_string) + + # Insert summary metrics into perf_test_results (if any) + if metrics_data: + insert_metrics(conn, metrics_data) + else: + print("No summary metrics to upload") + + # Process and insert detailed CSV results if provided + if args.results_csv: + print(f"Processing detailed CSV results from: {args.results_csv}") + + # Create table if it doesn't exist + create_benchbase_results_details_table(conn) + + # Process CSV data + csv_data = process_csv_results( + args.results_csv, start_timestamp_ms, suit, args.revision, platform + ) + + # Insert CSV data + if csv_data: + insert_csv_results(conn, csv_data) + else: + print("No CSV data to upload") + else: + print("No CSV file provided, skipping detailed results upload") + + # Process and insert load metrics if provided + if args.load_log: + print(f"Processing load metrics from: {args.load_log}") + + # Parse load log and extract metrics + load_metrics = parse_load_log(args.load_log, scalefactor) + + # Insert load metrics + if load_metrics: + insert_load_metrics( + conn, load_metrics, suit, args.revision, platform, json.dumps(labels) + ) + else: + print("No load metrics to upload") + else: + print("No load log file provided, skipping load metrics upload") + + conn.close() + print("Database upload completed successfully") + + except psycopg2.Error as e: + print(f"Database connection/query error: {e}") + sys.exit(1) + except Exception as e: + print(f"Unexpected error: {e}") + sys.exit(1) + + +if __name__ == "__main__": + main() From 89554af1bd7c0ecfd79b5aab87629f042425f66d Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Thu, 24 Jul 2025 11:44:45 -0500 Subject: [PATCH 19/23] [BRC-1778] Have PG signal compute_ctl to refresh configuration if it suspects that it is talking to the wrong PSs (#12712) ## Problem This is a follow-up to TODO, as part of the effort to rewire the compute reconfiguration/notification mechanism to make it more robust. Please refer to that commit or ticket BRC-1778 for full context of the problem. ## Summary of changes The previous change added mechanism in `compute_ctl` that makes it possible to refresh the configuration of PG on-demand by having `compute_ctl` go out to download a new config from the control plane/HCC. This change wired this mechanism up with PG so that PG will signal `compute_ctl` to refresh its configuration when it suspects that it could be talking to incorrect pageservers due to a stale configuration. PG will become suspicious that it is talking to the wrong pageservers in the following situations: 1. It cannot connect to a pageserver (e.g., getting a network-level connection refused error) 2. It can connect to a pageserver, but the pageserver does not return any data for the GetPage request 3. It can connect to a pageserver, but the pageserver returns a malformed response 4. It can connect to a pageserver, but there is an error receiving the GetPage request response for any other reason This change also includes a minor tweak to `compute_ctl`'s config refresh behavior. Upon receiving a request to refresh PG configuration, `compute_ctl` will reach out to download a config, but it will not attempt to apply the configuration if the config is the same as the old config is it replacing. 
This optimization is added because the act of reconfiguring itself requires working pageserver connections. In many failure situations it is likely that PG detects an issue with a pageserver before the control plane can detect the issue, migrate tenants, and update the compute config. In this case even the latest compute config won't point PG to working pageservers, causing the configuration attempt to hang and negatively impact PG's time-to-recovery. With this change, `compute_ctl` only attempts reconfiguration if the refreshed config points PG to different pageservers. ## How is this tested? The new code paths are exercised in all existing tests because this mechanism is on by default. Explicitly tested in `test_runner/regress/test_change_pageserver.py`. Co-authored-by: William Huang --- compute_tools/src/configurator.rs | 13 +++ pgxn/neon/extension_server.c | 6 +- pgxn/neon/libpagestore.c | 90 ++++++++++++++++++- test_runner/fixtures/neon_fixtures.py | 16 +++- test_runner/fixtures/workload.py | 11 ++- test_runner/regress/test_change_pageserver.py | 2 +- 6 files changed, 124 insertions(+), 14 deletions(-) diff --git a/compute_tools/src/configurator.rs b/compute_tools/src/configurator.rs index 864335fd2c..93900b5c2b 100644 --- a/compute_tools/src/configurator.rs +++ b/compute_tools/src/configurator.rs @@ -101,6 +101,19 @@ fn configurator_main_loop(compute: &Arc) { // node out of the `RefreshConfigurationPending` state. Would be nice if we can encode this invariant // into the type system. assert_eq!(state.status, ComputeStatus::RefreshConfigurationPending); + + if state.pspec.as_ref().map(|ps| ps.pageserver_connstr.clone()) + == Some(pspec.pageserver_connstr.clone()) + { + info!( + "Refresh configuration: Retrieved spec is the same as the current spec. Waiting for control plane to update the spec before attempting reconfiguration." + ); + state.status = ComputeStatus::Running; + compute.state_changed.notify_all(); + drop(state); + std::thread::sleep(std::time::Duration::from_secs(5)); + continue; + } // state.pspec is consumed by compute.reconfigure() below. Note that compute.reconfigure() will acquire // the compute.state lock again so we need to have the lock guard go out of scope here. We could add a // "locked" variant of compute.reconfigure() that takes the lock guard as an argument to make this cleaner, diff --git a/pgxn/neon/extension_server.c b/pgxn/neon/extension_server.c index 00dcb6920e..d64cd3e4af 100644 --- a/pgxn/neon/extension_server.c +++ b/pgxn/neon/extension_server.c @@ -14,7 +14,7 @@ #include "extension_server.h" #include "neon_utils.h" -static int extension_server_port = 0; +int hadron_extension_server_port = 0; static int extension_server_request_timeout = 60; static int extension_server_connect_timeout = 60; @@ -47,7 +47,7 @@ neon_download_extension_file_http(const char *filename, bool is_library) curl_easy_setopt(handle, CURLOPT_CONNECTTIMEOUT, (long)extension_server_connect_timeout /* seconds */ ); compute_ctl_url = psprintf("http://localhost:%d/extension_server/%s%s", - extension_server_port, filename, is_library ? "?is_library=true" : ""); + hadron_extension_server_port, filename, is_library ? 
"?is_library=true" : ""); elog(LOG, "Sending request to compute_ctl: %s", compute_ctl_url); @@ -82,7 +82,7 @@ pg_init_extension_server() DefineCustomIntVariable("neon.extension_server_port", "connection string to the compute_ctl", NULL, - &extension_server_port, + &hadron_extension_server_port, 0, 0, INT_MAX, PGC_POSTMASTER, 0, /* no flags required */ diff --git a/pgxn/neon/libpagestore.c b/pgxn/neon/libpagestore.c index caffdc9612..ab0736e180 100644 --- a/pgxn/neon/libpagestore.c +++ b/pgxn/neon/libpagestore.c @@ -13,6 +13,8 @@ #include #include +#include + #include "libpq-int.h" #include "access/xlog.h" @@ -86,6 +88,8 @@ static int pageserver_response_log_timeout = 10000; /* 2.5 minutes. A bit higher than highest default TCP retransmission timeout */ static int pageserver_response_disconnect_timeout = 150000; +static int conf_refresh_reconnect_attempt_threshold = 16; + typedef struct { char connstring[MAX_SHARDS][MAX_PAGESERVER_CONNSTRING_SIZE]; @@ -130,7 +134,7 @@ static uint64 pagestore_local_counter = 0; typedef enum PSConnectionState { PS_Disconnected, /* no connection yet */ PS_Connecting_Startup, /* connection starting up */ - PS_Connecting_PageStream, /* negotiating pagestream */ + PS_Connecting_PageStream, /* negotiating pagestream */ PS_Connected, /* connected, pagestream established */ } PSConnectionState; @@ -401,7 +405,7 @@ get_shard_number(BufferTag *tag) } static inline void -CLEANUP_AND_DISCONNECT(PageServer *shard) +CLEANUP_AND_DISCONNECT(PageServer *shard) { if (shard->wes_read) { @@ -423,7 +427,7 @@ CLEANUP_AND_DISCONNECT(PageServer *shard) * complete the connection (e.g. due to receiving an earlier cancellation * during connection start). * Returns true if successfully connected; false if the connection failed. - * + * * Throws errors in unrecoverable situations, or when this backend's query * is canceled. */ @@ -1030,6 +1034,61 @@ pageserver_disconnect_shard(shardno_t shard_no) shard->state = PS_Disconnected; } +// BEGIN HADRON +/* + * Nudge compute_ctl to refresh our configuration. Called when we suspect we may be + * connecting to the wrong pageservers due to a stale configuration. + * + * This is a best-effort operation. If we couldn't send the local loopback HTTP request + * to compute_ctl or if the request fails for any reason, we just log the error and move + * on. + */ + +extern int hadron_extension_server_port; + +static void +hadron_request_configuration_refresh() { + static CURL *handle = NULL; + CURLcode res; + char *compute_ctl_url; + + if (!lakebase_mode) + return; + + if (handle == NULL) + { + handle = alloc_curl_handle(); + + curl_easy_setopt(handle, CURLOPT_CUSTOMREQUEST, "POST"); + curl_easy_setopt(handle, CURLOPT_TIMEOUT, 3L /* seconds */ ); + curl_easy_setopt(handle, CURLOPT_POSTFIELDS, ""); + } + + // Set the URL + compute_ctl_url = psprintf("http://localhost:%d/refresh_configuration", hadron_extension_server_port); + + + elog(LOG, "Sending refresh configuration request to compute_ctl: %s", compute_ctl_url); + + curl_easy_setopt(handle, CURLOPT_URL, compute_ctl_url); + + res = curl_easy_perform(handle); + if (res != CURLE_OK) + { + elog(WARNING, "compute_ctl refresh_configuration request failed: %s\n", curl_easy_strerror(res)); + } + + // In regular Postgres usage, it is not necessary to manually free memory allocated by palloc (psprintf) because + // it will be cleaned up after the "memory context" is reset (e.g. after the query or the transaction is finished). 
+ // However, the number of times this function gets called during a single query/transaction can be unbounded due to + // the various retry loops around calls to pageservers. Therefore, we need to manually free this memory here. + if (compute_ctl_url != NULL) + { + pfree(compute_ctl_url); + } +} +// END HADRON + static bool pageserver_send(shardno_t shard_no, NeonRequest *request) { @@ -1064,6 +1123,9 @@ pageserver_send(shardno_t shard_no, NeonRequest *request) while (!pageserver_connect(shard_no, shard->n_reconnect_attempts < max_reconnect_attempts ? LOG : ERROR)) { shard->n_reconnect_attempts += 1; + if (shard->n_reconnect_attempts > conf_refresh_reconnect_attempt_threshold) { + hadron_request_configuration_refresh(); + } } shard->n_reconnect_attempts = 0; } else { @@ -1171,17 +1233,26 @@ pageserver_receive(shardno_t shard_no) pfree(msg); pageserver_disconnect(shard_no); resp = NULL; + + /* + * Always poke compute_ctl to request a configuration refresh if we have issues receiving data from pageservers after + * successfully connecting to it. It could be an indication that we are connecting to the wrong pageservers (e.g. PS + * is in secondary mode or otherwise refuses to respond our request). + */ + hadron_request_configuration_refresh(); } else if (rc == -2) { char *msg = pchomp(PQerrorMessage(pageserver_conn)); pageserver_disconnect(shard_no); + hadron_request_configuration_refresh(); neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect: could not read COPY data: %s", msg); } else { pageserver_disconnect(shard_no); + hadron_request_configuration_refresh(); neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect: unexpected PQgetCopyData return value: %d", rc); } @@ -1249,18 +1320,21 @@ pageserver_try_receive(shardno_t shard_no) neon_shard_log(shard_no, LOG, "pageserver_receive disconnect: psql end of copy data: %s", pchomp(PQerrorMessage(pageserver_conn))); pageserver_disconnect(shard_no); resp = NULL; + hadron_request_configuration_refresh(); } else if (rc == -2) { char *msg = pchomp(PQerrorMessage(pageserver_conn)); pageserver_disconnect(shard_no); + hadron_request_configuration_refresh(); neon_shard_log(shard_no, LOG, "pageserver_receive disconnect: could not read COPY data: %s", msg); resp = NULL; } else { pageserver_disconnect(shard_no); + hadron_request_configuration_refresh(); neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect: unexpected PQgetCopyData return value: %d", rc); } @@ -1460,6 +1534,16 @@ pg_init_libpagestore(void) PGC_SU_BACKEND, 0, /* no flags required */ NULL, NULL, NULL); + DefineCustomIntVariable("hadron.conf_refresh_reconnect_attempt_threshold", + "Threshold of the number of consecutive failed pageserver " + "connection attempts (per shard) before signaling " + "compute_ctl for a configuration refresh.", + NULL, + &conf_refresh_reconnect_attempt_threshold, + 16, 0, INT_MAX, + PGC_USERSET, + 0, + NULL, NULL, NULL); DefineCustomIntVariable("neon.pageserver_response_log_timeout", "pageserver response log timeout", diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index e02b3b12f8..687500404a 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -4936,9 +4936,19 @@ class Endpoint(PgProtocol, LogUtils): # in the following commands. 
if safekeepers is not None: self.active_safekeepers = safekeepers - self.env.neon_cli.endpoint_reconfigure( - self.endpoint_id, self.tenant_id, pageserver_id, self.active_safekeepers - ) + + start_time = time.time() + while True: + try: + self.env.neon_cli.endpoint_reconfigure( + self.endpoint_id, self.tenant_id, pageserver_id, self.active_safekeepers + ) + return + except RuntimeError as e: + if time.time() - start_time > 120: + raise e + log.warning(f"Reconfigure failed with error: {e}. Retrying...") + time.sleep(5) def refresh_configuration(self): assert self.endpoint_id is not None diff --git a/test_runner/fixtures/workload.py b/test_runner/fixtures/workload.py index e17a8e989b..3ac61b5d8c 100644 --- a/test_runner/fixtures/workload.py +++ b/test_runner/fixtures/workload.py @@ -78,6 +78,9 @@ class Workload: """ if self._endpoint is not None: with ENDPOINT_LOCK: + # It's important that we update config.json before issuing the reconfigure request to make sure + # that PG-initiated spec refresh doesn't mess things up by reverting to the old spec. + self._endpoint.update_pageservers_in_config() self._endpoint.reconfigure() def endpoint(self, pageserver_id: int | None = None) -> Endpoint: @@ -97,10 +100,10 @@ class Workload: self._endpoint.start(pageserver_id=pageserver_id) self._configured_pageserver = pageserver_id else: - if self._configured_pageserver != pageserver_id: - self._configured_pageserver = pageserver_id - self._endpoint.reconfigure(pageserver_id=pageserver_id) - self._endpoint_config = pageserver_id + # It's important that we update config.json before issuing the reconfigure request to make sure + # that PG-initiated spec refresh doesn't mess things up by reverting to the old spec. + self._endpoint.update_pageservers_in_config(pageserver_id=pageserver_id) + self._endpoint.reconfigure(pageserver_id=pageserver_id) connstring = self._endpoint.safe_psql( "SELECT setting FROM pg_settings WHERE name='neon.pageserver_connstring'" diff --git a/test_runner/regress/test_change_pageserver.py b/test_runner/regress/test_change_pageserver.py index bcdccac14e..af736af825 100644 --- a/test_runner/regress/test_change_pageserver.py +++ b/test_runner/regress/test_change_pageserver.py @@ -17,7 +17,7 @@ def reconfigure_endpoint(endpoint: Endpoint, pageserver_id: int, use_explicit_re # to make sure that PG-initiated config refresh doesn't mess things up by reverting to the old config. endpoint.update_pageservers_in_config(pageserver_id=pageserver_id) - # PG will eventually automatically refresh its configuration if it detects connectivity issues with pageservers. + # PG will automatically refresh its configuration if it detects connectivity issues with pageservers. # We also allow the test to explicitly request a reconfigure so that the test can be sure that the # endpoint is running with the latest configuration. # From 11527b9df7a3ed7dcd3faaddbb64bd90f1739f31 Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Thu, 24 Jul 2025 13:41:29 -0500 Subject: [PATCH 20/23] [BRC-2951] Enforce PG backpressure parameters at the shard level (#12694) ## Problem Currently PG backpressure parameters are enforced globally. With tenant splitting, this makes it hard to balance small tenants and large tenants. For large tenants with more shards, we need to increase the lagging because each shard receives total/shard_count amount of data, while doing so could be suboptimal to small tenants with fewer shards. 
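As a rough worked example (numbers are made up): with `max_replication_write_lag = 500MB` and 8 shards, a given span of WAL lag contains only ~1/8 of the data destined for any single shard, so the tenant can safely be allowed roughly 8 x 500MB of lag before backpressure kicks in, while a single-shard tenant keeps the plain 500MB budget. The sketch below shows this per-shard scaling in C; `tenant_write_lag_limit_bytes` is an illustrative name, while `get_num_shards()`, `Max`, `max_replication_write_lag`, and `MB` are the existing helpers/GUCs used by the diff further down. It is a sketch of the idea, not the patch itself.

```
/* Illustrative sketch only: scale the lag GUC by the tenant's shard count. */
static uint64
tenant_write_lag_limit_bytes(void)
{
    /* assume at least one shard if the shard map is not initialized yet */
    shardno_t num_shards = Max(1, get_num_shards());

    /* each shard sees ~1/num_shards of the tenant's data, so scale the limit up */
    return (uint64) num_shards * max_replication_write_lag * MB;
}
```
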
## Summary of changes This PR makes these parameters to be enforced at the shard level, i.e., PG will compute the actual lag limit by multiply the shard count. ## How is this tested? Added regression test. Co-authored-by: Chen Luo --- pgxn/neon/walproposer_pg.c | 44 ++++++++++++++++++++++------ test_runner/regress/test_sharding.py | 5 ++-- 2 files changed, 38 insertions(+), 11 deletions(-) diff --git a/pgxn/neon/walproposer_pg.c b/pgxn/neon/walproposer_pg.c index 874a1590ac..b0f5828d39 100644 --- a/pgxn/neon/walproposer_pg.c +++ b/pgxn/neon/walproposer_pg.c @@ -507,19 +507,45 @@ backpressure_lag_impl(void) LSN_FORMAT_ARGS(flushPtr), LSN_FORMAT_ARGS(applyPtr)); - if ((writePtr != InvalidXLogRecPtr && max_replication_write_lag > 0 && myFlushLsn > writePtr + max_replication_write_lag * MB)) + if (lakebase_mode) { - return (myFlushLsn - writePtr - max_replication_write_lag * MB); - } + // in case PG does not have shard map initialized, we assume PG always has 1 shard at minimum. + shardno_t num_shards = Max(1, get_num_shards()); + int tenant_max_replication_apply_lag = num_shards * max_replication_apply_lag; + int tenant_max_replication_flush_lag = num_shards * max_replication_flush_lag; + int tenant_max_replication_write_lag = num_shards * max_replication_write_lag; - if ((flushPtr != InvalidXLogRecPtr && max_replication_flush_lag > 0 && myFlushLsn > flushPtr + max_replication_flush_lag * MB)) - { - return (myFlushLsn - flushPtr - max_replication_flush_lag * MB); - } + if ((writePtr != InvalidXLogRecPtr && tenant_max_replication_write_lag > 0 && myFlushLsn > writePtr + tenant_max_replication_write_lag * MB)) + { + return (myFlushLsn - writePtr - tenant_max_replication_write_lag * MB); + } - if ((applyPtr != InvalidXLogRecPtr && max_replication_apply_lag > 0 && myFlushLsn > applyPtr + max_replication_apply_lag * MB)) + if ((flushPtr != InvalidXLogRecPtr && tenant_max_replication_flush_lag > 0 && myFlushLsn > flushPtr + tenant_max_replication_flush_lag * MB)) + { + return (myFlushLsn - flushPtr - tenant_max_replication_flush_lag * MB); + } + + if ((applyPtr != InvalidXLogRecPtr && tenant_max_replication_apply_lag > 0 && myFlushLsn > applyPtr + tenant_max_replication_apply_lag * MB)) + { + return (myFlushLsn - applyPtr - tenant_max_replication_apply_lag * MB); + } + } + else { - return (myFlushLsn - applyPtr - max_replication_apply_lag * MB); + if ((writePtr != InvalidXLogRecPtr && max_replication_write_lag > 0 && myFlushLsn > writePtr + max_replication_write_lag * MB)) + { + return (myFlushLsn - writePtr - max_replication_write_lag * MB); + } + + if ((flushPtr != InvalidXLogRecPtr && max_replication_flush_lag > 0 && myFlushLsn > flushPtr + max_replication_flush_lag * MB)) + { + return (myFlushLsn - flushPtr - max_replication_flush_lag * MB); + } + + if ((applyPtr != InvalidXLogRecPtr && max_replication_apply_lag > 0 && myFlushLsn > applyPtr + max_replication_apply_lag * MB)) + { + return (myFlushLsn - applyPtr - max_replication_apply_lag * MB); + } } } return 0; diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index c2907d8a4f..4e46b67988 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -1751,14 +1751,15 @@ def test_back_pressure_per_shard(neon_env_builder: NeonEnvBuilder): "max_replication_apply_lag = 0", "max_replication_flush_lag = 15MB", "neon.max_cluster_size = 10GB", + "neon.lakebase_mode = true", ], ) endpoint.respec(skip_pg_catalog_updates=False) endpoint.start() - # generate 10MB of data + # generate 
20MB of data endpoint.safe_psql( - "CREATE TABLE usertable AS SELECT s AS KEY, repeat('a', 1000) as VALUE from generate_series(1, 10000) s;" + "CREATE TABLE usertable AS SELECT s AS KEY, repeat('a', 1000) as VALUE from generate_series(1, 20000) s;" ) res = endpoint.safe_psql("SELECT neon.backpressure_throttling_time() as throttling_time")[0] assert res[0] == 0, f"throttling_time should be 0, but got {res[0]}" From 9eebd6fc796137d26a5bc5fb22a58a2d51bb4ff7 Mon Sep 17 00:00:00 2001 From: HaoyuHuang Date: Thu, 24 Jul 2025 12:01:30 -0700 Subject: [PATCH 21/23] A few more compute_ctl changes (#12713) ## Summary of changes A bunch of no-op changes. The only other thing is that the lock is released early in the terminate func. --- compute_tools/src/compute.rs | 31 +++++++++++++++++++++- compute_tools/src/http/routes/configure.rs | 7 ++++- compute_tools/src/http/routes/terminate.rs | 24 ++++++++++++++++- 3 files changed, 59 insertions(+), 3 deletions(-) diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs index e3ac887e9c..ef7bca51b2 100644 --- a/compute_tools/src/compute.rs +++ b/compute_tools/src/compute.rs @@ -41,8 +41,9 @@ use utils::shard::{ShardCount, ShardIndex, ShardNumber}; use crate::configurator::launch_configurator; use crate::disk_quota::set_disk_quota; +use crate::hadron_metrics::COMPUTE_ATTACHED; use crate::installed_extensions::get_installed_extensions; -use crate::logger::startup_context_from_env; +use crate::logger::{self, startup_context_from_env}; use crate::lsn_lease::launch_lsn_lease_bg_task_for_static; use crate::metrics::COMPUTE_CTL_UP; use crate::monitor::launch_monitor; @@ -2550,6 +2551,34 @@ LIMIT 100", ); } } + + /// Set the compute spec and update related metrics. + /// This is the central place where pspec is updated. + pub fn set_spec(params: &ComputeNodeParams, state: &mut ComputeState, pspec: ParsedSpec) { + state.pspec = Some(pspec); + ComputeNode::update_attached_metric(params, state); + let _ = logger::update_ids(¶ms.instance_id, &Some(params.compute_id.clone())); + } + + pub fn update_attached_metric(params: &ComputeNodeParams, state: &mut ComputeState) { + // Update the pg_cctl_attached gauge when all identifiers are available. + if let Some(instance_id) = ¶ms.instance_id { + if let Some(pspec) = &state.pspec { + // Clear all values in the metric + COMPUTE_ATTACHED.reset(); + + // Set new metric value + COMPUTE_ATTACHED + .with_label_values(&[ + ¶ms.compute_id, + instance_id, + &pspec.tenant_id.to_string(), + &pspec.timeline_id.to_string(), + ]) + .set(1); + } + } + } } pub async fn installed_extensions(conf: tokio_postgres::Config) -> Result<()> { diff --git a/compute_tools/src/http/routes/configure.rs b/compute_tools/src/http/routes/configure.rs index b7325d283f..943ff45357 100644 --- a/compute_tools/src/http/routes/configure.rs +++ b/compute_tools/src/http/routes/configure.rs @@ -43,7 +43,12 @@ pub(in crate::http) async fn configure( // configure request for tracing purposes. 
state.startup_span = Some(tracing::Span::current()); - state.pspec = Some(pspec); + if compute.params.lakebase_mode { + ComputeNode::set_spec(&compute.params, &mut state, pspec); + } else { + state.pspec = Some(pspec); + } + state.set_status(ComputeStatus::ConfigurationPending, &compute.state_changed); drop(state); } diff --git a/compute_tools/src/http/routes/terminate.rs b/compute_tools/src/http/routes/terminate.rs index 5b30b020c8..deac760f43 100644 --- a/compute_tools/src/http/routes/terminate.rs +++ b/compute_tools/src/http/routes/terminate.rs @@ -1,7 +1,7 @@ use crate::compute::{ComputeNode, forward_termination_signal}; use crate::http::JsonResponse; use axum::extract::State; -use axum::response::Response; +use axum::response::{IntoResponse, Response}; use axum_extra::extract::OptionalQuery; use compute_api::responses::{ComputeStatus, TerminateMode, TerminateResponse}; use http::StatusCode; @@ -33,7 +33,29 @@ pub(in crate::http) async fn terminate( if !matches!(state.status, ComputeStatus::Empty | ComputeStatus::Running) { return JsonResponse::invalid_status(state.status); } + + // If compute is Empty, there's no Postgres to terminate. The regular compute_ctl termination path + // assumes Postgres to be configured and running, so we just special-handle this case by exiting + // the process directly. + if compute.params.lakebase_mode && state.status == ComputeStatus::Empty { + drop(state); + info!("terminating empty compute - will exit process"); + + // Queue a task to exit the process after 5 seconds. The 5-second delay aims to + // give enough time for the HTTP response to be sent so that HCM doesn't get an abrupt + // connection termination. + tokio::spawn(async { + tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; + info!("exiting process after terminating empty compute"); + std::process::exit(0); + }); + + return StatusCode::OK.into_response(); + } + + // For Running status, proceed with normal termination state.set_status(mode.into(), &compute.state_changed); + drop(state); } forward_termination_signal(false); From 512210bb5ac7c328afe08381d852ddaa8fd8c11e Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Thu, 24 Jul 2025 14:05:00 -0500 Subject: [PATCH 22/23] [BRC-2368] Add PS and compute_ctl metrics to report pagestream request errors (#12716) ## Problem In our experience running the system so far, almost all of the "hang compute" situations are due to the compute (postgres) pointing at the wrong pageservers. We currently mainly rely on the promethesus exporter (PGExporter) running on PG to detect and report any down time, but these can be unreliable because the read and write probes the PGExporter runs do not always generate pageserver requests due to caching, even though the real user might be experiencing down time when touching uncached pages. We are also about to start disk-wiping node pool rotation operations in prod clusters for our pageservers, and it is critical to have a convenient way to monitor the impact of these node pool rotations so that we can quickly respond to any issues. These metrics should provide very clear signals to address this operational need. ## Summary of changes Added a pair of metrics to detect issues between postgres' PageStream protocol (e.g. get_page_at_lsn, get_base_backup, etc.) communications with pageservers: * On the compute node (compute_ctl), exports a counter metric that is incremented every time postgres requests a configuration refresh. 
Postgres today only requests these configuration refreshes when it cannot connect to a pageserver or if the pageserver rejects its request by disconnecting. * On the pageserver, exports a counter metric that is incremented every time it receives a PageStream request that cannot be handled because the tenant is not known or if the request was routed to the wrong shard (e.g. secondary). ### How I plan to use metrics I plan to use the metrics added here to create alerts. The alerts can fire, for example, if these counters have been continuously increasing for over a certain period of time. During rollouts, misrouted requests may occasionally happen, but they should soon die down as reconfigurations make progress. We can start with something like raising the alert if the counters have been increasing continuously for over 5 minutes. ## How is this tested? New integration tests in `test_runner/regress/test_hadron_ps_connectivity_metrics.py` Co-authored-by: William Huang --- compute_tools/src/bin/compute_ctl.rs | 5 +- compute_tools/src/http/routes/metrics.rs | 10 +- .../src/http/routes/refresh_configuration.rs | 3 +- pageserver/src/page_service.rs | 1 + test_runner/fixtures/neon_cli.py | 7 +- test_runner/fixtures/neon_fixtures.py | 15 ++- .../test_hadron_ps_connectivity_metrics.py | 124 ++++++++++++++++++ 7 files changed, 157 insertions(+), 8 deletions(-) create mode 100644 test_runner/regress/test_hadron_ps_connectivity_metrics.py diff --git a/compute_tools/src/bin/compute_ctl.rs b/compute_tools/src/bin/compute_ctl.rs index 83a2e6dc68..9c86aba531 100644 --- a/compute_tools/src/bin/compute_ctl.rs +++ b/compute_tools/src/bin/compute_ctl.rs @@ -49,10 +49,10 @@ use compute_tools::compute::{ BUILD_TAG, ComputeNode, ComputeNodeParams, forward_termination_signal, }; use compute_tools::extension_server::get_pg_version_string; -use compute_tools::logger::*; use compute_tools::params::*; use compute_tools::pg_isready::get_pg_isready_bin; use compute_tools::spec::*; +use compute_tools::{hadron_metrics, installed_extensions, logger::*}; use rlimit::{Resource, setrlimit}; use signal_hook::consts::{SIGINT, SIGQUIT, SIGTERM}; use signal_hook::iterator::Signals; @@ -205,6 +205,9 @@ fn main() -> Result<()> { // enable core dumping for all child processes setrlimit(Resource::CORE, rlimit::INFINITY, rlimit::INFINITY)?; + installed_extensions::initialize_metrics(); + hadron_metrics::initialize_metrics(); + let connstr = Url::parse(&cli.connstr).context("cannot parse connstr as a URL")?; let config = get_config(&cli)?; diff --git a/compute_tools/src/http/routes/metrics.rs b/compute_tools/src/http/routes/metrics.rs index 96b464fd12..8406746327 100644 --- a/compute_tools/src/http/routes/metrics.rs +++ b/compute_tools/src/http/routes/metrics.rs @@ -13,6 +13,7 @@ use metrics::{Encoder, TextEncoder}; use crate::communicator_socket_client::connect_communicator_socket; use crate::compute::ComputeNode; +use crate::hadron_metrics; use crate::http::JsonResponse; use crate::metrics::collect; @@ -21,11 +22,18 @@ pub(in crate::http) async fn get_metrics() -> Response { // When we call TextEncoder::encode() below, it will immediately return an // error if a metric family has no metrics, so we need to preemptively // filter out metric families with no metrics. - let metrics = collect() + let mut metrics = collect() .into_iter() .filter(|m| !m.get_metric().is_empty()) .collect::>(); + // Add Hadron metrics. 
+ let hadron_metrics: Vec = hadron_metrics::collect() + .into_iter() + .filter(|m| !m.get_metric().is_empty()) + .collect(); + metrics.extend(hadron_metrics); + let encoder = TextEncoder::new(); let mut buffer = vec![]; diff --git a/compute_tools/src/http/routes/refresh_configuration.rs b/compute_tools/src/http/routes/refresh_configuration.rs index 512abaa0a6..9b2f95ca5a 100644 --- a/compute_tools/src/http/routes/refresh_configuration.rs +++ b/compute_tools/src/http/routes/refresh_configuration.rs @@ -9,7 +9,7 @@ use axum::{ use http::StatusCode; use crate::compute::ComputeNode; -// use crate::hadron_metrics::POSTGRES_PAGESTREAM_REQUEST_ERRORS; +use crate::hadron_metrics::POSTGRES_PAGESTREAM_REQUEST_ERRORS; use crate::http::JsonResponse; /// The /refresh_configuration POST method is used to nudge compute_ctl to pull a new spec @@ -21,6 +21,7 @@ use crate::http::JsonResponse; pub(in crate::http) async fn refresh_configuration( State(compute): State>, ) -> Response { + POSTGRES_PAGESTREAM_REQUEST_ERRORS.inc(); match compute.signal_refresh_configuration().await { Ok(_) => StatusCode::OK.into_response(), Err(e) => JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, e), diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index 26a23da66f..bbfe35d07a 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -535,6 +535,7 @@ impl timeline::handle::TenantManager for TenantManagerWrappe match resolved { ShardResolveResult::Found(tenant_shard) => break tenant_shard, ShardResolveResult::NotFound => { + MISROUTED_PAGESTREAM_REQUESTS.inc(); return Err(GetActiveTimelineError::Tenant( GetActiveTenantError::NotFound(GetTenantError::NotFound(*tenant_id)), )); diff --git a/test_runner/fixtures/neon_cli.py b/test_runner/fixtures/neon_cli.py index d7634f24a4..390efe0309 100644 --- a/test_runner/fixtures/neon_cli.py +++ b/test_runner/fixtures/neon_cli.py @@ -587,7 +587,9 @@ class NeonLocalCli(AbstractNeonCli): ] extra_env_vars = env or {} if basebackup_request_tries is not None: - extra_env_vars["NEON_COMPUTE_TESTING_BASEBACKUP_TRIES"] = str(basebackup_request_tries) + extra_env_vars["NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES"] = str( + basebackup_request_tries + ) if remote_ext_base_url is not None: args.extend(["--remote-ext-base-url", remote_ext_base_url]) @@ -623,6 +625,7 @@ class NeonLocalCli(AbstractNeonCli): pageserver_id: int | None = None, safekeepers: list[int] | None = None, check_return_code=True, + timeout_sec: float | None = None, ) -> subprocess.CompletedProcess[str]: args = ["endpoint", "reconfigure", endpoint_id] if tenant_id is not None: @@ -631,7 +634,7 @@ class NeonLocalCli(AbstractNeonCli): args.extend(["--pageserver-id", str(pageserver_id)]) if safekeepers is not None: args.extend(["--safekeepers", (",".join(map(str, safekeepers)))]) - return self.raw_cli(args, check_return_code=check_return_code) + return self.raw_cli(args, check_return_code=check_return_code, timeout=timeout_sec) def endpoint_refresh_configuration( self, diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 687500404a..7f59547c73 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -4930,7 +4930,12 @@ class Endpoint(PgProtocol, LogUtils): def is_running(self): return self._running._value > 0 - def reconfigure(self, pageserver_id: int | None = None, safekeepers: list[int] | None = None): + def reconfigure( + self, + pageserver_id: int | None = None, + safekeepers: list[int] | None = 
None, + timeout_sec: float = 120, + ): assert self.endpoint_id is not None # If `safekeepers` is not None, they are remember them as active and use # in the following commands. @@ -4941,11 +4946,15 @@ class Endpoint(PgProtocol, LogUtils): while True: try: self.env.neon_cli.endpoint_reconfigure( - self.endpoint_id, self.tenant_id, pageserver_id, self.active_safekeepers + self.endpoint_id, + self.tenant_id, + pageserver_id, + self.active_safekeepers, + timeout_sec=timeout_sec, ) return except RuntimeError as e: - if time.time() - start_time > 120: + if time.time() - start_time > timeout_sec: raise e log.warning(f"Reconfigure failed with error: {e}. Retrying...") time.sleep(5) diff --git a/test_runner/regress/test_hadron_ps_connectivity_metrics.py b/test_runner/regress/test_hadron_ps_connectivity_metrics.py new file mode 100644 index 0000000000..7590c1236c --- /dev/null +++ b/test_runner/regress/test_hadron_ps_connectivity_metrics.py @@ -0,0 +1,124 @@ +import json +import shutil + +from fixtures.common_types import TenantShardId +from fixtures.log_helper import log +from fixtures.metrics import parse_metrics +from fixtures.neon_fixtures import Endpoint, NeonEnvBuilder, NeonPageserver + + +# Helper function to attempt reconfiguration of the compute to point to a new pageserver. Note that in these tests, +# we don't expect the reconfiguration attempts to go through, as we will be pointing the compute at a "wrong" pageserver. +def _attempt_reconfiguration(endpoint: Endpoint, new_pageserver_id: int, timeout_sec: float): + try: + endpoint.reconfigure(pageserver_id=new_pageserver_id, timeout_sec=timeout_sec) + except Exception as e: + log.info(f"reconfiguration failed with exception {e}") + pass + + +def read_misrouted_metric_value(pageserver: NeonPageserver) -> float: + return ( + pageserver.http_client() + .get_metrics() + .query_one("pageserver_misrouted_pagestream_requests_total") + .value + ) + + +def read_request_error_metric_value(endpoint: Endpoint) -> float: + return ( + parse_metrics(endpoint.http_client().metrics()) + .query_one("pg_cctl_pagestream_request_errors_total") + .value + ) + + +def test_misrouted_to_secondary( + neon_env_builder: NeonEnvBuilder, +): + """ + Tests that the following metrics are incremented when compute tries to talk to a secondary pageserver: + - On pageserver receiving the request: pageserver_misrouted_pagestream_requests_total + - On compute: pg_cctl_pagestream_request_errors_total + """ + neon_env_builder.num_pageservers = 2 + env = neon_env_builder.init_configs() + env.broker.start() + env.storage_controller.start() + for ps in env.pageservers: + ps.start() + for sk in env.safekeepers: + sk.start() + + # Create a tenant that has one primary and one secondary. Due to primary/secondary placement constraints, + # the primary and secondary pageservers will be different. + tenant_id, _ = env.create_tenant(shard_count=1, placement_policy=json.dumps({"Attached": 1})) + endpoint = env.endpoints.create( + "main", tenant_id=tenant_id, config_lines=["neon.lakebase_mode = true"] + ) + endpoint.respec(skip_pg_catalog_updates=False) + endpoint.start() + + # Get the primary pageserver serving the zero shard of the tenant, and detach it from the primary pageserver. + # This test operation configures tenant directly on the pageserver/does not go through the storage controller, + # so the compute does not get any notifications and will keep pointing at the detached pageserver. 
+ tenant_zero_shard = TenantShardId(tenant_id, shard_number=0, shard_count=1) + + primary_ps = env.get_tenant_pageserver(tenant_zero_shard) + secondary_ps = ( + env.pageservers[1] if primary_ps.id == env.pageservers[0].id else env.pageservers[0] + ) + + # Now try to point the compute at the pageserver that is acting as secondary for the tenant. Test that the metrics + # on both compute_ctl and the pageserver register the misrouted requests following the reconfiguration attempt. + assert read_misrouted_metric_value(secondary_ps) == 0 + assert read_request_error_metric_value(endpoint) == 0 + _attempt_reconfiguration(endpoint, new_pageserver_id=secondary_ps.id, timeout_sec=2.0) + assert read_misrouted_metric_value(secondary_ps) > 0, "PS metric not incremented" + assert read_request_error_metric_value(endpoint) > 0, "compute_ctl metric not incremented" + + +def test_misrouted_to_ps_not_hosting_tenant( + neon_env_builder: NeonEnvBuilder, +): + """ + Tests that the following metrics are incremented when compute tries to talk to a pageserver that does not host the tenant: + - On pageserver receiving the request: pageserver_misrouted_pagestream_requests_total + - On compute: pg_cctl_pagestream_request_errors_total + """ + neon_env_builder.num_pageservers = 2 + env = neon_env_builder.init_configs() + env.broker.start() + env.storage_controller.start(handle_ps_local_disk_loss=False) + for ps in env.pageservers: + ps.start() + for sk in env.safekeepers: + sk.start() + + tenant_id, _ = env.create_tenant(shard_count=1) + endpoint = env.endpoints.create( + "main", tenant_id=tenant_id, config_lines=["neon.lakebase_mode = true"] + ) + endpoint.respec(skip_pg_catalog_updates=False) + endpoint.start() + + tenant_ps_id = env.get_tenant_pageserver( + TenantShardId(tenant_id, shard_number=0, shard_count=1) + ).id + non_hosting_ps = ( + env.pageservers[1] if tenant_ps_id == env.pageservers[0].id else env.pageservers[0] + ) + + # Clear the disk of the non-hosting PS to make sure that it indeed doesn't have any information about the tenant. + non_hosting_ps.stop(immediate=True) + shutil.rmtree(non_hosting_ps.tenant_dir()) + non_hosting_ps.start() + + # Now try to point the compute to the non-hosting pageserver. Test that the metrics + # on both compute_ctl and the pageserver register the misrouted requests following the reconfiguration attempt. + assert read_misrouted_metric_value(non_hosting_ps) == 0 + assert read_request_error_metric_value(endpoint) == 0 + _attempt_reconfiguration(endpoint, new_pageserver_id=non_hosting_ps.id, timeout_sec=2.0) + assert read_misrouted_metric_value(non_hosting_ps) > 0, "PS metric not incremented" + assert read_request_error_metric_value(endpoint) > 0, "compute_ctl metric not incremented" From b623fbae0c5bfd1952a3639bf27742c88842414c Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Thu, 24 Jul 2025 19:01:59 -0500 Subject: [PATCH 23/23] Cancel PG query if stuck at refreshing configuration (#12717) ## Problem While configuring or reconfiguring PG due to PageServer movements, it's possible PG may get stuck if PageServer is moved around after fetching the spec from StorageController. ## Summary of changes To fix this issue, this PR introduces two changes: 1. Fail the PG query directly if the query cannot request configuration for certain number of times. 2. Introduce a new state `RefreshConfiguration` in compute tools to differentiate it from `RefreshConfigurationPending`. 
If compute tool is already in `RefreshConfiguration` state, then it will not accept new request configuration requests. ## How is this tested? Chaos testing. Co-authored-by: Chen Luo --- compute_tools/README.md | 6 +- compute_tools/src/compute.rs | 1 + compute_tools/src/configurator.rs | 121 ++++-- control_plane/src/endpoint.rs | 3 +- libs/compute_api/src/responses.rs | 10 +- pgxn/neon/libpagestore.c | 66 +++- .../regress/test_compute_termination.py | 369 ++++++++++++++++++ .../test_hadron_ps_connectivity_metrics.py | 21 +- 8 files changed, 542 insertions(+), 55 deletions(-) create mode 100644 test_runner/regress/test_compute_termination.py diff --git a/compute_tools/README.md b/compute_tools/README.md index 446b441c18..e92e5920b9 100644 --- a/compute_tools/README.md +++ b/compute_tools/README.md @@ -54,11 +54,11 @@ stateDiagram-v2 Running --> TerminationPendingImmediate : Requested termination Running --> ConfigurationPending : Received a /configure request with spec Running --> RefreshConfigurationPending : Received a /refresh_configuration request, compute node will pull a new spec and reconfigure - RefreshConfigurationPending --> Running : Compute has been re-configured + RefreshConfigurationPending --> RefreshConfiguration: Received compute spec and started configuration + RefreshConfiguration --> Running : Compute has been re-configured + RefreshConfiguration --> RefreshConfigurationPending : Configuration failed and to be retried TerminationPendingFast --> Terminated compute with 30s delay for cplane to inspect status TerminationPendingImmediate --> Terminated : Terminated compute immediately - Running --> TerminationPending : Requested termination - TerminationPending --> Terminated : Terminated compute Failed --> RefreshConfigurationPending : Received a /refresh_configuration request Failed --> [*] : Compute exited Terminated --> [*] : Compute exited diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs index ef7bca51b2..a240e69df8 100644 --- a/compute_tools/src/compute.rs +++ b/compute_tools/src/compute.rs @@ -1994,6 +1994,7 @@ impl ComputeNode { // wait ComputeStatus::Init | ComputeStatus::Configuration + | ComputeStatus::RefreshConfiguration | ComputeStatus::RefreshConfigurationPending | ComputeStatus::Empty => { state = self.state_changed.wait(state).unwrap(); diff --git a/compute_tools/src/configurator.rs b/compute_tools/src/configurator.rs index 93900b5c2b..feca8337b2 100644 --- a/compute_tools/src/configurator.rs +++ b/compute_tools/src/configurator.rs @@ -2,6 +2,7 @@ use std::fs::File; use std::thread; use std::{path::Path, sync::Arc}; +use anyhow::Result; use compute_api::responses::{ComputeConfig, ComputeStatus}; use tracing::{error, info, instrument}; @@ -13,6 +14,10 @@ fn configurator_main_loop(compute: &Arc) { info!("waiting for reconfiguration requests"); loop { let mut state = compute.state.lock().unwrap(); + /* BEGIN_HADRON */ + // RefreshConfiguration should only be used inside the loop + assert_ne!(state.status, ComputeStatus::RefreshConfiguration); + /* END_HADRON */ if compute.params.lakebase_mode { while state.status != ComputeStatus::ConfigurationPending @@ -54,53 +59,68 @@ fn configurator_main_loop(compute: &Arc) { info!( "compute node suspects its configuration is out of date, now refreshing configuration" ); - // Drop the lock guard here to avoid holding the lock while downloading spec from the control plane / HCC. 
- // This is the only thread that can move compute_ctl out of the `RefreshConfigurationPending` state, so it + state.set_status(ComputeStatus::RefreshConfiguration, &compute.state_changed); + // Drop the lock guard here to avoid holding the lock while downloading config from the control plane / HCC. + // This is the only thread that can move compute_ctl out of the `RefreshConfiguration` state, so it // is safe to drop the lock like this. drop(state); - let spec = if let Some(config_path) = &compute.params.config_path_test_only { - // This path is only to make testing easier. In production we always get the spec from the HCC. - info!( - "reloading config.json from path: {}", - config_path.to_string_lossy() - ); - let path = Path::new(config_path); - if let Ok(file) = File::open(path) { - match serde_json::from_reader::(file) { - Ok(config) => config.spec, - Err(e) => { - error!("could not parse spec file: {}", e); - None - } - } - } else { - error!( - "could not open config file at path: {}", + let get_config_result: anyhow::Result = + if let Some(config_path) = &compute.params.config_path_test_only { + // This path is only to make testing easier. In production we always get the config from the HCC. + info!( + "reloading config.json from path: {}", config_path.to_string_lossy() ); - None - } - } else if let Some(control_plane_uri) = &compute.params.control_plane_uri { - match get_config_from_control_plane(control_plane_uri, &compute.params.compute_id) { - Ok(config) => config.spec, - Err(e) => { - error!("could not get config from control plane: {}", e); - None + let path = Path::new(config_path); + if let Ok(file) = File::open(path) { + match serde_json::from_reader::(file) { + Ok(config) => Ok(config), + Err(e) => { + error!("could not parse config file: {}", e); + Err(anyhow::anyhow!("could not parse config file: {}", e)) + } + } + } else { + error!( + "could not open config file at path: {:?}", + config_path.to_string_lossy() + ); + Err(anyhow::anyhow!( + "could not open config file at path: {}", + config_path.to_string_lossy() + )) } - } - } else { - None - }; + } else if let Some(control_plane_uri) = &compute.params.control_plane_uri { + get_config_from_control_plane(control_plane_uri, &compute.params.compute_id) + } else { + Err(anyhow::anyhow!("config_path_test_only is not set")) + }; - if let Some(spec) = spec { - if let Ok(pspec) = ParsedSpec::try_from(spec) { + // Parse any received ComputeSpec and transpose the result into a Result>. + let parsed_spec_result: Result> = + get_config_result.and_then(|config| { + if let Some(spec) = config.spec { + if let Ok(pspec) = ParsedSpec::try_from(spec) { + Ok(Some(pspec)) + } else { + Err(anyhow::anyhow!("could not parse spec")) + } + } else { + Ok(None) + } + }); + + let new_status: ComputeStatus; + match parsed_spec_result { + // Control plane (HCM) returned a spec and we were able to parse it. + Ok(Some(pspec)) => { { let mut state = compute.state.lock().unwrap(); // Defensive programming to make sure this thread is indeed the only one that can move the compute - // node out of the `RefreshConfigurationPending` state. Would be nice if we can encode this invariant + // node out of the `RefreshConfiguration` state. Would be nice if we can encode this invariant // into the type system. 
- assert_eq!(state.status, ComputeStatus::RefreshConfigurationPending); + assert_eq!(state.status, ComputeStatus::RefreshConfiguration); if state.pspec.as_ref().map(|ps| ps.pageserver_connstr.clone()) == Some(pspec.pageserver_connstr.clone()) @@ -123,20 +143,45 @@ fn configurator_main_loop(compute: &Arc) { match compute.reconfigure() { Ok(_) => { info!("Refresh configuration: compute node configured"); - compute.set_status(ComputeStatus::Running); + new_status = ComputeStatus::Running; } Err(e) => { error!( "Refresh configuration: could not configure compute node: {}", e ); - // Leave the compute node in the `RefreshConfigurationPending` state if the configuration + // Set the compute node back to the `RefreshConfigurationPending` state if the configuration // was not successful. It should be okay to treat this situation the same as if the loop // hasn't executed yet as long as the detection side keeps notifying. + new_status = ComputeStatus::RefreshConfigurationPending; } } } + // Control plane (HCM)'s response does not contain a spec. This is the "Empty" attachment case. + Ok(None) => { + info!( + "Compute Manager signaled that this compute is no longer attached to any storage. Exiting." + ); + // We just immediately terminate the whole compute_ctl in this case. It's not necessary to attempt a + // clean shutdown as Postgres is probably not responding anyway (which is why we are in this refresh + // configuration state). + std::process::exit(1); + } + // Various error cases: + // - The request to the control plane (HCM) either failed or returned a malformed spec. + // - compute_ctl itself is configured incorrectly (e.g., compute_id is not set). + Err(e) => { + error!( + "Refresh configuration: error getting a parsed spec: {:?}", + e + ); + new_status = ComputeStatus::RefreshConfigurationPending; + // We may be dealing with an overloaded HCM if we end up in this path. Backoff 5 seconds before + // retrying to avoid hammering the HCM. + std::thread::sleep(std::time::Duration::from_secs(5)); + } } + compute.set_status(new_status); } else if state.status == ComputeStatus::Failed { info!("compute node is now in Failed state, exiting"); break; diff --git a/control_plane/src/endpoint.rs b/control_plane/src/endpoint.rs index 20dcf85562..6221d83c7f 100644 --- a/control_plane/src/endpoint.rs +++ b/control_plane/src/endpoint.rs @@ -938,7 +938,8 @@ impl Endpoint { | ComputeStatus::TerminationPendingFast | ComputeStatus::TerminationPendingImmediate | ComputeStatus::Terminated - | ComputeStatus::RefreshConfigurationPending => { + | ComputeStatus::RefreshConfigurationPending + | ComputeStatus::RefreshConfiguration => { bail!("unexpected compute status: {:?}", state.status) } } diff --git a/libs/compute_api/src/responses.rs b/libs/compute_api/src/responses.rs index 7efd94c76a..a27301e45e 100644 --- a/libs/compute_api/src/responses.rs +++ b/libs/compute_api/src/responses.rs @@ -174,6 +174,9 @@ pub enum ComputeStatus { Terminated, // A spec refresh is being requested RefreshConfigurationPending, + // A spec refresh is being applied. We cannot refresh configuration again until the current + // refresh is done, i.e., signal_refresh_configuration() will return 500 error. 
+ RefreshConfiguration, } #[derive(Deserialize, Serialize)] @@ -186,6 +189,10 @@ impl Display for ComputeStatus { match self { ComputeStatus::Empty => f.write_str("empty"), ComputeStatus::ConfigurationPending => f.write_str("configuration-pending"), + ComputeStatus::RefreshConfiguration => f.write_str("refresh-configuration"), + ComputeStatus::RefreshConfigurationPending => { + f.write_str("refresh-configuration-pending") + } ComputeStatus::Init => f.write_str("init"), ComputeStatus::Running => f.write_str("running"), ComputeStatus::Configuration => f.write_str("configuration"), @@ -195,9 +202,6 @@ impl Display for ComputeStatus { f.write_str("termination-pending-immediate") } ComputeStatus::Terminated => f.write_str("terminated"), - ComputeStatus::RefreshConfigurationPending => { - f.write_str("refresh-configuration-pending") - } } } } diff --git a/pgxn/neon/libpagestore.c b/pgxn/neon/libpagestore.c index ab0736e180..1031f185a6 100644 --- a/pgxn/neon/libpagestore.c +++ b/pgxn/neon/libpagestore.c @@ -89,6 +89,8 @@ static int pageserver_response_log_timeout = 10000; static int pageserver_response_disconnect_timeout = 150000; static int conf_refresh_reconnect_attempt_threshold = 16; +// Hadron: timeout for refresh errors (1 minute) +static uint64 kRefreshErrorTimeoutUSec = 1 * USECS_PER_MINUTE; typedef struct { @@ -1046,14 +1048,22 @@ pageserver_disconnect_shard(shardno_t shard_no) extern int hadron_extension_server_port; -static void +// The timestamp (usec) of the first error that occurred while trying to refresh the configuration. +// Will be reset to 0 after a successful refresh. +static uint64 first_recorded_refresh_error_usec = 0; + +// Request compute_ctl to refresh the configuration. This operation may fail, e.g., if the compute_ctl +// is already in the configuration state. The function returns true if the caller needs to cancel the +// current query to avoid dead/live lock. 
+static bool hadron_request_configuration_refresh() { static CURL *handle = NULL; CURLcode res; char *compute_ctl_url; + bool cancel_query = false; if (!lakebase_mode) - return; + return false; if (handle == NULL) { @@ -1073,9 +1083,40 @@ hadron_request_configuration_refresh() { curl_easy_setopt(handle, CURLOPT_URL, compute_ctl_url); res = curl_easy_perform(handle); - if (res != CURLE_OK) + if (res != CURLE_OK ) { - elog(WARNING, "compute_ctl refresh_configuration request failed: %s\n", curl_easy_strerror(res)); + elog(WARNING, "refresh_configuration request failed: %s\n", curl_easy_strerror(res)); + } + else + { + long http_code = 0; + curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &http_code); + if ( res != CURLE_OK ) + { + elog(WARNING, "compute_ctl refresh_configuration request getinfo failed: %s\n", curl_easy_strerror(res)); + } + else + { + elog(LOG, "compute_ctl refresh_configuration got HTTP response: %ld\n", http_code); + if( http_code == 200 ) + { + first_recorded_refresh_error_usec = 0; + } + else + { + if (first_recorded_refresh_error_usec == 0) + { + first_recorded_refresh_error_usec = GetCurrentTimestamp(); + } + else if(GetCurrentTimestamp() - first_recorded_refresh_error_usec > kRefreshErrorTimeoutUSec) + { + { + first_recorded_refresh_error_usec = 0; + cancel_query = true; + } + } + } + } } // In regular Postgres usage, it is not necessary to manually free memory allocated by palloc (psprintf) because @@ -1086,6 +1127,7 @@ hadron_request_configuration_refresh() { { pfree(compute_ctl_url); } + return cancel_query; } // END HADRON @@ -1123,8 +1165,10 @@ pageserver_send(shardno_t shard_no, NeonRequest *request) while (!pageserver_connect(shard_no, shard->n_reconnect_attempts < max_reconnect_attempts ? LOG : ERROR)) { shard->n_reconnect_attempts += 1; - if (shard->n_reconnect_attempts > conf_refresh_reconnect_attempt_threshold) { - hadron_request_configuration_refresh(); + if (shard->n_reconnect_attempts > conf_refresh_reconnect_attempt_threshold + && hadron_request_configuration_refresh() ) + { + neon_shard_log(shard_no, ERROR, "request failed too many times, cancelling query"); } } shard->n_reconnect_attempts = 0; @@ -1338,6 +1382,16 @@ pageserver_try_receive(shardno_t shard_no) neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect: unexpected PQgetCopyData return value: %d", rc); } + /* + * Always poke compute_ctl to request a configuration refresh if we have issues receiving data from pageservers after + * successfully connecting to it. It could be an indication that we are connecting to the wrong pageservers (e.g. PS + * is in secondary mode or otherwise refuses to respond our request). 
+ */ + if ( rc < 0 && hadron_request_configuration_refresh() ) + { + neon_shard_log(shard_no, ERROR, "refresh_configuration request failed, cancelling query"); + } + shard->nresponses_received++; return (NeonResponse *) resp; } diff --git a/test_runner/regress/test_compute_termination.py b/test_runner/regress/test_compute_termination.py new file mode 100644 index 0000000000..2d62ccf20f --- /dev/null +++ b/test_runner/regress/test_compute_termination.py @@ -0,0 +1,369 @@ +from __future__ import annotations + +import json +import os +import shutil +import subprocess +import threading +import time +from http.server import BaseHTTPRequestHandler, HTTPServer +from typing import TYPE_CHECKING + +import requests +from fixtures.log_helper import log +from typing_extensions import override + +if TYPE_CHECKING: + from typing import Any + + from fixtures.common_types import TenantId, TimelineId + from fixtures.neon_fixtures import NeonEnv + from fixtures.port_distributor import PortDistributor + + +def launch_compute_ctl( + env: NeonEnv, + endpoint_name: str, + external_http_port: int, + internal_http_port: int, + pg_port: int, + control_plane_port: int, +) -> subprocess.Popen[str]: + """ + Helper function to launch compute_ctl process with common configuration. + Returns the Popen process object. + """ + # Create endpoint directory structure following the standard pattern + endpoint_path = env.repo_dir / "endpoints" / endpoint_name + + # Clean up any existing endpoint directory to avoid conflicts + if endpoint_path.exists(): + shutil.rmtree(endpoint_path) + + endpoint_path.mkdir(mode=0o755, parents=True, exist_ok=True) + + # pgdata path - compute_ctl will create this directory during basebackup + pgdata_path = endpoint_path / "pgdata" + + # Create log file in endpoint directory + log_file = endpoint_path / "compute.log" + log_handle = open(log_file, "w") + + # Start compute_ctl pointing to our control plane + compute_ctl_path = env.neon_binpath / "compute_ctl" + connstr = f"postgresql://cloud_admin@localhost:{pg_port}/postgres" + + # Find postgres binary path + pg_bin_path = env.pg_distrib_dir / env.pg_version.v_prefixed / "bin" / "postgres" + pg_lib_path = env.pg_distrib_dir / env.pg_version.v_prefixed / "lib" + + env_vars = { + "INSTANCE_ID": "lakebase-instance-id", + "LD_LIBRARY_PATH": str(pg_lib_path), # Linux, etc. + "DYLD_LIBRARY_PATH": str(pg_lib_path), # macOS + } + + cmd = [ + str(compute_ctl_path), + "--external-http-port", + str(external_http_port), + "--internal-http-port", + str(internal_http_port), + "--pgdata", + str(pgdata_path), + "--connstr", + connstr, + "--pgbin", + str(pg_bin_path), + "--compute-id", + endpoint_name, # Use endpoint_name as compute-id + "--control-plane-uri", + f"http://127.0.0.1:{control_plane_port}", + "--lakebase-mode", + "true", + ] + + print(f"Launching compute_ctl with command: {cmd}") + + # Start compute_ctl + process = subprocess.Popen( + cmd, + env=env_vars, + stdout=log_handle, + stderr=subprocess.STDOUT, # Combine stderr with stdout + text=True, + ) + + return process + + +def wait_for_compute_status( + compute_process: subprocess.Popen[str], + http_port: int, + expected_status: str, + timeout_seconds: int = 10, +) -> None: + """ + Wait for compute_ctl to reach the expected status. + Raises an exception if timeout is reached or process exits unexpectedly. 
+ """ + start_time = time.time() + while time.time() - start_time < timeout_seconds: + try: + # Try to connect to the HTTP endpoint + response = requests.get(f"http://localhost:{http_port}/status", timeout=0.5) + if response.status_code == 200: + status_json = response.json() + # Check if it's in expected status + if status_json.get("status") == expected_status: + return + except (requests.ConnectionError, requests.Timeout): + pass + + # Check if process has exited + if compute_process.poll() is not None: + raise Exception( + f"compute_ctl exited unexpectedly with code {compute_process.returncode}." + ) + + time.sleep(0.5) + + # Timeout reached + compute_process.terminate() + raise Exception( + f"compute_ctl failed to reach {expected_status} status within {timeout_seconds} seconds." + ) + + +class EmptySpecHandler(BaseHTTPRequestHandler): + """HTTP handler that returns an Empty compute spec response""" + + def do_GET(self): + if self.path.startswith("/compute/api/v2/computes/") and self.path.endswith("/spec"): + # Return empty status which will put compute in Empty state + response: dict[str, Any] = { + "status": "empty", + "spec": None, + "compute_ctl_config": {"jwks": {"keys": []}}, + } + self.send_response(200) + self.send_header("Content-Type", "application/json") + self.end_headers() + self.wfile.write(json.dumps(response).encode()) + else: + self.send_error(404) + + @override + def log_message(self, format: str, *args: Any): + # Suppress request logging + pass + + +def test_compute_terminate_empty(neon_simple_env: NeonEnv, port_distributor: PortDistributor): + """ + Test that terminating a compute in Empty status works correctly. + + This tests the bug fix where terminating an Empty compute would hang + waiting for a non-existent postgres process to terminate. + """ + env = neon_simple_env + + # Get ports for our test + control_plane_port = port_distributor.get_port() + external_http_port = port_distributor.get_port() + internal_http_port = port_distributor.get_port() + pg_port = port_distributor.get_port() + + # Start a simple HTTP server that will serve the Empty spec + server = HTTPServer(("127.0.0.1", control_plane_port), EmptySpecHandler) + server_thread = threading.Thread(target=server.serve_forever) + server_thread.daemon = True + server_thread.start() + + compute_process = None + try: + # Start compute_ctl with ephemeral tenant ID + compute_process = launch_compute_ctl( + env, + "test-empty-compute", + external_http_port, + internal_http_port, + pg_port, + control_plane_port, + ) + + # Wait for compute_ctl to start and report "empty" status + wait_for_compute_status(compute_process, external_http_port, "empty") + + # Now send terminate request + response = requests.post(f"http://localhost:{external_http_port}/terminate") + + # Verify that the termination request sends back a 200 OK response and is not abruptly terminated. 
+ assert response.status_code == 200, ( + f"Expected 200 OK, got {response.status_code}: {response.text}" + ) + + # Wait for compute_ctl to exit + exit_code = compute_process.wait(timeout=10) + assert exit_code == 0, f"compute_ctl exited with non-zero code: {exit_code}" + + finally: + # Clean up + server.shutdown() + if compute_process and compute_process.poll() is None: + compute_process.terminate() + compute_process.wait() + + +class SwitchableConfigHandler(BaseHTTPRequestHandler): + """HTTP handler that can switch between normal compute configs and compute configs without specs""" + + return_empty_spec: bool = False + tenant_id: TenantId | None = None + timeline_id: TimelineId | None = None + pageserver_port: int | None = None + safekeeper_connstrs: list[str] | None = None + + def do_GET(self): + if self.path.startswith("/compute/api/v2/computes/") and self.path.endswith("/spec"): + if self.return_empty_spec: + # Return empty status + response: dict[str, object | None] = { + "status": "empty", + "spec": None, + "compute_ctl_config": { + "jwks": {"keys": []}, + }, + } + else: + # Return normal attached spec + response = { + "status": "attached", + "spec": { + "format_version": 1.0, + "cluster": { + "roles": [], + "databases": [], + "postgresql_conf": "shared_preload_libraries='neon'", + }, + "tenant_id": str(self.tenant_id) if self.tenant_id else "", + "timeline_id": str(self.timeline_id) if self.timeline_id else "", + "pageserver_connstring": f"postgres://no_user@localhost:{self.pageserver_port}" + if self.pageserver_port + else "", + "safekeeper_connstrings": self.safekeeper_connstrs or [], + "mode": "Primary", + "skip_pg_catalog_updates": True, + "reconfigure_concurrency": 1, + "suspend_timeout_seconds": -1, + }, + "compute_ctl_config": { + "jwks": {"keys": []}, + }, + } + self.send_response(200) + self.send_header("Content-Type", "application/json") + self.end_headers() + self.wfile.write(json.dumps(response).encode()) + else: + self.send_error(404) + + @override + def log_message(self, format: str, *args: Any): + # Suppress request logging + pass + + +def test_compute_empty_spec_during_refresh_configuration( + neon_simple_env: NeonEnv, port_distributor: PortDistributor +): + """ + Test that compute exits when it receives an empty spec during refresh configuration state. + + This test: + 1. Start compute with a normal spec + 2. Change the spec handler to return empty spec + 3. Trigger some condition to force compute to refresh configuration + 4. 
Verify that compute_ctl exits + """ + env = neon_simple_env + + # Get ports for our test + control_plane_port = port_distributor.get_port() + external_http_port = port_distributor.get_port() + internal_http_port = port_distributor.get_port() + pg_port = port_distributor.get_port() + + # Set up handler class variables + SwitchableConfigHandler.tenant_id = env.initial_tenant + SwitchableConfigHandler.timeline_id = env.initial_timeline + SwitchableConfigHandler.pageserver_port = env.pageserver.service_port.pg + # Convert comma-separated string to list + safekeeper_connstrs = env.get_safekeeper_connstrs() + if safekeeper_connstrs: + SwitchableConfigHandler.safekeeper_connstrs = safekeeper_connstrs.split(",") + else: + SwitchableConfigHandler.safekeeper_connstrs = [] + SwitchableConfigHandler.return_empty_spec = False # Start with normal spec + + # Start HTTP server with switchable spec handler + server = HTTPServer(("127.0.0.1", control_plane_port), SwitchableConfigHandler) + server_thread = threading.Thread(target=server.serve_forever) + server_thread.daemon = True + server_thread.start() + + compute_process = None + try: + # Start compute_ctl with tenant and timeline IDs + # Use a unique endpoint name to avoid conflicts + endpoint_name = f"test-refresh-compute-{os.getpid()}" + compute_process = launch_compute_ctl( + env, + endpoint_name, + external_http_port, + internal_http_port, + pg_port, + control_plane_port, + ) + + # Wait for compute_ctl to start and report "running" status + wait_for_compute_status(compute_process, external_http_port, "running", timeout_seconds=30) + + log.info("Compute is running. Now returning empty spec and trigger configuration refresh.") + + # Switch spec fetch handler to return empty spec + SwitchableConfigHandler.return_empty_spec = True + + # Trigger a configuration refresh + try: + requests.post(f"http://localhost:{internal_http_port}/refresh_configuration") + except requests.RequestException as e: + log.info(f"Call to /refresh_configuration failed: {e}") + log.info( + "Ignoring the error, assuming that compute_ctl is already refreshing or has exited" + ) + + # Wait for compute_ctl to exit (it should exit when it gets an empty spec during refresh) + exit_start_time = time.time() + while time.time() - exit_start_time < 30: + if compute_process.poll() is not None: + # Process exited + break + time.sleep(0.5) + + # Verify that compute_ctl exited + exit_code = compute_process.poll() + if exit_code is None: + compute_process.terminate() + raise Exception("compute_ctl did not exit after receiving empty spec.") + + # The exit code might not be 0 in this case since it's an unexpected termination + # but we mainly care that it did exit + assert exit_code is not None, "compute_ctl should have exited" + + finally: + # Clean up + server.shutdown() + if compute_process and compute_process.poll() is None: + compute_process.terminate() + compute_process.wait() diff --git a/test_runner/regress/test_hadron_ps_connectivity_metrics.py b/test_runner/regress/test_hadron_ps_connectivity_metrics.py index 7590c1236c..ff1f37b634 100644 --- a/test_runner/regress/test_hadron_ps_connectivity_metrics.py +++ b/test_runner/regress/test_hadron_ps_connectivity_metrics.py @@ -5,6 +5,7 @@ from fixtures.common_types import TenantShardId from fixtures.log_helper import log from fixtures.metrics import parse_metrics from fixtures.neon_fixtures import Endpoint, NeonEnvBuilder, NeonPageserver +from requests.exceptions import ConnectionError # Helper function to attempt reconfiguration of the 
compute to point to a new pageserver. Note that in these tests, @@ -75,8 +76,14 @@ def test_misrouted_to_secondary( assert read_misrouted_metric_value(secondary_ps) == 0 assert read_request_error_metric_value(endpoint) == 0 _attempt_reconfiguration(endpoint, new_pageserver_id=secondary_ps.id, timeout_sec=2.0) - assert read_misrouted_metric_value(secondary_ps) > 0, "PS metric not incremented" - assert read_request_error_metric_value(endpoint) > 0, "compute_ctl metric not incremented" + assert read_misrouted_metric_value(secondary_ps) > 0 + try: + assert read_request_error_metric_value(endpoint) > 0 + except ConnectionError: + # When configuring PG to use misconfigured pageserver, PG will cancel the query after certain number of failed + # reconfigure attempts. This will cause compute_ctl to exit. + log.info("Cannot connect to PG, ignoring") + pass def test_misrouted_to_ps_not_hosting_tenant( @@ -120,5 +127,11 @@ def test_misrouted_to_ps_not_hosting_tenant( assert read_misrouted_metric_value(non_hosting_ps) == 0 assert read_request_error_metric_value(endpoint) == 0 _attempt_reconfiguration(endpoint, new_pageserver_id=non_hosting_ps.id, timeout_sec=2.0) - assert read_misrouted_metric_value(non_hosting_ps) > 0, "PS metric not incremented" - assert read_request_error_metric_value(endpoint) > 0, "compute_ctl metric not incremented" + assert read_misrouted_metric_value(non_hosting_ps) > 0 + try: + assert read_request_error_metric_value(endpoint) > 0 + except ConnectionError: + # When configuring PG to use misconfigured pageserver, PG will cancel the query after certain number of failed + # reconfigure attempts. This will cause compute_ctl to exit. + log.info("Cannot connect to PG, ignoring") + pass
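
---

The query-cancellation policy that PATCH 23 adds to `pgxn/neon/libpagestore.c` can be summarized as: keep poking compute_ctl to refresh the configuration, reset the failure clock on any successful (HTTP 200) refresh, and only cancel the in-flight Postgres query once refresh attempts have been failing continuously for longer than a fixed window (one minute in the patch). The sketch below is a minimal Python model of that policy for illustration only; it is not the C implementation, and the class and function names are invented here.

```python
import time

# Window after which a continuous streak of refresh failures cancels the query
# (mirrors kRefreshErrorTimeoutUSec = 1 minute in the patch).
REFRESH_ERROR_TIMEOUT_SEC = 60.0


class RefreshErrorTracker:
    """Illustrative model of the refresh-failure tracking added in PATCH 23."""

    def __init__(self) -> None:
        # Timestamp of the first error in the current failure streak; None means healthy.
        self._first_error_at: float | None = None

    def record(self, http_status: int | None, now: float | None = None) -> bool:
        """Record the outcome of one refresh_configuration request.

        Returns True when the caller should cancel the current query because
        refreshes have been failing for longer than the timeout window.
        """
        now = time.monotonic() if now is None else now
        if http_status == 200:
            # A successful refresh resets the failure streak.
            self._first_error_at = None
            return False
        if self._first_error_at is None:
            # First failure starts the clock but does not cancel anything yet.
            self._first_error_at = now
            return False
        if now - self._first_error_at > REFRESH_ERROR_TIMEOUT_SEC:
            # Failures have persisted past the window: reset the streak and
            # tell the caller to cancel the query.
            self._first_error_at = None
            return True
        return False


if __name__ == "__main__":
    tracker = RefreshErrorTracker()
    assert tracker.record(500, now=0.0) is False    # first failure starts the clock
    assert tracker.record(500, now=30.0) is False   # still within the window
    assert tracker.record(200, now=40.0) is False   # success resets the streak
    assert tracker.record(500, now=50.0) is False   # a new streak starts here
    assert tracker.record(500, now=120.0) is True   # >60s of failures: cancel the query
```

In the patch itself, the equivalent decision is made inside `hadron_request_configuration_refresh()`, and a `true` result is turned into query cancellation by logging at `ERROR` level via `neon_shard_log` in `pageserver_send` and `pageserver_try_receive`.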