From 218be9eb32788b6ed46f56515bacfbba12b97ae5 Mon Sep 17 00:00:00 2001 From: George MacKerron Date: Tue, 15 Aug 2023 14:52:00 +0100 Subject: [PATCH 01/40] Added deferrable transaction option to http batch queries (#4993) ## Problem HTTP batch queries currently allow us to set the isolation level and read only, but not deferrable. ## Summary of changes Add support for deferrable. Echo deferrable status in response headers only if true. Likewise, now echo read-only status in response headers only if true. --- proxy/src/http/sql_over_http.rs | 30 +++++++++++++++++++++--------- test_runner/regress/test_proxy.py | 12 ++++++++++-- 2 files changed, 31 insertions(+), 11 deletions(-) diff --git a/proxy/src/http/sql_over_http.rs b/proxy/src/http/sql_over_http.rs index aa06a91bba..33375e63e9 100644 --- a/proxy/src/http/sql_over_http.rs +++ b/proxy/src/http/sql_over_http.rs @@ -47,6 +47,7 @@ static ARRAY_MODE: HeaderName = HeaderName::from_static("neon-array-mode"); static ALLOW_POOL: HeaderName = HeaderName::from_static("neon-pool-opt-in"); static TXN_ISOLATION_LEVEL: HeaderName = HeaderName::from_static("neon-batch-isolation-level"); static TXN_READ_ONLY: HeaderName = HeaderName::from_static("neon-batch-read-only"); +static TXN_DEFERRABLE: HeaderName = HeaderName::from_static("neon-batch-deferrable"); static HEADER_VALUE_TRUE: HeaderValue = HeaderValue::from_static("true"); @@ -195,7 +196,7 @@ pub async fn handle( // Allow connection pooling only if explicitly requested let allow_pool = headers.get(&ALLOW_POOL) == Some(&HEADER_VALUE_TRUE); - // isolation level and read only + // isolation level, read only and deferrable let txn_isolation_level_raw = headers.get(&TXN_ISOLATION_LEVEL).cloned(); let txn_isolation_level = match txn_isolation_level_raw { @@ -209,8 +210,8 @@ pub async fn handle( None => None, }; - let txn_read_only_raw = headers.get(&TXN_READ_ONLY).cloned(); - let txn_read_only = txn_read_only_raw.as_ref() == Some(&HEADER_VALUE_TRUE); + let txn_read_only = headers.get(&TXN_READ_ONLY) == Some(&HEADER_VALUE_TRUE); + let txn_deferrable = headers.get(&TXN_DEFERRABLE) == Some(&HEADER_VALUE_TRUE); let request_content_length = match request.body().size_hint().upper() { Some(v) => v, @@ -247,6 +248,9 @@ pub async fn handle( if txn_read_only { builder = builder.read_only(true); } + if txn_deferrable { + builder = builder.deferrable(true); + } let transaction = builder.start().await?; for query in batch_query.queries { let result = query_to_json(&transaction, query, raw_output, array_mode).await; @@ -260,12 +264,20 @@ pub async fn handle( } transaction.commit().await?; let mut headers = HashMap::default(); - headers.insert( - TXN_READ_ONLY.clone(), - HeaderValue::try_from(txn_read_only.to_string())?, - ); - if let Some(txn_isolation_level_raw) = txn_isolation_level_raw { - headers.insert(TXN_ISOLATION_LEVEL.clone(), txn_isolation_level_raw); + if txn_read_only { + headers.insert( + TXN_READ_ONLY.clone(), + HeaderValue::try_from(txn_read_only.to_string())?, + ); + } + if txn_deferrable { + headers.insert( + TXN_DEFERRABLE.clone(), + HeaderValue::try_from(txn_deferrable.to_string())?, + ); + } + if let Some(txn_isolation_level) = txn_isolation_level_raw { + headers.insert(TXN_ISOLATION_LEVEL.clone(), txn_isolation_level); } Ok((json!({ "results": results }), headers)) } diff --git a/test_runner/regress/test_proxy.py b/test_runner/regress/test_proxy.py index fabec6b5bc..dd767e14b7 100644 --- a/test_runner/regress/test_proxy.py +++ b/test_runner/regress/test_proxy.py @@ -265,7 +265,11 @@ def 
test_sql_over_http_output_options(static_proxy: NeonProxy): def test_sql_over_http_batch(static_proxy: NeonProxy): static_proxy.safe_psql("create role http with login password 'http' superuser") - def qq(queries: List[Tuple[str, Optional[List[Any]]]], read_only: bool = False) -> Any: + def qq( + queries: List[Tuple[str, Optional[List[Any]]]], + read_only: bool = False, + deferrable: bool = False, + ) -> Any: connstr = f"postgresql://http:http@{static_proxy.domain}:{static_proxy.proxy_port}/postgres" response = requests.post( f"https://{static_proxy.domain}:{static_proxy.external_http_port}/sql", @@ -277,6 +281,7 @@ def test_sql_over_http_batch(static_proxy: NeonProxy): "Neon-Connection-String": connstr, "Neon-Batch-Isolation-Level": "Serializable", "Neon-Batch-Read-Only": "true" if read_only else "false", + "Neon-Batch-Deferrable": "true" if deferrable else "false", }, verify=str(static_proxy.test_output_dir / "proxy.crt"), ) @@ -299,7 +304,8 @@ def test_sql_over_http_batch(static_proxy: NeonProxy): ) assert headers["Neon-Batch-Isolation-Level"] == "Serializable" - assert headers["Neon-Batch-Read-Only"] == "false" + assert "Neon-Batch-Read-Only" not in headers + assert "Neon-Batch-Deferrable" not in headers assert result[0]["rows"] == [{"answer": 42}] assert result[1]["rows"] == [{"answer": "42"}] @@ -327,8 +333,10 @@ def test_sql_over_http_batch(static_proxy: NeonProxy): ("select 42 as answer", None), ], True, + True, ) assert headers["Neon-Batch-Isolation-Level"] == "Serializable" assert headers["Neon-Batch-Read-Only"] == "true" + assert headers["Neon-Batch-Deferrable"] == "true" assert result[0]["rows"] == [{"answer": 42}] From 207919f5eb7de8d54ad62fe23b1fa01954fd8ae2 Mon Sep 17 00:00:00 2001 From: Alexander Bayandin Date: Tue, 15 Aug 2023 15:32:30 +0100 Subject: [PATCH 02/40] Upload test results to DB right after generation (#4967) ## Problem While adding new test results format, I've also changed the way we upload Allure reports to S3 (https://github.com/neondatabase/neon/pull/4549/commits/722c7956bb932b639759f41f49aadb6115ef2179) to avoid duplicated results from previous runs. But it broke links at earlier results (results are still available but on different URLs). This PR fixes this (by reverting logic in https://github.com/neondatabase/neon/pull/4549/commits/722c7956bb932b639759f41f49aadb6115ef2179 changes), and moves the logic for storing test results into db to allure generate step. It allows us to avoid test results duplicates in the db and saves some time on extra s3 downloads that happened in a different job before the PR. Ref https://neondb.slack.com/archives/C059ZC138NR/p1691669522160229 ## Summary of changes - Move test results storing logic from a workflow to `actions/allure-report-generate` --- .../actions/allure-report-generate/action.yml | 48 ++++++++++++++++++- .github/workflows/build_and_test.yml | 44 ++--------------- 2 files changed, 51 insertions(+), 41 deletions(-) diff --git a/.github/actions/allure-report-generate/action.yml b/.github/actions/allure-report-generate/action.yml index daa369a1a0..f959833119 100644 --- a/.github/actions/allure-report-generate/action.yml +++ b/.github/actions/allure-report-generate/action.yml @@ -1,6 +1,13 @@ name: 'Create Allure report' description: 'Generate Allure report from uploaded by actions/allure-report-store tests results' +inputs: + store-test-results-into-db: + description: 'Whether to store test results into the database. 
TEST_RESULT_CONNSTR/TEST_RESULT_CONNSTR_NEW should be set' + type: boolean + required: false + default: false + outputs: base-url: description: 'Base URL for Allure report' @@ -139,9 +146,11 @@ runs: sed -i 's| ${WORKDIR}/index.html @@ -170,6 +179,41 @@ runs: aws s3 rm "s3://${BUCKET}/${LOCK_FILE}" fi + - name: Store Allure test stat in the DB + if: ${{ !cancelled() && inputs.store-test-results-into-db == 'true' }} + shell: bash -euxo pipefail {0} + env: + COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }} + REPORT_JSON_URL: ${{ steps.generate-report.outputs.report-json-url }} + run: | + export DATABASE_URL=${REGRESS_TEST_RESULT_CONNSTR} + + ./scripts/pysync + + poetry run python3 scripts/ingest_regress_test_result.py \ + --revision ${COMMIT_SHA} \ + --reference ${GITHUB_REF} \ + --build-type unified \ + --ingest ${WORKDIR}/report/data/suites.json + + - name: Store Allure test stat in the DB (new) + if: ${{ !cancelled() && inputs.store-test-results-into-db == 'true' }} + shell: bash -euxo pipefail {0} + env: + COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }} + BASE_S3_URL: ${{ steps.generate-report.outputs.base-s3-url }} + run: | + export DATABASE_URL=${REGRESS_TEST_RESULT_CONNSTR_NEW} + + ./scripts/pysync + + poetry run python3 scripts/ingest_regress_test_result-new-format.py \ + --reference ${GITHUB_REF} \ + --revision ${COMMIT_SHA} \ + --run-id ${GITHUB_RUN_ID} \ + --run-attempt ${GITHUB_RUN_ATTEMPT} \ + --test-cases-dir ${WORKDIR}/report/data/test-cases + - name: Cleanup if: always() shell: bash -euxo pipefail {0} diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 853f5f2919..a8eab7a86f 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -432,6 +432,11 @@ jobs: if: ${{ !cancelled() }} id: create-allure-report uses: ./.github/actions/allure-report-generate + with: + store-test-results-into-db: true + env: + REGRESS_TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR }} + REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }} - uses: actions/github-script@v6 if: ${{ !cancelled() }} @@ -452,45 +457,6 @@ jobs: report, }) - - name: Store Allure test stat in the DB - if: ${{ !cancelled() && steps.create-allure-report.outputs.report-json-url }} - env: - COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }} - REPORT_JSON_URL: ${{ steps.create-allure-report.outputs.report-json-url }} - TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR }} - run: | - ./scripts/pysync - - curl --fail --output suites.json "${REPORT_JSON_URL}" - export BUILD_TYPE=unified - export DATABASE_URL="$TEST_RESULT_CONNSTR" - - poetry run python3 scripts/ingest_regress_test_result.py \ - --revision ${COMMIT_SHA} \ - --reference ${GITHUB_REF} \ - --build-type ${BUILD_TYPE} \ - --ingest suites.json - - - name: Store Allure test stat in the DB (new) - if: ${{ !cancelled() && steps.create-allure-report.outputs.report-json-url }} - env: - COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }} - REPORT_JSON_URL: ${{ steps.create-allure-report.outputs.report-json-url }} - TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }} - BASE_S3_URL: ${{ steps.create-allure-report.outputs.base-s3-url }} - run: | - aws s3 cp --only-show-errors --recursive ${BASE_S3_URL}/data/test-cases ./test-cases - - ./scripts/pysync - - export DATABASE_URL="$TEST_RESULT_CONNSTR" - poetry run python3 scripts/ingest_regress_test_result-new-format.py \ - 
--reference ${GITHUB_REF} \ - --revision ${COMMIT_SHA} \ - --run-id ${GITHUB_RUN_ID} \ - --run-attempt ${GITHUB_RUN_ATTEMPT} \ - --test-cases-dir ./test-cases - coverage-report: runs-on: [ self-hosted, gen3, small ] container: From 52c2c693510bc9975817354350108623d3fbef8e Mon Sep 17 00:00:00 2001 From: Dmitry Rodionov Date: Tue, 15 Aug 2023 19:24:23 +0300 Subject: [PATCH 03/40] fsync directory before mark file removal (#4986) ## Problem Deletions can be possibly reordered. Use fsync to avoid the case when mark file doesnt exist but other tenant/timeline files do. See added comments. resolves #4987 --- libs/utils/src/crashsafe.rs | 4 ++++ pageserver/src/tenant/delete.rs | 13 +++++++++++++ .../src/tenant/remote_timeline_client/download.rs | 8 ++------ pageserver/src/tenant/timeline/delete.rs | 11 +++++++++++ test_runner/fixtures/pageserver/utils.py | 2 +- 5 files changed, 31 insertions(+), 7 deletions(-) diff --git a/libs/utils/src/crashsafe.rs b/libs/utils/src/crashsafe.rs index 2c7e6e20ab..fd20d2d2ed 100644 --- a/libs/utils/src/crashsafe.rs +++ b/libs/utils/src/crashsafe.rs @@ -111,6 +111,10 @@ pub fn fsync(path: &Path) -> io::Result<()> { .map_err(|e| io::Error::new(e.kind(), format!("Failed to fsync file {path:?}: {e}"))) } +pub async fn fsync_async(path: impl AsRef) -> Result<(), std::io::Error> { + tokio::fs::File::open(path).await?.sync_all().await +} + #[cfg(test)] mod tests { use tempfile::tempdir; diff --git a/pageserver/src/tenant/delete.rs b/pageserver/src/tenant/delete.rs index 38fc31f69c..4f34f3c113 100644 --- a/pageserver/src/tenant/delete.rs +++ b/pageserver/src/tenant/delete.rs @@ -212,6 +212,19 @@ async fn cleanup_remaining_fs_traces( ))? }); + // Make sure previous deletions are ordered before mark removal. + // Otherwise there is no guarantee that they reach the disk before mark deletion. + // So its possible for mark to reach disk first and for other deletions + // to be reordered later and thus missed if a crash occurs. + // Note that we dont need to sync after mark file is removed + // because we can tolerate the case when mark file reappears on startup. 
+ let tenant_path = &conf.tenant_path(tenant_id); + if tenant_path.exists() { + crashsafe::fsync_async(&conf.tenant_path(tenant_id)) + .await + .context("fsync_pre_mark_remove")?; + } + rm(conf.tenant_deleted_mark_file_path(tenant_id), false).await?; fail::fail_point!("tenant-delete-before-remove-tenant-dir", |_| { diff --git a/pageserver/src/tenant/remote_timeline_client/download.rs b/pageserver/src/tenant/remote_timeline_client/download.rs index 0a6fd03887..7426ae10e9 100644 --- a/pageserver/src/tenant/remote_timeline_client/download.rs +++ b/pageserver/src/tenant/remote_timeline_client/download.rs @@ -11,7 +11,7 @@ use std::time::Duration; use anyhow::{anyhow, Context}; use tokio::fs; use tokio::io::AsyncWriteExt; -use utils::backoff; +use utils::{backoff, crashsafe}; use crate::config::PageServerConf; use crate::tenant::storage_layer::LayerFileName; @@ -23,10 +23,6 @@ use utils::id::{TenantId, TimelineId}; use super::index::{IndexPart, LayerFileMetadata}; use super::{FAILED_DOWNLOAD_WARN_THRESHOLD, FAILED_REMOTE_OP_RETRIES}; -async fn fsync_path(path: impl AsRef) -> Result<(), std::io::Error> { - fs::File::open(path).await?.sync_all().await -} - static MAX_DOWNLOAD_DURATION: Duration = Duration::from_secs(120); /// @@ -150,7 +146,7 @@ pub async fn download_layer_file<'a>( }) .map_err(DownloadError::Other)?; - fsync_path(&local_path) + crashsafe::fsync_async(&local_path) .await .with_context(|| format!("Could not fsync layer file {}", local_path.display(),)) .map_err(DownloadError::Other)?; diff --git a/pageserver/src/tenant/timeline/delete.rs b/pageserver/src/tenant/timeline/delete.rs index dba6475c27..d3d9c8a082 100644 --- a/pageserver/src/tenant/timeline/delete.rs +++ b/pageserver/src/tenant/timeline/delete.rs @@ -279,6 +279,17 @@ async fn cleanup_remaining_timeline_fs_traces( Err(anyhow::anyhow!("failpoint: timeline-delete-after-rm-dir"))? }); + // Make sure previous deletions are ordered before mark removal. + // Otherwise there is no guarantee that they reach the disk before mark deletion. + // So its possible for mark to reach disk first and for other deletions + // to be reordered later and thus missed if a crash occurs. + // Note that we dont need to sync after mark file is removed + // because we can tolerate the case when mark file reappears on startup. + let timeline_path = conf.timelines_path(&tenant_id); + crashsafe::fsync_async(timeline_path) + .await + .context("fsync_pre_mark_remove")?; + // Remove delete mark tokio::fs::remove_file(conf.timeline_delete_mark_file_path(tenant_id, timeline_id)) .await diff --git a/test_runner/fixtures/pageserver/utils.py b/test_runner/fixtures/pageserver/utils.py index 6032ff5b68..3b95990a57 100644 --- a/test_runner/fixtures/pageserver/utils.py +++ b/test_runner/fixtures/pageserver/utils.py @@ -284,4 +284,4 @@ MANY_SMALL_LAYERS_TENANT_CONFIG = { def poll_for_remote_storage_iterations(remote_storage_kind: RemoteStorageKind) -> int: - return 20 if remote_storage_kind is RemoteStorageKind.REAL_S3 else 8 + return 30 if remote_storage_kind is RemoteStorageKind.REAL_S3 else 10 From 13adc83fc3bcc89009570b9413929202de2c7812 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 14 Aug 2023 23:00:00 +0300 Subject: [PATCH 04/40] Allow to enable http/pg/pg tenant only auth separately in safekeeper. The same option enables auth and specifies public key, so this allows to use different public keys as well. The motivation is to 1) Allow to e.g. change pageserver key/token without replacing all compute tokens. 2) Enable auth gradually. 
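For illustration, a hypothetical invocation (the key file names are
placeholders; in practice the three options may point at different .pem
files, or any of them may be omitted to leave auth disabled on that
endpoint):

  safekeeper ... \
    --pg-auth-public-key-path=pg_auth_key.pem \
    --pg-tenant-only-auth-public-key-path=tenant_auth_key.pem \
    --http-auth-public-key-path=http_auth_key.pem
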
--- control_plane/src/safekeeper.rs | 23 +++++++++---- safekeeper/src/bin/safekeeper.rs | 56 +++++++++++++++++++++++++++----- safekeeper/src/handler.rs | 28 ++++++++-------- safekeeper/src/http/routes.rs | 4 +-- safekeeper/src/lib.rs | 8 +++-- safekeeper/src/wal_service.rs | 22 ++++++++----- 6 files changed, 98 insertions(+), 43 deletions(-) diff --git a/control_plane/src/safekeeper.rs b/control_plane/src/safekeeper.rs index be0192d137..961df38ea2 100644 --- a/control_plane/src/safekeeper.rs +++ b/control_plane/src/safekeeper.rs @@ -161,14 +161,23 @@ impl SafekeeperNode { let key_path = self.env.base_data_dir.join("auth_public_key.pem"); if self.conf.auth_enabled { + let key_path_string = key_path + .to_str() + .with_context(|| { + format!("Key path {key_path:?} cannot be represented as a unicode string") + })? + .to_owned(); args.extend([ - "--auth-validation-public-key-path".to_owned(), - key_path - .to_str() - .with_context(|| { - format!("Key path {key_path:?} cannot be represented as a unicode string") - })? - .to_owned(), + "--pg-auth-public-key-path".to_owned(), + key_path_string.clone(), + ]); + args.extend([ + "--pg-tenant-only-auth-public-key-path".to_owned(), + key_path_string.clone(), + ]); + args.extend([ + "--http-auth-public-key-path".to_owned(), + key_path_string.clone(), ]); } diff --git a/safekeeper/src/bin/safekeeper.rs b/safekeeper/src/bin/safekeeper.rs index acc717294b..b04076ab59 100644 --- a/safekeeper/src/bin/safekeeper.rs +++ b/safekeeper/src/bin/safekeeper.rs @@ -122,9 +122,21 @@ struct Args { /// WAL backup horizon. #[arg(long)] disable_wal_backup: bool, - /// Path to a .pem public key which is used to check JWT tokens. - #[arg(long)] - auth_validation_public_key_path: Option, + /// If given, enables auth on incoming connections to WAL service endpoint + /// (--listen-pg). Value specifies path to a .pem public key used for + /// validations of JWT tokens. + #[arg(long, verbatim_doc_comment)] + pg_auth_public_key_path: Option, + /// If given, enables auth on incoming connections to tenant only WAL + /// service endpoint (--listen-pg-tenant-only). Value specifies path to a + /// .pem public key used for validations of JWT tokens. + #[arg(long, verbatim_doc_comment)] + pg_tenant_only_auth_public_key_path: Option, + /// If given, enables auth on incoming connections to http management + /// service endpoint (--listen-http). Value specifies path to a .pem public + /// key used for validations of JWT tokens. + #[arg(long, verbatim_doc_comment)] + http_auth_public_key_path: Option, /// Format for logging, either 'plain' or 'json'. 
#[arg(long, default_value = "plain")] log_format: String, @@ -170,13 +182,37 @@ async fn main() -> anyhow::Result<()> { return Ok(()); } - let auth = match args.auth_validation_public_key_path.as_ref() { + let pg_auth = match args.pg_auth_public_key_path.as_ref() { None => { - info!("auth is disabled"); + info!("pg auth is disabled"); None } Some(path) => { - info!("loading JWT auth key from {}", path.display()); + info!("loading pg auth JWT key from {}", path.display()); + Some(Arc::new( + JwtAuth::from_key_path(path).context("failed to load the auth key")?, + )) + } + }; + let pg_tenant_only_auth = match args.pg_tenant_only_auth_public_key_path.as_ref() { + None => { + info!("pg tenant only auth is disabled"); + None + } + Some(path) => { + info!("loading pg tenant only auth JWT key from {}", path.display()); + Some(Arc::new( + JwtAuth::from_key_path(path).context("failed to load the auth key")?, + )) + } + }; + let http_auth = match args.http_auth_public_key_path.as_ref() { + None => { + info!("http auth is disabled"); + None + } + Some(path) => { + info!("loading http auth JWT key from {}", path.display()); Some(Arc::new( JwtAuth::from_key_path(path).context("failed to load the auth key")?, )) @@ -199,7 +235,9 @@ async fn main() -> anyhow::Result<()> { max_offloader_lag_bytes: args.max_offloader_lag, wal_backup_enabled: !args.disable_wal_backup, backup_parallel_jobs: args.wal_backup_parallel_jobs, - auth, + pg_auth, + pg_tenant_only_auth, + http_auth, current_thread_runtime: args.current_thread_runtime, }; @@ -288,7 +326,7 @@ async fn start_safekeeper(conf: SafeKeeperConf) -> Result<()> { .spawn(wal_service::task_main( conf_, pg_listener, - Some(Scope::SafekeeperData), + Scope::SafekeeperData, )) // wrap with task name for error reporting .map(|res| ("WAL service main".to_owned(), res)); @@ -302,7 +340,7 @@ async fn start_safekeeper(conf: SafeKeeperConf) -> Result<()> { .spawn(wal_service::task_main( conf_, pg_listener_tenant_only, - Some(Scope::Tenant), + Scope::Tenant, )) // wrap with task name for error reporting .map(|res| ("WAL service tenant only main".to_owned(), res)); diff --git a/safekeeper/src/handler.rs b/safekeeper/src/handler.rs index 136d62b321..134331c673 100644 --- a/safekeeper/src/handler.rs +++ b/safekeeper/src/handler.rs @@ -4,6 +4,7 @@ use anyhow::Context; use std::str::FromStr; use std::str::{self}; +use std::sync::Arc; use tokio::io::{AsyncRead, AsyncWrite}; use tracing::{info, info_span, Instrument}; @@ -20,7 +21,7 @@ use postgres_backend::{self, PostgresBackend}; use postgres_ffi::PG_TLI; use pq_proto::{BeMessage, FeStartupPacket, RowDescriptor, INT4_OID, TEXT_OID}; use regex::Regex; -use utils::auth::{Claims, Scope}; +use utils::auth::{Claims, JwtAuth, Scope}; use utils::{ id::{TenantId, TenantTimelineId, TimelineId}, lsn::Lsn, @@ -36,8 +37,8 @@ pub struct SafekeeperPostgresHandler { pub ttid: TenantTimelineId, /// Unique connection id is logged in spans for observability. pub conn_id: ConnectionId, - /// Auth scope allowed on the connections. None if auth is not configured. - allowed_auth_scope: Option, + /// Auth scope allowed on the connections and public key used to check auth tokens. None if auth is not configured. 
+ auth: Option<(Scope, Arc)>, claims: Option, io_metrics: Option, } @@ -154,18 +155,17 @@ impl postgres_backend::Handler ) -> Result<(), QueryError> { // this unwrap is never triggered, because check_auth_jwt only called when auth_type is NeonJWT // which requires auth to be present - let data = self - .conf + let (allowed_auth_scope, auth) = self .auth .as_ref() - .unwrap() - .decode(str::from_utf8(jwt_response).context("jwt response is not UTF-8")?)?; + .expect("auth_type is configured but .auth of handler is missing"); + let data = + auth.decode(str::from_utf8(jwt_response).context("jwt response is not UTF-8")?)?; - let scope = self - .allowed_auth_scope - .expect("auth is enabled but scope is not configured"); // The handler might be configured to allow only tenant scope tokens. - if matches!(scope, Scope::Tenant) && !matches!(data.claims.scope, Scope::Tenant) { + if matches!(allowed_auth_scope, Scope::Tenant) + && !matches!(data.claims.scope, Scope::Tenant) + { return Err(QueryError::Other(anyhow::anyhow!( "passed JWT token is for full access, but only tenant scope is allowed" ))); @@ -244,7 +244,7 @@ impl SafekeeperPostgresHandler { conf: SafeKeeperConf, conn_id: u32, io_metrics: Option, - allowed_auth_scope: Option, + auth: Option<(Scope, Arc)>, ) -> Self { SafekeeperPostgresHandler { conf, @@ -254,7 +254,7 @@ impl SafekeeperPostgresHandler { ttid: TenantTimelineId::empty(), conn_id, claims: None, - allowed_auth_scope, + auth, io_metrics, } } @@ -262,7 +262,7 @@ impl SafekeeperPostgresHandler { // when accessing management api supply None as an argument // when using to authorize tenant pass corresponding tenant id fn check_permission(&self, tenant_id: Option) -> anyhow::Result<()> { - if self.conf.auth.is_none() { + if self.auth.is_none() { // auth is set to Trust, nothing to check so just return ok return Ok(()); } diff --git a/safekeeper/src/http/routes.rs b/safekeeper/src/http/routes.rs index 5cd0973ad6..411bfa295c 100644 --- a/safekeeper/src/http/routes.rs +++ b/safekeeper/src/http/routes.rs @@ -359,7 +359,7 @@ async fn dump_debug_handler(mut request: Request) -> Result /// Safekeeper http router. pub fn make_router(conf: SafeKeeperConf) -> RouterBuilder { let mut router = endpoint::make_router(); - if conf.auth.is_some() { + if conf.http_auth.is_some() { router = router.middleware(auth_middleware(|request| { #[allow(clippy::mutable_key_type)] static ALLOWLIST_ROUTES: Lazy> = @@ -375,7 +375,7 @@ pub fn make_router(conf: SafeKeeperConf) -> RouterBuilder // NB: on any changes do not forget to update the OpenAPI spec // located nearby (/safekeeper/src/http/openapi_spec.yaml). 
- let auth = conf.auth.clone(); + let auth = conf.http_auth.clone(); router .data(Arc::new(conf)) .data(auth) diff --git a/safekeeper/src/lib.rs b/safekeeper/src/lib.rs index 490af1de95..f8adb86250 100644 --- a/safekeeper/src/lib.rs +++ b/safekeeper/src/lib.rs @@ -65,7 +65,9 @@ pub struct SafeKeeperConf { pub max_offloader_lag_bytes: u64, pub backup_parallel_jobs: usize, pub wal_backup_enabled: bool, - pub auth: Option>, + pub pg_auth: Option>, + pub pg_tenant_only_auth: Option>, + pub http_auth: Option>, pub current_thread_runtime: bool, } @@ -99,7 +101,9 @@ impl SafeKeeperConf { broker_keepalive_interval: Duration::from_secs(5), wal_backup_enabled: true, backup_parallel_jobs: 1, - auth: None, + pg_auth: None, + pg_tenant_only_auth: None, + http_auth: None, heartbeat_timeout: Duration::new(5, 0), max_offloader_lag_bytes: defaults::DEFAULT_MAX_OFFLOADER_LAG_BYTES, current_thread_runtime: false, diff --git a/safekeeper/src/wal_service.rs b/safekeeper/src/wal_service.rs index 43e870e621..9fabaa79fb 100644 --- a/safekeeper/src/wal_service.rs +++ b/safekeeper/src/wal_service.rs @@ -16,10 +16,13 @@ use crate::SafeKeeperConf; use postgres_backend::{AuthType, PostgresBackend}; /// Accept incoming TCP connections and spawn them into a background thread. +/// allowed_auth_scope is either SafekeeperData (wide JWT tokens giving access +/// to any tenant are allowed) or Tenant (only tokens giving access to specific +/// tenant are allowed). Doesn't matter if auth is disabled in conf. pub async fn task_main( conf: SafeKeeperConf, pg_listener: std::net::TcpListener, - allowed_auth_scope: Option, + allowed_auth_scope: Scope, ) -> anyhow::Result<()> { // Tokio's from_std won't do this for us, per its comment. pg_listener.set_nonblocking(true)?; @@ -50,7 +53,7 @@ async fn handle_socket( socket: TcpStream, conf: SafeKeeperConf, conn_id: ConnectionId, - allowed_auth_scope: Option, + allowed_auth_scope: Scope, ) -> Result<(), QueryError> { socket.set_nodelay(true)?; let peer_addr = socket.peer_addr()?; @@ -82,16 +85,17 @@ async fn handle_socket( }, ); - let auth_type = match conf.auth { + let auth_key = match allowed_auth_scope { + Scope::Tenant => conf.pg_tenant_only_auth.clone(), + _ => conf.pg_auth.clone(), + }; + let auth_type = match auth_key { None => AuthType::Trust, Some(_) => AuthType::NeonJWT, }; - let mut conn_handler = SafekeeperPostgresHandler::new( - conf, - conn_id, - Some(traffic_metrics.clone()), - allowed_auth_scope, - ); + let auth_pair = auth_key.map(|key| (allowed_auth_scope, key)); + let mut conn_handler = + SafekeeperPostgresHandler::new(conf, conn_id, Some(traffic_metrics.clone()), auth_pair); let pgbackend = PostgresBackend::new_from_io(socket, peer_addr, auth_type, None)?; // libpq protocol between safekeeper and walproposer / pageserver // We don't use shutdown. From 4687b2e5975a4038f66424ac59d3217fb895eb16 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 15 Aug 2023 17:58:42 +0300 Subject: [PATCH 05/40] Test that auth on pg/http services can be enabled separately in sks. To this end add 1) -e option to 'neon_local safekeeper start' command appending extra options to safekeeper invocation; 2) Allow multiple occurrences of the same option in safekeepers, the last value is taken. 3) Allow to specify empty string for *-auth-public-key-path opts, it disables auth for the service. 
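For example, auth on just the http service can be switched off by
restarting the safekeeper with

  neon_local safekeeper restart -e=--http-auth-public-key-path=

(the empty value disables auth for that endpoint, while the pg endpoints
keep requiring tokens).
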
--- control_plane/src/bin/neon_local.rs | 29 +++++++++++-- control_plane/src/safekeeper.rs | 4 +- safekeeper/src/bin/safekeeper.rs | 53 ++++++++++++++++++++---- test_runner/fixtures/neon_fixtures.py | 16 +++++-- test_runner/regress/test_wal_acceptor.py | 31 +++++++++++++- 5 files changed, 116 insertions(+), 17 deletions(-) diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index 8f71cb65e2..ef308cb2d2 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -825,6 +825,16 @@ fn get_safekeeper(env: &local_env::LocalEnv, id: NodeId) -> Result Vec { + init_match + .get_many::("safekeeper-extra-opt") + .into_iter() + .flatten() + .map(|s| s.to_owned()) + .collect() +} + fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> { let (sub_name, sub_args) = match sub_match.subcommand() { Some(safekeeper_command_data) => safekeeper_command_data, @@ -841,7 +851,9 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul match sub_name { "start" => { - if let Err(e) = safekeeper.start() { + let extra_opts = safekeeper_extra_opts(sub_args); + + if let Err(e) = safekeeper.start(extra_opts) { eprintln!("safekeeper start failed: {}", e); exit(1); } @@ -866,7 +878,8 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul exit(1); } - if let Err(e) = safekeeper.start() { + let extra_opts = safekeeper_extra_opts(sub_args); + if let Err(e) = safekeeper.start(extra_opts) { eprintln!("safekeeper start failed: {}", e); exit(1); } @@ -893,7 +906,7 @@ fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow for node in env.safekeepers.iter() { let safekeeper = SafekeeperNode::from_env(env, node); - if let Err(e) = safekeeper.start() { + if let Err(e) = safekeeper.start(vec![]) { eprintln!("safekeeper {} start failed: {:#}", safekeeper.id, e); try_stop_all(env, false); exit(1); @@ -956,6 +969,14 @@ fn cli() -> Command { let safekeeper_id_arg = Arg::new("id").help("safekeeper id").required(false); + let safekeeper_extra_opt_arg = Arg::new("safekeeper-extra-opt") + .short('e') + .long("safekeeper-extra-opt") + .num_args(1) + .action(ArgAction::Append) + .help("Additional safekeeper invocation options, e.g. -e=--http-auth-public-key-path=foo") + .required(false); + let tenant_id_arg = Arg::new("tenant-id") .long("tenant-id") .help("Tenant id. 
Represented as a hexadecimal string 32 symbols length") @@ -1124,6 +1145,7 @@ fn cli() -> Command { .subcommand(Command::new("start") .about("Start local safekeeper") .arg(safekeeper_id_arg.clone()) + .arg(safekeeper_extra_opt_arg.clone()) ) .subcommand(Command::new("stop") .about("Stop local safekeeper") @@ -1134,6 +1156,7 @@ fn cli() -> Command { .about("Restart local safekeeper") .arg(safekeeper_id_arg) .arg(stop_mode_arg.clone()) + .arg(safekeeper_extra_opt_arg) ) ) .subcommand( diff --git a/control_plane/src/safekeeper.rs b/control_plane/src/safekeeper.rs index 961df38ea2..eb8fe1af17 100644 --- a/control_plane/src/safekeeper.rs +++ b/control_plane/src/safekeeper.rs @@ -101,7 +101,7 @@ impl SafekeeperNode { self.datadir_path().join("safekeeper.pid") } - pub fn start(&self) -> anyhow::Result { + pub fn start(&self, extra_opts: Vec) -> anyhow::Result { print!( "Starting safekeeper at '{}' in '{}'", self.pg_connection_config.raw_address(), @@ -181,6 +181,8 @@ impl SafekeeperNode { ]); } + args.extend(extra_opts); + background_process::start_process( &format!("safekeeper-{id}"), &datadir, diff --git a/safekeeper/src/bin/safekeeper.rs b/safekeeper/src/bin/safekeeper.rs index b04076ab59..8d2201b0eb 100644 --- a/safekeeper/src/bin/safekeeper.rs +++ b/safekeeper/src/bin/safekeeper.rs @@ -15,6 +15,7 @@ use toml_edit::Document; use std::fs::{self, File}; use std::io::{ErrorKind, Write}; use std::path::{Path, PathBuf}; +use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use storage_broker::Uri; @@ -124,18 +125,21 @@ struct Args { disable_wal_backup: bool, /// If given, enables auth on incoming connections to WAL service endpoint /// (--listen-pg). Value specifies path to a .pem public key used for - /// validations of JWT tokens. - #[arg(long, verbatim_doc_comment)] + /// validations of JWT tokens. Empty string is allowed and means disabling + /// auth. + #[arg(long, verbatim_doc_comment, value_parser = opt_pathbuf_parser)] pg_auth_public_key_path: Option, /// If given, enables auth on incoming connections to tenant only WAL /// service endpoint (--listen-pg-tenant-only). Value specifies path to a - /// .pem public key used for validations of JWT tokens. - #[arg(long, verbatim_doc_comment)] + /// .pem public key used for validations of JWT tokens. Empty string is + /// allowed and means disabling auth. + #[arg(long, verbatim_doc_comment, value_parser = opt_pathbuf_parser)] pg_tenant_only_auth_public_key_path: Option, /// If given, enables auth on incoming connections to http management /// service endpoint (--listen-http). Value specifies path to a .pem public - /// key used for validations of JWT tokens. - #[arg(long, verbatim_doc_comment)] + /// key used for validations of JWT tokens. Empty string is allowed and + /// means disabling auth. + #[arg(long, verbatim_doc_comment, value_parser = opt_pathbuf_parser)] http_auth_public_key_path: Option, /// Format for logging, either 'plain' or 'json'. #[arg(long, default_value = "plain")] @@ -146,9 +150,39 @@ struct Args { current_thread_runtime: bool, } +// Like PathBufValueParser, but allows empty string. +fn opt_pathbuf_parser(s: &str) -> Result { + Ok(PathBuf::from_str(s).unwrap()) +} + #[tokio::main(flavor = "current_thread")] async fn main() -> anyhow::Result<()> { - let args = Args::parse(); + // We want to allow multiple occurences of the same arg (taking the last) so + // that neon_local could generate command with defaults + overrides without + // getting 'argument cannot be used multiple times' error. 
This seems to be + // impossible with pure Derive API, so convert struct to Command, modify it, + // parse arguments, and then fill the struct back. + let cmd = ::command().args_override_self(true); + let mut matches = cmd.get_matches(); + let mut args = ::from_arg_matches_mut(&mut matches)?; + + // I failed to modify opt_pathbuf_parser to return Option in + // reasonable time, so turn empty string into option post factum. + if let Some(pb) = &args.pg_auth_public_key_path { + if pb.as_os_str().is_empty() { + args.pg_auth_public_key_path = None; + } + } + if let Some(pb) = &args.pg_tenant_only_auth_public_key_path { + if pb.as_os_str().is_empty() { + args.pg_tenant_only_auth_public_key_path = None; + } + } + if let Some(pb) = &args.http_auth_public_key_path { + if pb.as_os_str().is_empty() { + args.http_auth_public_key_path = None; + } + } if let Some(addr) = args.dump_control_file { let state = control_file::FileStorage::load_control_file(addr)?; @@ -200,7 +234,10 @@ async fn main() -> anyhow::Result<()> { None } Some(path) => { - info!("loading pg tenant only auth JWT key from {}", path.display()); + info!( + "loading pg tenant only auth JWT key from {}", + path.display() + ); Some(Arc::new( JwtAuth::from_key_path(path).context("failed to load the auth key")?, )) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 7c7ca94163..62c5bd9ba9 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -1313,12 +1313,20 @@ class NeonCli(AbstractNeonCli): log.info(f"Stopping pageserver with {cmd}") return self.raw_cli(cmd) - def safekeeper_start(self, id: int) -> "subprocess.CompletedProcess[str]": + def safekeeper_start( + self, id: int, extra_opts: Optional[List[str]] = None + ) -> "subprocess.CompletedProcess[str]": s3_env_vars = None if self.env.remote_storage is not None and isinstance(self.env.remote_storage, S3Storage): s3_env_vars = self.env.remote_storage.access_env_vars() - return self.raw_cli(["safekeeper", "start", str(id)], extra_env_vars=s3_env_vars) + if extra_opts is not None: + extra_opts = [f"-e={opt}" for opt in extra_opts] + else: + extra_opts = [] + return self.raw_cli( + ["safekeeper", "start", str(id), *extra_opts], extra_env_vars=s3_env_vars + ) def safekeeper_stop( self, id: Optional[int] = None, immediate=False @@ -2507,9 +2515,9 @@ class Safekeeper: id: int running: bool = False - def start(self) -> "Safekeeper": + def start(self, extra_opts: Optional[List[str]] = None) -> "Safekeeper": assert self.running is False - self.env.neon_cli.safekeeper_start(self.id) + self.env.neon_cli.safekeeper_start(self.id, extra_opts=extra_opts) self.running = True # wait for wal acceptor start by checking its status started_at = time.time() diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index 9132efe79f..3b6a6c5ceb 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -912,7 +912,7 @@ def test_start_replication_term(neon_env_builder: NeonEnvBuilder): assert "failed to acquire term 3" in str(excinfo.value) -# Test auth on WAL service (postgres protocol) ports. +# Test auth on all ports: WAL service (postgres protocol), WAL service tenant only and http. 
def test_sk_auth(neon_env_builder: NeonEnvBuilder): neon_env_builder.auth_enabled = True env = neon_env_builder.init_start() @@ -946,6 +946,35 @@ def test_sk_auth(neon_env_builder: NeonEnvBuilder): with pytest.raises(psycopg2.OperationalError): connector.safe_psql("IDENTIFY_SYSTEM", port=sk.port.pg_tenant_only, password=full_token) + # Now test that auth on http/pg can be enabled separately. + + # By default, neon_local enables auth on all services if auth is configured, + # so http must require the token. + sk_http_cli_noauth = sk.http_client() + sk_http_cli_auth = sk.http_client(auth_token=env.auth_keys.generate_tenant_token(tenant_id)) + with pytest.raises(sk_http_cli_noauth.HTTPError, match="Forbidden|Unauthorized"): + sk_http_cli_noauth.timeline_status(tenant_id, timeline_id) + sk_http_cli_auth.timeline_status(tenant_id, timeline_id) + + # now, disable auth on http + sk.stop() + sk.start(extra_opts=["--http-auth-public-key-path="]) + sk_http_cli_noauth.timeline_status(tenant_id, timeline_id) # must work without token + # but pg should still require the token + with pytest.raises(psycopg2.OperationalError): + connector.safe_psql("IDENTIFY_SYSTEM", port=sk.port.pg) + connector.safe_psql("IDENTIFY_SYSTEM", port=sk.port.pg, password=tenant_token) + + # now also disable auth on pg, but leave on pg tenant only + sk.stop() + sk.start(extra_opts=["--http-auth-public-key-path=", "--pg-auth-public-key-path="]) + sk_http_cli_noauth.timeline_status(tenant_id, timeline_id) # must work without token + connector.safe_psql("IDENTIFY_SYSTEM", port=sk.port.pg) # must work without token + # but pg tenant only should still require the token + with pytest.raises(psycopg2.OperationalError): + connector.safe_psql("IDENTIFY_SYSTEM", port=sk.port.pg_tenant_only) + connector.safe_psql("IDENTIFY_SYSTEM", port=sk.port.pg_tenant_only, password=tenant_token) + class SafekeeperEnv: def __init__( From 5c836ee5b4780a4f74414789e54791c4426a370d Mon Sep 17 00:00:00 2001 From: John Spray Date: Tue, 15 Aug 2023 18:14:03 +0100 Subject: [PATCH 06/40] tests: extend timeout in timeline deletion test (#4992) ## Problem This was set to 5 seconds, which was very close to how long a compaction took on my workstation, and when deletion is blocked on compaction the test would fail. We will fix this to make compactions drop out on deletion, but for the moment let's stabilize the test. ## Summary of changes Change timeout on timeline deletion in `test_timeline_deletion_with_files_stuck_in_upload_queue` from 5 seconds to 30 seconds. 
--- test_runner/fixtures/pageserver/utils.py | 9 +++++++-- test_runner/regress/test_remote_storage.py | 4 +++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/test_runner/fixtures/pageserver/utils.py b/test_runner/fixtures/pageserver/utils.py index 3b95990a57..a2a49b8a6e 100644 --- a/test_runner/fixtures/pageserver/utils.py +++ b/test_runner/fixtures/pageserver/utils.py @@ -191,7 +191,11 @@ def wait_timeline_detail_404( tenant_id: TenantId, timeline_id: TimelineId, iterations: int, + interval: Optional[float] = None, ): + if interval is None: + interval = 0.25 + def timeline_is_missing(): data = {} try: @@ -204,7 +208,7 @@ def wait_timeline_detail_404( raise RuntimeError(f"Timeline exists state {data.get('state')}") - wait_until(iterations, interval=0.250, func=timeline_is_missing) + wait_until(iterations, interval, func=timeline_is_missing) def timeline_delete_wait_completed( @@ -212,10 +216,11 @@ def timeline_delete_wait_completed( tenant_id: TenantId, timeline_id: TimelineId, iterations: int = 20, + interval: Optional[float] = None, **delete_args, ): pageserver_http.timeline_delete(tenant_id=tenant_id, timeline_id=timeline_id, **delete_args) - wait_timeline_detail_404(pageserver_http, tenant_id, timeline_id, iterations) + wait_timeline_detail_404(pageserver_http, tenant_id, timeline_id, iterations, interval) if TYPE_CHECKING: diff --git a/test_runner/regress/test_remote_storage.py b/test_runner/regress/test_remote_storage.py index d642a6d190..502ae71cec 100644 --- a/test_runner/regress/test_remote_storage.py +++ b/test_runner/regress/test_remote_storage.py @@ -607,7 +607,9 @@ def test_timeline_deletion_with_files_stuck_in_upload_queue( ".* ERROR .*Error processing HTTP request: InternalServerError\\(timeline is Stopping" ) - timeline_delete_wait_completed(client, tenant_id, timeline_id) + # Generous timeout, because currently deletions can get blocked waiting for compaction + # This can be reduced when https://github.com/neondatabase/neon/issues/4998 is fixed. 
+ timeline_delete_wait_completed(client, tenant_id, timeline_id, iterations=30, interval=1) assert not timeline_path.exists() From 1b97a3074cdde53d8d8e96807c5dc7d3a8e2a61a Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Tue, 15 Aug 2023 19:57:56 +0200 Subject: [PATCH 07/40] Disable neon-pool-opt-in (#4995) --- proxy/src/http/sql_over_http.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/proxy/src/http/sql_over_http.rs b/proxy/src/http/sql_over_http.rs index 33375e63e9..82e78796c6 100644 --- a/proxy/src/http/sql_over_http.rs +++ b/proxy/src/http/sql_over_http.rs @@ -44,7 +44,6 @@ const MAX_REQUEST_SIZE: u64 = 1024 * 1024; // 1 MB static RAW_TEXT_OUTPUT: HeaderName = HeaderName::from_static("neon-raw-text-output"); static ARRAY_MODE: HeaderName = HeaderName::from_static("neon-array-mode"); -static ALLOW_POOL: HeaderName = HeaderName::from_static("neon-pool-opt-in"); static TXN_ISOLATION_LEVEL: HeaderName = HeaderName::from_static("neon-batch-isolation-level"); static TXN_READ_ONLY: HeaderName = HeaderName::from_static("neon-batch-read-only"); static TXN_DEFERRABLE: HeaderName = HeaderName::from_static("neon-batch-deferrable"); @@ -194,7 +193,7 @@ pub async fn handle( let array_mode = headers.get(&ARRAY_MODE) == Some(&HEADER_VALUE_TRUE); // Allow connection pooling only if explicitly requested - let allow_pool = headers.get(&ALLOW_POOL) == Some(&HEADER_VALUE_TRUE); + let allow_pool = false; // isolation level, read only and deferrable From fdbe8dc8e0e9550ca1462d5258ca441822487b81 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 12 Jun 2023 18:03:03 +0400 Subject: [PATCH 08/40] Fix test_s3_wal_replay flakiness. ref https://github.com/neondatabase/neon/issues/4466 --- test_runner/regress/test_wal_acceptor.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index 3b6a6c5ceb..6695819899 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -543,8 +543,13 @@ def test_s3_wal_replay(neon_env_builder: NeonEnvBuilder, remote_storage_kind: Re last_lsn = Lsn(query_scalar(cur, "SELECT pg_current_wal_flush_lsn()")) for sk in env.safekeepers: - # require WAL to be trimmed, so no more than one segment is left on disk - target_size_mb = 16 * 1.5 + # require WAL to be trimmed, so no more than one segment is left + # on disk + # TODO: WAL removal uses persistent values and control + # file is fsynced roughly once in a segment, so there is a small + # chance that two segments are left on disk, not one. We can + # force persist cf and have 16 instead of 32 here. + target_size_mb = 32 * 1.5 wait( partial(is_wal_trimmed, sk, tenant_id, timeline_id, target_size_mb), f"sk_id={sk.id} to trim WAL to {target_size_mb:.2f}MB", From 0f47bc03eb7a0d1cfcf65eb01a4a3f6735fe7a6a Mon Sep 17 00:00:00 2001 From: Dmitry Rodionov Date: Wed, 16 Aug 2023 14:08:53 +0300 Subject: [PATCH 09/40] Fix delete_objects in UnreliableWrapper (#5002) For `delete_objects` it was injecting failures for whole delete_objects operation and then for every delete it contains. Make it fail once for the whole operation. 
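(Assuming the wrapper is set to fail every distinct operation on its first
attempt: a batch delete used to fail once for the DeleteObjects record and
then again when each contained Delete hit its own first attempt, so callers
had to retry the whole batch twice; after this change a single retry
suffices.)
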
--- libs/remote_storage/src/simulate_failures.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/libs/remote_storage/src/simulate_failures.rs b/libs/remote_storage/src/simulate_failures.rs index c46ca14ace..6d6a5c1d24 100644 --- a/libs/remote_storage/src/simulate_failures.rs +++ b/libs/remote_storage/src/simulate_failures.rs @@ -71,6 +71,13 @@ impl UnreliableWrapper { } } } + + async fn delete_inner(&self, path: &RemotePath, attempt: bool) -> anyhow::Result<()> { + if attempt { + self.attempt(RemoteOp::Delete(path.clone()))?; + } + self.inner.delete(path).await + } } #[async_trait::async_trait] @@ -122,15 +129,15 @@ impl RemoteStorage for UnreliableWrapper { } async fn delete(&self, path: &RemotePath) -> anyhow::Result<()> { - self.attempt(RemoteOp::Delete(path.clone()))?; - self.inner.delete(path).await + self.delete_inner(path, true).await } async fn delete_objects<'a>(&self, paths: &'a [RemotePath]) -> anyhow::Result<()> { self.attempt(RemoteOp::DeleteObjects(paths.to_vec()))?; let mut error_counter = 0; for path in paths { - if (self.delete(path).await).is_err() { + // Dont record attempt because it was already recorded above + if (self.delete_inner(path, false).await).is_err() { error_counter += 1; } } From 368b783adab98ea527f1049de237617beec46099 Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Wed, 16 Aug 2023 15:41:25 +0200 Subject: [PATCH 10/40] ephemeral_file: remove FileExt impl (was only used by tests) (#5003) Extracted from https://github.com/neondatabase/neon/pull/4994 --- pageserver/src/tenant/ephemeral_file.rs | 118 +----------------------- 1 file changed, 2 insertions(+), 116 deletions(-) diff --git a/pageserver/src/tenant/ephemeral_file.rs b/pageserver/src/tenant/ephemeral_file.rs index 51841b8715..6979d19d33 100644 --- a/pageserver/src/tenant/ephemeral_file.rs +++ b/pageserver/src/tenant/ephemeral_file.rs @@ -12,13 +12,12 @@ use std::collections::HashMap; use std::fs::OpenOptions; use std::io::{self, ErrorKind}; use std::ops::DerefMut; +use std::os::unix::prelude::FileExt; use std::path::PathBuf; use std::sync::{Arc, RwLock}; use tracing::*; use utils::id::{TenantId, TimelineId}; -use std::os::unix::fs::FileExt; - /// /// This is the global cache of file descriptors (File objects). /// @@ -127,75 +126,6 @@ pub fn is_ephemeral_file(filename: &str) -> bool { } } -impl FileExt for EphemeralFile { - fn read_at(&self, dstbuf: &mut [u8], offset: u64) -> Result { - // Look up the right page - let blkno = (offset / PAGE_SZ as u64) as u32; - let off = offset as usize % PAGE_SZ; - let len = min(PAGE_SZ - off, dstbuf.len()); - - let read_guard; - let mut write_guard; - - let cache = page_cache::get(); - let buf = match cache - .read_ephemeral_buf(self.file_id, blkno) - .map_err(|e| to_io_error(e, "Failed to read ephemeral buf"))? - { - ReadBufResult::Found(guard) => { - read_guard = guard; - read_guard.as_ref() - } - ReadBufResult::NotFound(guard) => { - // Read the page from disk into the buffer - write_guard = guard; - self.fill_buffer(write_guard.deref_mut(), blkno)?; - write_guard.mark_valid(); - - // And then fall through to read the requested slice from the - // buffer. 
- write_guard.as_ref() - } - }; - - dstbuf[0..len].copy_from_slice(&buf[off..(off + len)]); - Ok(len) - } - - fn write_at(&self, srcbuf: &[u8], offset: u64) -> Result { - // Look up the right page - let blkno = (offset / PAGE_SZ as u64) as u32; - let off = offset as usize % PAGE_SZ; - let len = min(PAGE_SZ - off, srcbuf.len()); - - let mut write_guard; - let cache = page_cache::get(); - let buf = match cache - .write_ephemeral_buf(self.file_id, blkno) - .map_err(|e| to_io_error(e, "Failed to write ephemeral buf"))? - { - WriteBufResult::Found(guard) => { - write_guard = guard; - write_guard.deref_mut() - } - WriteBufResult::NotFound(guard) => { - // Read the page from disk into the buffer - // TODO: if we're overwriting the whole page, no need to read it in first - write_guard = guard; - self.fill_buffer(write_guard.deref_mut(), blkno)?; - write_guard.mark_valid(); - - // And then fall through to modify it. - write_guard.deref_mut() - } - }; - - buf[off..(off + len)].copy_from_slice(&srcbuf[0..len]); - write_guard.mark_dirty(); - Ok(len) - } -} - impl BlobWriter for EphemeralFile { fn write_blob(&mut self, srcbuf: &[u8]) -> Result { let pos = self.size; @@ -334,7 +264,7 @@ mod tests { use super::*; use crate::tenant::blob_io::BlobWriter; use crate::tenant::block_io::BlockCursor; - use rand::{seq::SliceRandom, thread_rng, RngCore}; + use rand::{thread_rng, RngCore}; use std::fs; use std::str::FromStr; @@ -355,50 +285,6 @@ mod tests { Ok((conf, tenant_id, timeline_id)) } - // Helper function to slurp contents of a file, starting at the current position, - // into a string - fn read_string(efile: &EphemeralFile, offset: u64, len: usize) -> Result { - let mut buf = Vec::new(); - buf.resize(len, 0u8); - - efile.read_exact_at(&mut buf, offset)?; - - Ok(String::from_utf8_lossy(&buf) - .trim_end_matches('\0') - .to_string()) - } - - #[test] - fn test_ephemeral_files() -> Result<(), io::Error> { - let (conf, tenant_id, timeline_id) = harness("ephemeral_files")?; - - let file_a = EphemeralFile::create(conf, tenant_id, timeline_id)?; - - file_a.write_all_at(b"foo", 0)?; - assert_eq!("foo", read_string(&file_a, 0, 20)?); - - file_a.write_all_at(b"bar", 3)?; - assert_eq!("foobar", read_string(&file_a, 0, 20)?); - - // Open a lot of files, enough to cause some page evictions. - let mut efiles = Vec::new(); - for fileno in 0..100 { - let efile = EphemeralFile::create(conf, tenant_id, timeline_id)?; - efile.write_all_at(format!("file {}", fileno).as_bytes(), 0)?; - assert_eq!(format!("file {}", fileno), read_string(&efile, 0, 10)?); - efiles.push((fileno, efile)); - } - - // Check that all the files can still be read from. Use them in random order for - // good measure. - efiles.as_mut_slice().shuffle(&mut thread_rng()); - for (fileno, efile) in efiles.iter_mut() { - assert_eq!(format!("file {}", fileno), read_string(efile, 0, 10)?); - } - - Ok(()) - } - #[tokio::test] async fn test_ephemeral_blobs() -> Result<(), io::Error> { let (conf, tenant_id, timeline_id) = harness("ephemeral_blobs")?; From 96b84ace897a4365a1ee6080b5e135e50363b2b1 Mon Sep 17 00:00:00 2001 From: Dmitry Rodionov Date: Wed, 16 Aug 2023 17:31:16 +0300 Subject: [PATCH 11/40] Correctly remove orphaned objects in RemoteTimelineClient::delete_all (#5000) Previously list_prefixes was incorrectly used for that purpose. Change to use list_files. Add a test. 
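(list_prefixes only returns directory-like common prefixes under the given
path, so objects sitting directly under the timeline prefix, e.g.
index_part.json or an orphaned layer file, were never listed and hence never
deleted; list_files enumerates every object under the prefix.)
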
Some drive by refactorings on python side to move helpers out of specific test file to be widely accessible resolves https://github.com/neondatabase/neon/issues/4499 --- .../src/tenant/remote_timeline_client.rs | 2 +- test_runner/fixtures/remote_storage.py | 13 ++++ test_runner/regress/test_remote_storage.py | 18 +++--- .../test_tenants_with_remote_storage.py | 36 +++-------- test_runner/regress/test_timeline_delete.py | 62 +++++++++++++++++++ 5 files changed, 91 insertions(+), 40 deletions(-) diff --git a/pageserver/src/tenant/remote_timeline_client.rs b/pageserver/src/tenant/remote_timeline_client.rs index 1d04d74839..f17d0f6b4d 100644 --- a/pageserver/src/tenant/remote_timeline_client.rs +++ b/pageserver/src/tenant/remote_timeline_client.rs @@ -851,7 +851,7 @@ impl RemoteTimelineClient { let remaining = backoff::retry( || async { self.storage_impl - .list_prefixes(Some(&timeline_storage_path)) + .list_files(Some(&timeline_storage_path)) .await }, |_e| false, diff --git a/test_runner/fixtures/remote_storage.py b/test_runner/fixtures/remote_storage.py index 1b80473377..ada2d42347 100644 --- a/test_runner/fixtures/remote_storage.py +++ b/test_runner/fixtures/remote_storage.py @@ -7,6 +7,9 @@ from pathlib import Path from typing import Dict, List, Optional, Union from fixtures.log_helper import log +from fixtures.types import TenantId, TimelineId + +TIMELINE_INDEX_PART_FILE_NAME = "index_part.json" class MockS3Server: @@ -89,6 +92,16 @@ def available_s3_storages() -> List[RemoteStorageKind]: class LocalFsStorage: root: Path + def timeline_path(self, tenant_id: TenantId, timeline_id: TimelineId) -> Path: + return self.root / "tenants" / str(tenant_id) / "timelines" / str(timeline_id) + + def index_path(self, tenant_id: TenantId, timeline_id: TimelineId) -> Path: + return self.timeline_path(tenant_id, timeline_id) / TIMELINE_INDEX_PART_FILE_NAME + + def index_content(self, tenant_id: TenantId, timeline_id: TimelineId): + with self.index_path(tenant_id, timeline_id).open("r") as f: + return json.load(f) + @dataclass class S3Storage: diff --git a/test_runner/regress/test_remote_storage.py b/test_runner/regress/test_remote_storage.py index 502ae71cec..714118a015 100644 --- a/test_runner/regress/test_remote_storage.py +++ b/test_runner/regress/test_remote_storage.py @@ -24,6 +24,7 @@ from fixtures.pageserver.utils import ( wait_until_tenant_state, ) from fixtures.remote_storage import ( + TIMELINE_INDEX_PART_FILE_NAME, LocalFsStorage, RemoteStorageKind, available_remote_storages, @@ -615,9 +616,7 @@ def test_timeline_deletion_with_files_stuck_in_upload_queue( # to please mypy assert isinstance(env.remote_storage, LocalFsStorage) - remote_timeline_path = ( - env.remote_storage.root / "tenants" / str(tenant_id) / "timelines" / str(timeline_id) - ) + remote_timeline_path = env.remote_storage.timeline_path(tenant_id, timeline_id) assert not list(remote_timeline_path.iterdir()) @@ -722,15 +721,14 @@ def test_empty_branch_remote_storage_upload_on_restart( # index upload is now hitting the failpoint, it should block the shutdown env.pageserver.stop(immediate=True) - timeline_path = ( - Path("tenants") / str(env.initial_tenant) / "timelines" / str(new_branch_timeline_id) - ) - - local_metadata = env.repo_dir / timeline_path / "metadata" + local_metadata = env.timeline_dir(env.initial_tenant, new_branch_timeline_id) / "metadata" assert local_metadata.is_file() assert isinstance(env.remote_storage, LocalFsStorage) - new_branch_on_remote_storage = env.remote_storage.root / timeline_path + + 
new_branch_on_remote_storage = env.remote_storage.timeline_path( + env.initial_tenant, new_branch_timeline_id + ) assert ( not new_branch_on_remote_storage.exists() ), "failpoint should had prohibited index_part.json upload" @@ -779,7 +777,7 @@ def test_empty_branch_remote_storage_upload_on_restart( assert_nothing_to_upload(client, env.initial_tenant, new_branch_timeline_id) assert ( - new_branch_on_remote_storage / "index_part.json" + new_branch_on_remote_storage / TIMELINE_INDEX_PART_FILE_NAME ).is_file(), "uploads scheduled during initial load should had been awaited for" finally: create_thread.join() diff --git a/test_runner/regress/test_tenants_with_remote_storage.py b/test_runner/regress/test_tenants_with_remote_storage.py index fee95e5420..397a2ea534 100644 --- a/test_runner/regress/test_tenants_with_remote_storage.py +++ b/test_runner/regress/test_tenants_with_remote_storage.py @@ -7,7 +7,6 @@ # import asyncio -import json import os from pathlib import Path from typing import List, Tuple @@ -225,10 +224,11 @@ def test_tenants_attached_after_download( # FIXME: test index_part.json getting downgraded from imaginary new version -@pytest.mark.parametrize("remote_storage_kind", [RemoteStorageKind.LOCAL_FS]) def test_tenant_redownloads_truncated_file_on_startup( - neon_env_builder: NeonEnvBuilder, remote_storage_kind: RemoteStorageKind + neon_env_builder: NeonEnvBuilder, ): + remote_storage_kind = RemoteStorageKind.LOCAL_FS + # since we now store the layer file length metadata, we notice on startup that a layer file is of wrong size, and proceed to redownload it. neon_env_builder.enable_remote_storage( remote_storage_kind=remote_storage_kind, @@ -237,6 +237,8 @@ def test_tenant_redownloads_truncated_file_on_startup( env = neon_env_builder.init_start() + assert isinstance(env.remote_storage, LocalFsStorage) + env.pageserver.allowed_errors.append( ".*removing local file .* because it has unexpected length.*" ) @@ -279,7 +281,7 @@ def test_tenant_redownloads_truncated_file_on_startup( (path, expected_size) = local_layer_truncated # ensure the same size is found from the index_part.json - index_part = local_fs_index_part(env, tenant_id, timeline_id) + index_part = env.remote_storage.index_content(tenant_id, timeline_id) assert index_part["layer_metadata"][path.name]["file_size"] == expected_size ## Start the pageserver. 
It will notice that the file size doesn't match, and @@ -309,7 +311,7 @@ def test_tenant_redownloads_truncated_file_on_startup( assert os.stat(path).st_size == expected_size, "truncated layer should had been re-downloaded" # the remote side of local_layer_truncated - remote_layer_path = local_fs_index_part_path(env, tenant_id, timeline_id).parent / path.name + remote_layer_path = env.remote_storage.timeline_path(tenant_id, timeline_id) / path.name # if the upload ever was ongoing, this check would be racy, but at least one # extra http request has been made in between so assume it's enough delay @@ -334,27 +336,3 @@ def test_tenant_redownloads_truncated_file_on_startup( assert ( os.stat(remote_layer_path).st_size == expected_size ), "truncated file should not had been uploaded after next checkpoint" - - -def local_fs_index_part(env, tenant_id, timeline_id): - """ - Return json.load parsed index_part.json of tenant and timeline from LOCAL_FS - """ - timeline_path = local_fs_index_part_path(env, tenant_id, timeline_id) - with open(timeline_path, "r") as timeline_file: - return json.load(timeline_file) - - -def local_fs_index_part_path(env, tenant_id, timeline_id): - """ - Return path to the LOCAL_FS index_part.json of the tenant and timeline. - """ - assert isinstance(env.remote_storage, LocalFsStorage) - return ( - env.remote_storage.root - / "tenants" - / str(tenant_id) - / "timelines" - / str(timeline_id) - / "index_part.json" - ) diff --git a/test_runner/regress/test_timeline_delete.py b/test_runner/regress/test_timeline_delete.py index 26caeb8ffb..a48c2186de 100644 --- a/test_runner/regress/test_timeline_delete.py +++ b/test_runner/regress/test_timeline_delete.py @@ -27,6 +27,7 @@ from fixtures.pageserver.utils import ( wait_until_timeline_state, ) from fixtures.remote_storage import ( + LocalFsStorage, RemoteStorageKind, available_remote_storages, ) @@ -762,3 +763,64 @@ def test_timeline_delete_works_for_remote_smoke( 0.5, lambda: assert_prefix_empty(neon_env_builder), ) + + +def test_delete_orphaned_objects( + neon_env_builder: NeonEnvBuilder, + pg_bin: PgBin, +): + remote_storage_kind = RemoteStorageKind.LOCAL_FS + neon_env_builder.enable_remote_storage(remote_storage_kind, "test_delete_orphaned_objects") + + env = neon_env_builder.init_start( + initial_tenant_conf={ + "gc_period": "0s", + "compaction_period": "0s", + "checkpoint_distance": f"{1024 ** 2}", + "image_creation_threshold": "100", + } + ) + + assert isinstance(env.remote_storage, LocalFsStorage) + + ps_http = env.pageserver.http_client() + + timeline_id = env.neon_cli.create_timeline("delete") + with env.endpoints.create_start("delete") as endpoint: + # generate enough layers + pg_bin.run(["pgbench", "-i", "-I dtGvp", "-s1", endpoint.connstr()]) + last_flush_lsn_upload(env, endpoint, env.initial_tenant, timeline_id) + + # write orphaned file that is missing from the index + remote_timeline_path = env.remote_storage.timeline_path(env.initial_tenant, timeline_id) + orphans = [remote_timeline_path / f"orphan_{i}" for i in range(3)] + for orphan in orphans: + orphan.write_text("I shouldnt be there") + + # trigger failpoint after orphaned file deletion to check that index_part is not deleted as well. 
+
+    failpoint = "timeline-delete-before-index-delete"
+    ps_http.configure_failpoints((failpoint, "return"))
+
+    env.pageserver.allowed_errors.append(f".*failpoint: {failpoint}")
+
+    iterations = poll_for_remote_storage_iterations(remote_storage_kind)
+
+    ps_http.timeline_delete(env.initial_tenant, timeline_id)
+    timeline_info = wait_until_timeline_state(
+        pageserver_http=ps_http,
+        tenant_id=env.initial_tenant,
+        timeline_id=timeline_id,
+        expected_state="Broken",
+        iterations=iterations,
+    )
+
+    reason = timeline_info["state"]["Broken"]["reason"]
+    assert reason.endswith(f"failpoint: {failpoint}"), reason
+
+    for orphan in orphans:
+        assert not orphan.exists()
+        assert env.pageserver.log_contains(
+            f"deleting a file not referenced from index_part.json name={orphan.stem}"
+        )
+
+    assert env.remote_storage.index_path(env.initial_tenant, timeline_id).exists()

From 0bdbc39cb192a344e7933bcde5b93b06a7c2102f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Arpad=20M=C3=BCller?=
Date: Wed, 16 Aug 2023 17:27:18 +0200
Subject: [PATCH 12/40] Compaction: unify key and value reference vecs (#4888)

## Problem

PR #4839 has already reduced the number of b-tree traversals and vec creations from 3 to 2, but as pointed out in https://github.com/neondatabase/neon/pull/4839#discussion_r1279167815, we would ideally just traverse the b-tree once during compaction.

After #4836, the two vecs created are one for the list of keys, lsns and sizes, and one for the list of `(key, lsn, value reference)`. However, they are not equal, as pointed out in https://github.com/neondatabase/neon/pull/4839#issuecomment-1660418012 and the following comment: the key vec creation combines multiple entries for which the lsn changes but the key stays the same into one, with the size being the sum of the sub-sizes. In SQL, this would correspond to something like `SELECT key, MIN(lsn), SUM(size) FROM b_tree GROUP BY key;` and `SELECT key, lsn, val_ref FROM b_tree;`. Therefore, the join operation is non-trivial.

## Summary of changes

This PR merges the two lists of keys and value references into one. It's not a trivial change and affects the size pattern of the resulting files, which is why this is in a separate PR from #4839.

The key vec is used in compaction for determining when to start a new layer file. The loop uses various thresholds to come to this conclusion, but the grouping via the key has led to the behaviour that, regardless of the threshold, it only starts a new file when either a new key or a new delta file is encountered.

The new code now does the combination after the merging and sorting of the various keys from the delta files. This *mostly* does the same as the old code, except for a detail: with the grouping done on a per-delta-layer basis, the sorted and merged vec would still have multiple entries for multiple delta files, but now we don't have an easy way to tell when a new input delta layer file is encountered, so we cannot create multiple entries on that basis easily. To prevent possibly unbounded growth, our new grouping code compares the combined size with the threshold, and if it is exceeded, it cuts a new entry so that the downstream code can cut a new output file. Here we make a tradeoff, however: if the threshold is too small, we risk putting entries for the same key into multiple layer files, but if the threshold is too big, we can in some instances exceed the target size. Currently, we set the threshold to the target size, so in theory we would stay below or roughly at double the `target_file_size`.
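As a rough sketch of the cutting rule (not the actual code: plain `u64` values stand in for the real `Key` and `Lsn` types, and the helper name is made up), the grouping behaves like this:

```rust
/// Sketch of the new grouping rule: fold consecutive entries for the same
/// key together, but cut a new entry once the accumulated size reaches the
/// threshold, so the compaction loop may start a new output file there.
fn group_by_key_with_threshold(
    entries: Vec<(u64, u64, u64)>, // (key, lsn, size), sorted by (key, lsn)
    threshold: u64,
) -> Vec<(u64, u64, u64)> {
    let mut out: Vec<(u64, u64, u64)> = Vec::new();
    for cur in entries {
        // Fold into the previous entry while the key stays the same and the
        // accumulated size is still below the threshold.
        let folded = match out.last_mut() {
            Some(prev) if prev.0 == cur.0 && prev.2 < threshold => {
                prev.2 += cur.2;
                true
            }
            _ => false,
        };
        // Otherwise cut a new entry, which lets the downstream loop start a
        // new output file even in the middle of a key.
        if !folded {
            out.push(cur);
        }
    }
    out
}

fn main() {
    let entries = vec![(1, 10, 600), (1, 20, 600), (1, 30, 100), (2, 10, 50)];
    // With a threshold of 1000, the run for key 1 is cut once it reaches
    // 1200, so the same key may now span two output files.
    assert_eq!(
        group_by_key_with_threshold(entries, 1000),
        vec![(1, 10, 1200), (1, 30, 100), (2, 10, 50)]
    );
}
```

The actual change expresses the same rule with `itertools::coalesce` over the sorted `DeltaEntry` vec, as the `timeline.rs` hunk below shows.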
We also fix the way the size was calculated for the last key. The calculation was wrong and accounted for the old layer's btree, even though we already account for the overhead of the in-construction btree. Builds on top of #4839 . --- .../src/tenant/storage_layer/delta_layer.rs | 99 ++++++++----------- pageserver/src/tenant/timeline.rs | 81 +++++++++------ test_runner/regress/test_remote_storage.py | 8 +- 3 files changed, 98 insertions(+), 90 deletions(-) diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index bd77d177b0..4324a6e9a0 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -549,25 +549,10 @@ impl DeltaLayer { &self.layer_name(), ) } - - /// Obtains all keys and value references stored in the layer + /// Loads all keys stored in the layer. Returns key, lsn, value size and value reference. /// /// The value can be obtained via the [`ValueRef::load`] function. - pub async fn load_val_refs( - &self, - ctx: &RequestContext, - ) -> Result>)>> { - let inner = self - .load(LayerAccessKind::Iter, ctx) - .await - .context("load delta layer")?; - DeltaLayerInner::load_val_refs(inner) - .await - .context("Layer index is corrupted") - } - - /// Loads all keys stored in the layer. Returns key, lsn and value size. - pub async fn load_keys(&self, ctx: &RequestContext) -> Result> { + pub async fn load_keys(&self, ctx: &RequestContext) -> Result> { let inner = self .load(LayerAccessKind::KeyIter, ctx) .await @@ -711,6 +696,17 @@ impl DeltaLayerWriterInner { .metadata() .context("get file metadata to determine size")?; + // 5GB limit for objects without multipart upload (which we don't want to use) + // Make it a little bit below to account for differing GB units + // https://docs.aws.amazon.com/AmazonS3/latest/userguide/upload-objects.html + const S3_UPLOAD_LIMIT: u64 = 4_500_000_000; + ensure!( + metadata.len() <= S3_UPLOAD_LIMIT, + "Created delta layer file at {} of size {} above limit {S3_UPLOAD_LIMIT}!", + file.path.display(), + metadata.len() + ); + // Note: Because we opened the file in write-only mode, we cannot // reuse the same VirtualFile for reading later. That's why we don't // set inner.file here. The first read will have to re-open it. 
@@ -955,15 +951,17 @@ impl DeltaLayerInner { } } - pub(super) async fn load_val_refs + Clone>( + pub(super) async fn load_keys + Clone>( this: &T, - ) -> Result)>> { + ) -> Result> { let dl = this.as_ref(); let file = &dl.file; + let tree_reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new(dl.index_start_blk, dl.index_root_blk, file); - let mut all_offsets = Vec::<(Key, Lsn, ValueRef)>::new(); + let mut all_keys: Vec = Vec::new(); + tree_reader .visit( &[0u8; DELTA_KEY_SIZE], @@ -972,56 +970,45 @@ impl DeltaLayerInner { let delta_key = DeltaKey::from_slice(key); let val_ref = ValueRef { blob_ref: BlobRef(value), - reader: BlockCursor::new(Adapter(this.clone())), + reader: BlockCursor::new(Adapter(dl)), }; - all_offsets.push((delta_key.key(), delta_key.lsn(), val_ref)); - true - }, - ) - .await?; - - Ok(all_offsets) - } - - pub(super) async fn load_keys(&self) -> Result> { - let file = &self.file; - let tree_reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new( - self.index_start_blk, - self.index_root_blk, - file, - ); - - let mut all_keys: Vec<(Key, Lsn, u64)> = Vec::new(); - tree_reader - .visit( - &[0u8; DELTA_KEY_SIZE], - VisitDirection::Forwards, - |key, value| { - let delta_key = DeltaKey::from_slice(key); let pos = BlobRef(value).pos(); if let Some(last) = all_keys.last_mut() { - if last.0 == delta_key.key() { - return true; - } else { - // subtract offset of new key BLOB and first blob of this key - // to get total size if values associated with this key - let first_pos = last.2; - last.2 = pos - first_pos; - } + // subtract offset of the current and last entries to get the size + // of the value associated with this (key, lsn) tuple + let first_pos = last.size; + last.size = pos - first_pos; } - all_keys.push((delta_key.key(), delta_key.lsn(), pos)); + let entry = DeltaEntry { + key: delta_key.key(), + lsn: delta_key.lsn(), + size: pos, + val: val_ref, + }; + all_keys.push(entry); true }, ) .await?; if let Some(last) = all_keys.last_mut() { - // Last key occupies all space till end of layer - last.2 = std::fs::metadata(&file.file.path)?.len() - last.2; + // Last key occupies all space till end of value storage, + // which corresponds to beginning of the index + last.size = dl.index_start_blk as u64 * PAGE_SZ as u64 - last.size; } Ok(all_keys) } } +/// A set of data associated with a delta layer key and its value +pub struct DeltaEntry<'a> { + pub key: Key, + pub lsn: Lsn, + /// Size of the stored value + pub size: u64, + /// Reference to the on-disk value + pub val: ValueRef<&'a DeltaLayerInner>, +} + /// Reference to an on-disk value pub struct ValueRef> { blob_ref: BlobRef, diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 2deeacdc64..502e5ed44e 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -39,6 +39,7 @@ use crate::context::{ AccessStatsBehavior, DownloadBehavior, RequestContext, RequestContextBuilder, }; use crate::tenant::remote_timeline_client::{self, index::LayerFileMetadata}; +use crate::tenant::storage_layer::delta_layer::DeltaEntry; use crate::tenant::storage_layer::{ DeltaFileName, DeltaLayerWriter, ImageFileName, ImageLayerWriter, InMemoryLayer, LayerAccessStats, LayerFileName, RemoteLayer, @@ -3312,10 +3313,10 @@ struct CompactLevel0Phase1StatsBuilder { timeline_id: Option, read_lock_acquisition_micros: DurationRecorder, read_lock_held_spawn_blocking_startup_micros: DurationRecorder, + read_lock_held_key_sort_micros: DurationRecorder, read_lock_held_prerequisites_micros: DurationRecorder, 
read_lock_held_compute_holes_micros: DurationRecorder, read_lock_drop_micros: DurationRecorder, - prepare_iterators_micros: DurationRecorder, write_layer_files_micros: DurationRecorder, level0_deltas_count: Option, new_deltas_count: Option, @@ -3332,10 +3333,10 @@ struct CompactLevel0Phase1Stats { timeline_id: TimelineId, read_lock_acquisition_micros: RecordedDuration, read_lock_held_spawn_blocking_startup_micros: RecordedDuration, + read_lock_held_key_sort_micros: RecordedDuration, read_lock_held_prerequisites_micros: RecordedDuration, read_lock_held_compute_holes_micros: RecordedDuration, read_lock_drop_micros: RecordedDuration, - prepare_iterators_micros: RecordedDuration, write_layer_files_micros: RecordedDuration, level0_deltas_count: usize, new_deltas_count: usize, @@ -3362,6 +3363,10 @@ impl TryFrom for CompactLevel0Phase1Stats { .read_lock_held_spawn_blocking_startup_micros .into_recorded() .ok_or_else(|| anyhow!("read_lock_held_spawn_blocking_startup_micros not set"))?, + read_lock_held_key_sort_micros: value + .read_lock_held_key_sort_micros + .into_recorded() + .ok_or_else(|| anyhow!("read_lock_held_key_sort_micros not set"))?, read_lock_held_prerequisites_micros: value .read_lock_held_prerequisites_micros .into_recorded() @@ -3374,10 +3379,6 @@ impl TryFrom for CompactLevel0Phase1Stats { .read_lock_drop_micros .into_recorded() .ok_or_else(|| anyhow!("read_lock_drop_micros not set"))?, - prepare_iterators_micros: value - .prepare_iterators_micros - .into_recorded() - .ok_or_else(|| anyhow!("prepare_iterators_micros not set"))?, write_layer_files_micros: value .write_layer_files_micros .into_recorded() @@ -3547,28 +3548,24 @@ impl Timeline { let mut heap: BinaryHeap = BinaryHeap::with_capacity(max_holes + 1); let mut prev: Option = None; - let mut all_value_refs = Vec::new(); let mut all_keys = Vec::new(); - for l in deltas_to_compact.iter() { + let downcast_deltas: Vec<_> = deltas_to_compact + .iter() + .map(|l| l.clone().downcast_delta_layer().expect("delta layer")) + .collect(); + for dl in downcast_deltas.iter() { // TODO: replace this with an await once we fully go async - let delta = l.clone().downcast_delta_layer().expect("delta layer"); - Handle::current().block_on(async { - all_value_refs.extend(delta.load_val_refs(ctx).await?); - all_keys.extend(delta.load_keys(ctx).await?); - anyhow::Ok(()) - })?; + all_keys.extend(Handle::current().block_on(DeltaLayer::load_keys(dl, ctx))?); } // The current stdlib sorting implementation is designed in a way where it is // particularly fast where the slice is made up of sorted sub-ranges. - all_value_refs.sort_by_key(|(key, lsn, _value_ref)| (*key, *lsn)); + all_keys.sort_by_key(|DeltaEntry { key, lsn, .. }| (*key, *lsn)); - // The current stdlib sorting implementation is designed in a way where it is - // particularly fast where the slice is made up of sorted sub-ranges. - all_keys.sort_by_key(|(key, lsn, _size)| (*key, *lsn)); + stats.read_lock_held_key_sort_micros = stats.read_lock_held_prerequisites_micros.till_now(); - for (next_key, _next_lsn, _size) in all_keys.iter() { + for DeltaEntry { key: next_key, .. 
} in all_keys.iter() { let next_key = *next_key; if let Some(prev_key) = prev { // just first fast filter @@ -3592,8 +3589,7 @@ impl Timeline { } prev = Some(next_key.next()); } - stats.read_lock_held_compute_holes_micros = - stats.read_lock_held_prerequisites_micros.till_now(); + stats.read_lock_held_compute_holes_micros = stats.read_lock_held_key_sort_micros.till_now(); drop_rlock(guard); stats.read_lock_drop_micros = stats.read_lock_held_compute_holes_micros.till_now(); let mut holes = heap.into_vec(); @@ -3602,12 +3598,26 @@ impl Timeline { // This iterator walks through all key-value pairs from all the layers // we're compacting, in key, LSN order. - let all_values_iter = all_value_refs.into_iter(); + let all_values_iter = all_keys.iter(); // This iterator walks through all keys and is needed to calculate size used by each key - let mut all_keys_iter = all_keys.into_iter(); - - stats.prepare_iterators_micros = stats.read_lock_drop_micros.till_now(); + let mut all_keys_iter = all_keys + .iter() + .map(|DeltaEntry { key, lsn, size, .. }| (*key, *lsn, *size)) + .coalesce(|mut prev, cur| { + // Coalesce keys that belong to the same key pair. + // This ensures that compaction doesn't put them + // into different layer files. + // Still limit this by the target file size, + // so that we keep the size of the files in + // check. + if prev.0 == cur.0 && prev.2 < target_file_size { + prev.2 += cur.2; + Ok(prev) + } else { + Err((prev, cur)) + } + }); // Merge the contents of all the input delta layers into a new set // of delta layers, based on the current partitioning. @@ -3662,8 +3672,11 @@ impl Timeline { // TODO remove this block_on wrapper once we fully go async Handle::current().block_on(async { - for (key, lsn, value_ref) in all_values_iter { - let value = value_ref.load().await?; + for &DeltaEntry { + key, lsn, ref val, .. 
+ } in all_values_iter + { + let value = val.load().await?; let same_key = prev_key.map_or(false, |prev_key| prev_key == key); // We need to check key boundaries once we reach next key or end of layer with the same key if !same_key || lsn == dup_end_lsn { @@ -3764,6 +3777,16 @@ impl Timeline { // Sync layers if !new_layers.is_empty() { + // Print a warning if the created layer is larger than double the target size + let warn_limit = target_file_size * 2; + for layer in new_layers.iter() { + if layer.desc.file_size > warn_limit { + warn!( + %layer, + "created delta file of size {} larger than double of target of {target_file_size}", layer.desc.file_size + ); + } + } let mut layer_paths: Vec = new_layers.iter().map(|l| l.path()).collect(); // Fsync all the layer files and directory using multiple threads to @@ -3776,12 +3799,10 @@ impl Timeline { layer_paths.pop().unwrap(); } - stats.write_layer_files_micros = stats.prepare_iterators_micros.till_now(); + stats.write_layer_files_micros = stats.read_lock_drop_micros.till_now(); stats.new_deltas_count = Some(new_layers.len()); stats.new_deltas_size = Some(new_layers.iter().map(|l| l.desc.file_size).sum()); - drop(all_keys_iter); // So that deltas_to_compact is no longer borrowed - match TryInto::::try_into(stats) .and_then(|stats| serde_json::to_string(&stats).context("serde_json::to_string")) { diff --git a/test_runner/regress/test_remote_storage.py b/test_runner/regress/test_remote_storage.py index 714118a015..4f5b193ce2 100644 --- a/test_runner/regress/test_remote_storage.py +++ b/test_runner/regress/test_remote_storage.py @@ -270,7 +270,7 @@ def test_remote_storage_upload_queue_retries( f""" INSERT INTO foo (id, val) SELECT g, '{data}' - FROM generate_series(1, 10000) g + FROM generate_series(1, 20000) g ON CONFLICT (id) DO UPDATE SET val = EXCLUDED.val """, @@ -371,7 +371,7 @@ def test_remote_storage_upload_queue_retries( log.info("restarting postgres to validate") endpoint = env.endpoints.create_start("main", tenant_id=tenant_id) with endpoint.cursor() as cur: - assert query_scalar(cur, "SELECT COUNT(*) FROM foo WHERE val = 'd'") == 10000 + assert query_scalar(cur, "SELECT COUNT(*) FROM foo WHERE val = 'd'") == 20000 @pytest.mark.parametrize("remote_storage_kind", [RemoteStorageKind.LOCAL_FS]) @@ -419,7 +419,7 @@ def test_remote_timeline_client_calls_started_metric( f""" INSERT INTO foo (id, val) SELECT g, '{data}' - FROM generate_series(1, 10000) g + FROM generate_series(1, 20000) g ON CONFLICT (id) DO UPDATE SET val = EXCLUDED.val """, @@ -510,7 +510,7 @@ def test_remote_timeline_client_calls_started_metric( log.info("restarting postgres to validate") endpoint = env.endpoints.create_start("main", tenant_id=tenant_id) with endpoint.cursor() as cur: - assert query_scalar(cur, "SELECT COUNT(*) FROM foo WHERE val = 'd'") == 10000 + assert query_scalar(cur, "SELECT COUNT(*) FROM foo WHERE val = 'd'") == 20000 # ensure that we updated the calls_started download metric fetch_calls_started() From 25934ec1ba63c183a8d9af5facdfb0a3f22652e6 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Wed, 16 Aug 2023 17:20:28 +0100 Subject: [PATCH 13/40] proxy: reduce global conn pool contention (#4747) ## Problem As documented, the global connection pool will be high contention. ## Summary of changes Use DashMap rather than Mutex. Of note, DashMap currently uses a RwLock internally, but it's partially sharded to reduce contention by a factor of N. 
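As a toy sketch of that sharding idea (an illustration only, not DashMap's actual implementation; the shard count, key type and hashing scheme here are arbitrary):

```rust
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::RwLock;

/// A key is hashed to one of N shards, each behind its own RwLock, so
/// threads touching different shards never contend on the same lock.
struct ShardedMap<V> {
    shards: Vec<RwLock<HashMap<String, V>>>,
}

impl<V: Clone> ShardedMap<V> {
    fn new(n_shards: usize) -> Self {
        let shards = (0..n_shards).map(|_| RwLock::new(HashMap::new())).collect();
        Self { shards }
    }

    fn shard(&self, key: &str) -> &RwLock<HashMap<String, V>> {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        &self.shards[hasher.finish() as usize % self.shards.len()]
    }

    fn insert(&self, key: String, value: V) {
        self.shard(&key).write().unwrap().insert(key, value);
    }

    fn get(&self, key: &str) -> Option<V> {
        self.shard(key).read().unwrap().get(key).cloned()
    }
}

fn main() {
    let pool = ShardedMap::new(16);
    pool.insert("endpoint-1".to_owned(), 42);
    assert_eq!(pool.get("endpoint-1"), Some(42));
}
```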
We could potentially use flurry which is a port of Java's concurrent hashmap, but I have no good understanding of it's performance characteristics. Dashmap is at least equivalent to hashmap but less contention. See the read heavy benchmark to analyse our expected performance I also spoke with the developer of dashmap recently, and they are working on porting the implementation to use concurrent HAMT FWIW --- Cargo.lock | 56 +++++--- Cargo.toml | 3 +- proxy/Cargo.toml | 3 +- proxy/src/http/conn_pool.rs | 179 ++++++++++++++++++++------ proxy/src/http/sql_over_http.rs | 3 +- test_runner/fixtures/neon_fixtures.py | 20 ++- test_runner/regress/test_proxy.py | 47 +++++++ 7 files changed, 247 insertions(+), 64 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c18bddfbcb..97515ca24d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -639,6 +639,12 @@ dependencies = [ "vsimd", ] +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + [[package]] name = "bincode" version = "1.3.3" @@ -1010,9 +1016,9 @@ checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] @@ -1192,15 +1198,15 @@ dependencies = [ [[package]] name = "dashmap" -version = "5.4.0" +version = "5.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" +checksum = "6943ae99c34386c84a470c499d3414f66502a41340aa895406e0d2e4a207b91d" dependencies = [ "cfg-if", - "hashbrown 0.12.3", + "hashbrown 0.14.0", "lock_api", "once_cell", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] @@ -1649,6 +1655,12 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" + [[package]] name = "hashlink" version = "0.8.2" @@ -2073,9 +2085,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", @@ -2339,9 +2351,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "oorandom" @@ -2640,7 +2652,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] @@ -2659,15 +2671,26 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.2.16", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.45.0", + "windows-targets 0.48.0", +] + +[[package]] +name = "password-hash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" +dependencies = [ + "base64ct", + "rand_core", + "subtle", ] [[package]] @@ -2678,6 +2701,8 @@ checksum = "f0ca0b5a68607598bf3bad68f32227a8164f6254833f84eafaac409cd6746c31" dependencies = [ "digest", "hmac", + "password-hash", + "sha2", ] [[package]] @@ -3056,6 +3081,7 @@ dependencies = [ "chrono", "clap", "consumption_metrics", + "dashmap", "futures", "git-version", "hashbrown 0.13.2", diff --git a/Cargo.toml b/Cargo.toml index a0acc061fb..5eab28e2e5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -54,6 +54,7 @@ comfy-table = "6.1" const_format = "0.2" crc32c = "0.6" crossbeam-utils = "0.8.5" +dashmap = "5.5.0" either = "1.8" enum-map = "2.4.2" enumset = "1.0.12" @@ -88,7 +89,7 @@ opentelemetry = "0.19.0" opentelemetry-otlp = { version = "0.12.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] } opentelemetry-semantic-conventions = "0.11.0" parking_lot = "0.12" -pbkdf2 = "0.12.1" +pbkdf2 = { version = "0.12.1", features = ["simple", "std"] } pin-project-lite = "0.2" prometheus = {version = "0.13", default_features=false, features = ["process"]} # removes protobuf dependency prost = "0.11" diff --git a/proxy/Cargo.toml b/proxy/Cargo.toml index 438dd62315..cbab0c6f07 100644 --- a/proxy/Cargo.toml +++ b/proxy/Cargo.toml @@ -13,6 +13,7 @@ bytes = { workspace = true, features = ["serde"] } chrono.workspace = true clap.workspace = true consumption_metrics.workspace = true +dashmap.workspace = true futures.workspace = true git-version.workspace = true hashbrown.workspace = true @@ -29,7 +30,7 @@ metrics.workspace = true once_cell.workspace = true opentelemetry.workspace = true parking_lot.workspace = true -pbkdf2.workspace = true +pbkdf2 = { workspace = true, features = ["simple", "std"] } pin-project-lite.workspace = true postgres_backend.workspace = true pq_proto.workspace = true diff --git a/proxy/src/http/conn_pool.rs b/proxy/src/http/conn_pool.rs index 703632a511..9bba846d57 100644 --- a/proxy/src/http/conn_pool.rs +++ b/proxy/src/http/conn_pool.rs @@ -1,8 +1,14 @@ use anyhow::Context; use async_trait::async_trait; -use parking_lot::Mutex; +use dashmap::DashMap; +use parking_lot::RwLock; +use pbkdf2::{ + password_hash::{PasswordHashString, PasswordHasher, PasswordVerifier, SaltString}, + Params, Pbkdf2, +}; use pq_proto::StartupMessageParams; use std::fmt; +use std::sync::atomic::{self, AtomicUsize}; use std::{collections::HashMap, sync::Arc}; use tokio::time; @@ -46,19 +52,40 @@ struct ConnPoolEntry { _last_access: std::time::Instant, } -// Per-endpoint connection pool, (dbname, username) -> Vec +// Per-endpoint connection pool, (dbname, username) -> DbUserConnPool // Number of open connections is limited by the `max_conns_per_endpoint`. pub struct EndpointConnPool { - pools: HashMap<(String, String), Vec>, + pools: HashMap<(String, String), DbUserConnPool>, total_conns: usize, } +/// This is cheap and not hugely secure. +/// But probably good enough for in memory only hashes. 
+/// +/// Still takes 3.5ms to hash on my hardware. +/// We don't want to ruin the latency improvements of using the pool by making password verification take too long +const PARAMS: Params = Params { + rounds: 10_000, + output_length: 32, +}; + +#[derive(Default)] +pub struct DbUserConnPool { + conns: Vec, + password_hash: Option, +} + pub struct GlobalConnPool { // endpoint -> per-endpoint connection pool // // That should be a fairly conteded map, so return reference to the per-endpoint // pool as early as possible and release the lock. - global_pool: Mutex>>>, + global_pool: DashMap>>, + + /// [`DashMap::len`] iterates over all inner pools and acquires a read lock on each. + /// That seems like far too much effort, so we're using a relaxed increment counter instead. + /// It's only used for diagnostics. + global_pool_size: AtomicUsize, // Maximum number of connections per one endpoint. // Can mix different (dbname, username) connections. @@ -72,7 +99,8 @@ pub struct GlobalConnPool { impl GlobalConnPool { pub fn new(config: &'static crate::config::ProxyConfig) -> Arc { Arc::new(Self { - global_pool: Mutex::new(HashMap::new()), + global_pool: DashMap::new(), + global_pool_size: AtomicUsize::new(0), max_conns_per_endpoint: MAX_CONNS_PER_ENDPOINT, proxy_config: config, }) @@ -85,33 +113,92 @@ impl GlobalConnPool { ) -> anyhow::Result { let mut client: Option = None; + let mut hash_valid = false; if !force_new { - let pool = self.get_endpoint_pool(&conn_info.hostname).await; + let pool = self.get_or_create_endpoint_pool(&conn_info.hostname); + let mut hash = None; // find a pool entry by (dbname, username) if exists - let mut pool = pool.lock(); - let pool_entries = pool.pools.get_mut(&conn_info.db_and_user()); - if let Some(pool_entries) = pool_entries { - if let Some(entry) = pool_entries.pop() { - client = Some(entry.conn); - pool.total_conns -= 1; + { + let pool = pool.read(); + if let Some(pool_entries) = pool.pools.get(&conn_info.db_and_user()) { + if !pool_entries.conns.is_empty() { + hash = pool_entries.password_hash.clone(); + } + } + } + + // a connection exists in the pool, verify the password hash + if let Some(hash) = hash { + let pw = conn_info.password.clone(); + let validate = tokio::task::spawn_blocking(move || { + Pbkdf2.verify_password(pw.as_bytes(), &hash.password_hash()) + }) + .await?; + + // if the hash is invalid, don't error + // we will continue with the regular connection flow + if validate.is_ok() { + hash_valid = true; + let mut pool = pool.write(); + if let Some(pool_entries) = pool.pools.get_mut(&conn_info.db_and_user()) { + if let Some(entry) = pool_entries.conns.pop() { + client = Some(entry.conn); + pool.total_conns -= 1; + } + } } } } // ok return cached connection if found and establish a new one otherwise - if let Some(client) = client { + let new_client = if let Some(client) = client { if client.is_closed() { info!("pool: cached connection '{conn_info}' is closed, opening a new one"); connect_to_compute(self.proxy_config, conn_info).await } else { info!("pool: reusing connection '{conn_info}'"); - Ok(client) + return Ok(client); } } else { info!("pool: opening a new connection '{conn_info}'"); connect_to_compute(self.proxy_config, conn_info).await + }; + + match &new_client { + // clear the hash. 
it's no longer valid + // TODO: update tokio-postgres fork to allow access to this error kind directly + Err(err) + if hash_valid && err.to_string().contains("password authentication failed") => + { + let pool = self.get_or_create_endpoint_pool(&conn_info.hostname); + let mut pool = pool.write(); + if let Some(entry) = pool.pools.get_mut(&conn_info.db_and_user()) { + entry.password_hash = None; + } + } + // new password is valid and we should insert/update it + Ok(_) if !force_new && !hash_valid => { + let pw = conn_info.password.clone(); + let new_hash = tokio::task::spawn_blocking(move || { + let salt = SaltString::generate(rand::rngs::OsRng); + Pbkdf2 + .hash_password_customized(pw.as_bytes(), None, None, PARAMS, &salt) + .map(|s| s.serialize()) + }) + .await??; + + let pool = self.get_or_create_endpoint_pool(&conn_info.hostname); + let mut pool = pool.write(); + pool.pools + .entry(conn_info.db_and_user()) + .or_default() + .password_hash = Some(new_hash); + } + _ => {} } + + new_client } pub async fn put( @@ -119,33 +206,31 @@ impl GlobalConnPool { conn_info: &ConnInfo, client: tokio_postgres::Client, ) -> anyhow::Result<()> { - let pool = self.get_endpoint_pool(&conn_info.hostname).await; + let pool = self.get_or_create_endpoint_pool(&conn_info.hostname); // return connection to the pool - let mut total_conns; let mut returned = false; let mut per_db_size = 0; - { - let mut pool = pool.lock(); - total_conns = pool.total_conns; + let total_conns = { + let mut pool = pool.write(); - let pool_entries: &mut Vec = pool - .pools - .entry(conn_info.db_and_user()) - .or_insert_with(|| Vec::with_capacity(1)); - if total_conns < self.max_conns_per_endpoint { - pool_entries.push(ConnPoolEntry { - conn: client, - _last_access: std::time::Instant::now(), - }); + if pool.total_conns < self.max_conns_per_endpoint { + // we create this db-user entry in get, so it should not be None + if let Some(pool_entries) = pool.pools.get_mut(&conn_info.db_and_user()) { + pool_entries.conns.push(ConnPoolEntry { + conn: client, + _last_access: std::time::Instant::now(), + }); - total_conns += 1; - returned = true; - per_db_size = pool_entries.len(); + returned = true; + per_db_size = pool_entries.conns.len(); - pool.total_conns += 1; + pool.total_conns += 1; + } } - } + + pool.total_conns + }; // do logging outside of the mutex if returned { @@ -157,25 +242,35 @@ impl GlobalConnPool { Ok(()) } - async fn get_endpoint_pool(&self, endpoint: &String) -> Arc> { + fn get_or_create_endpoint_pool(&self, endpoint: &String) -> Arc> { + // fast path + if let Some(pool) = self.global_pool.get(endpoint) { + return pool.clone(); + } + + // slow path + let new_pool = Arc::new(RwLock::new(EndpointConnPool { + pools: HashMap::new(), + total_conns: 0, + })); + // find or create a pool for this endpoint let mut created = false; - let mut global_pool = self.global_pool.lock(); - let pool = global_pool + let pool = self + .global_pool .entry(endpoint.clone()) .or_insert_with(|| { created = true; - Arc::new(Mutex::new(EndpointConnPool { - pools: HashMap::new(), - total_conns: 0, - })) + new_pool }) .clone(); - let global_pool_size = global_pool.len(); - drop(global_pool); // log new global pool size if created { + let global_pool_size = self + .global_pool_size + .fetch_add(1, atomic::Ordering::Relaxed) + + 1; info!( "pool: created new pool for '{endpoint}', global pool size now {global_pool_size}" ); diff --git a/proxy/src/http/sql_over_http.rs b/proxy/src/http/sql_over_http.rs index 82e78796c6..33375e63e9 100644 --- 
a/proxy/src/http/sql_over_http.rs +++ b/proxy/src/http/sql_over_http.rs @@ -44,6 +44,7 @@ const MAX_REQUEST_SIZE: u64 = 1024 * 1024; // 1 MB static RAW_TEXT_OUTPUT: HeaderName = HeaderName::from_static("neon-raw-text-output"); static ARRAY_MODE: HeaderName = HeaderName::from_static("neon-array-mode"); +static ALLOW_POOL: HeaderName = HeaderName::from_static("neon-pool-opt-in"); static TXN_ISOLATION_LEVEL: HeaderName = HeaderName::from_static("neon-batch-isolation-level"); static TXN_READ_ONLY: HeaderName = HeaderName::from_static("neon-batch-read-only"); static TXN_DEFERRABLE: HeaderName = HeaderName::from_static("neon-batch-deferrable"); @@ -193,7 +194,7 @@ pub async fn handle( let array_mode = headers.get(&ARRAY_MODE) == Some(&HEADER_VALUE_TRUE); // Allow connection pooling only if explicitly requested - let allow_pool = false; + let allow_pool = headers.get(&ALLOW_POOL) == Some(&HEADER_VALUE_TRUE); // isolation level, read only and deferrable diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 62c5bd9ba9..61cd169fa3 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -1769,6 +1769,15 @@ class VanillaPostgres(PgProtocol): with open(os.path.join(self.pgdatadir, "postgresql.conf"), "a") as conf_file: conf_file.write("\n".join(options)) + def edit_hba(self, hba: List[str]): + """Prepend hba lines into pg_hba.conf file.""" + assert not self.running + with open(os.path.join(self.pgdatadir, "pg_hba.conf"), "r+") as conf_file: + data = conf_file.read() + conf_file.seek(0) + conf_file.write("\n".join(hba) + "\n") + conf_file.write(data) + def start(self, log_path: Optional[str] = None): assert not self.running self.running = True @@ -2166,15 +2175,18 @@ def static_proxy( ) -> Iterator[NeonProxy]: """Neon proxy that routes directly to vanilla postgres.""" - # For simplicity, we use the same user for both `--auth-endpoint` and `safe_psql` - vanilla_pg.start() - vanilla_pg.safe_psql("create user proxy with login superuser password 'password'") - port = vanilla_pg.default_options["port"] host = vanilla_pg.default_options["host"] dbname = vanilla_pg.default_options["dbname"] auth_endpoint = f"postgres://proxy:password@{host}:{port}/{dbname}" + # require password for 'http_auth' user + vanilla_pg.edit_hba([f"host {dbname} http_auth {host} password"]) + + # For simplicity, we use the same user for both `--auth-endpoint` and `safe_psql` + vanilla_pg.start() + vanilla_pg.safe_psql("create user proxy with login superuser password 'password'") + proxy_port = port_distributor.get_port() mgmt_port = port_distributor.get_port() http_port = port_distributor.get_port() diff --git a/test_runner/regress/test_proxy.py b/test_runner/regress/test_proxy.py index dd767e14b7..598a1bd084 100644 --- a/test_runner/regress/test_proxy.py +++ b/test_runner/regress/test_proxy.py @@ -340,3 +340,50 @@ def test_sql_over_http_batch(static_proxy: NeonProxy): assert headers["Neon-Batch-Deferrable"] == "true" assert result[0]["rows"] == [{"answer": 42}] + + +def test_sql_over_http_pool(static_proxy: NeonProxy): + static_proxy.safe_psql("create user http_auth with password 'http' superuser") + + def get_pid(status: int, pw: str) -> Any: + connstr = ( + f"postgresql://http_auth:{pw}@{static_proxy.domain}:{static_proxy.proxy_port}/postgres" + ) + response = requests.post( + f"https://{static_proxy.domain}:{static_proxy.external_http_port}/sql", + data=json.dumps( + {"query": "SELECT pid FROM pg_stat_activity WHERE state = 'active'", "params": []} 
+ ), + headers={ + "Content-Type": "application/sql", + "Neon-Connection-String": connstr, + "Neon-Pool-Opt-In": "true", + }, + verify=str(static_proxy.test_output_dir / "proxy.crt"), + ) + assert response.status_code == status + return response.json() + + pid1 = get_pid(200, "http")["rows"][0]["pid"] + + # query should be on the same connection + rows = get_pid(200, "http")["rows"] + assert rows == [{"pid": pid1}] + + # incorrect password should not work + res = get_pid(400, "foobar") + assert "password authentication failed for user" in res["message"] + + static_proxy.safe_psql("alter user http_auth with password 'http2'") + + # after password change, should open a new connection to verify it + pid2 = get_pid(200, "http2")["rows"][0]["pid"] + assert pid1 != pid2 + + # query should be on an existing connection + pid = get_pid(200, "http2")["rows"][0]["pid"] + assert pid in [pid1, pid2] + + # old password should not work + res = get_pid(400, "http") + assert "password authentication failed for user" in res["message"] From 994411f5c2ada300d71938ca1e5a95db2cbda43a Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Wed, 16 Aug 2023 18:33:47 +0200 Subject: [PATCH 14/40] page cache: newtype the blob_io and ephemeral_file file ids (#5005) This makes it more explicit that these are different u64-sized namespaces. Re-using one in place of the other would be catastrophic. Prep for https://github.com/neondatabase/neon/pull/4994 which will eliminate the ephemeral_file::FileId and move the blob_io::FileId into page_cache. It makes sense to have this preliminary commit though, to minimize amount of new concept in #4994 and other preliminaries that depend on that work. --- pageserver/src/page_cache.rs | 32 +++++++++++++++++-------- pageserver/src/tenant/block_io.rs | 10 ++++++-- pageserver/src/tenant/ephemeral_file.rs | 23 ++++++++++++------ 3 files changed, 46 insertions(+), 19 deletions(-) diff --git a/pageserver/src/page_cache.rs b/pageserver/src/page_cache.rs index e29eb1d197..8306ce4636 100644 --- a/pageserver/src/page_cache.rs +++ b/pageserver/src/page_cache.rs @@ -53,7 +53,7 @@ use utils::{ lsn::Lsn, }; -use crate::tenant::writeback_ephemeral_file; +use crate::tenant::{block_io, ephemeral_file, writeback_ephemeral_file}; use crate::{metrics::PageCacheSizeMetrics, repository::Key}; static PAGE_CACHE: OnceCell = OnceCell::new(); @@ -98,11 +98,11 @@ enum CacheKey { lsn: Lsn, }, EphemeralPage { - file_id: u64, + file_id: ephemeral_file::FileId, blkno: u32, }, ImmutableFilePage { - file_id: u64, + file_id: block_io::FileId, blkno: u32, }, } @@ -177,9 +177,9 @@ pub struct PageCache { /// can have a separate mapping map, next to this field. materialized_page_map: RwLock>>, - ephemeral_page_map: RwLock>, + ephemeral_page_map: RwLock>, - immutable_page_map: RwLock>, + immutable_page_map: RwLock>, /// The actual buffers with their metadata. slots: Box<[Slot]>, @@ -390,20 +390,28 @@ impl PageCache { // Section 1.2: Public interface functions for working with Ephemeral pages. 
- pub fn read_ephemeral_buf(&self, file_id: u64, blkno: u32) -> anyhow::Result { + pub fn read_ephemeral_buf( + &self, + file_id: ephemeral_file::FileId, + blkno: u32, + ) -> anyhow::Result { let mut cache_key = CacheKey::EphemeralPage { file_id, blkno }; self.lock_for_read(&mut cache_key) } - pub fn write_ephemeral_buf(&self, file_id: u64, blkno: u32) -> anyhow::Result { + pub fn write_ephemeral_buf( + &self, + file_id: ephemeral_file::FileId, + blkno: u32, + ) -> anyhow::Result { let cache_key = CacheKey::EphemeralPage { file_id, blkno }; self.lock_for_write(&cache_key) } /// Immediately drop all buffers belonging to given file, without writeback - pub fn drop_buffers_for_ephemeral(&self, drop_file_id: u64) { + pub fn drop_buffers_for_ephemeral(&self, drop_file_id: ephemeral_file::FileId) { for slot_idx in 0..self.slots.len() { let slot = &self.slots[slot_idx]; @@ -424,14 +432,18 @@ impl PageCache { // Section 1.3: Public interface functions for working with immutable file pages. - pub fn read_immutable_buf(&self, file_id: u64, blkno: u32) -> anyhow::Result { + pub fn read_immutable_buf( + &self, + file_id: block_io::FileId, + blkno: u32, + ) -> anyhow::Result { let mut cache_key = CacheKey::ImmutableFilePage { file_id, blkno }; self.lock_for_read(&mut cache_key) } /// Immediately drop all buffers belonging to given file, without writeback - pub fn drop_buffers_for_immutable(&self, drop_file_id: u64) { + pub fn drop_buffers_for_immutable(&self, drop_file_id: block_io::FileId) { for slot_idx in 0..self.slots.len() { let slot = &self.slots[slot_idx]; diff --git a/pageserver/src/tenant/block_io.rs b/pageserver/src/tenant/block_io.rs index f25df324f4..3cc4e61a95 100644 --- a/pageserver/src/tenant/block_io.rs +++ b/pageserver/src/tenant/block_io.rs @@ -117,6 +117,12 @@ where } } static NEXT_ID: AtomicU64 = AtomicU64::new(1); +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct FileId(u64); + +fn next_file_id() -> FileId { + FileId(NEXT_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed)) +} /// An adapter for reading a (virtual) file using the page cache. /// @@ -126,7 +132,7 @@ pub struct FileBlockReader { pub file: F, /// Unique ID of this file, used as key in the page cache. 
- file_id: u64, + file_id: FileId, } impl FileBlockReader @@ -134,7 +140,7 @@ where F: FileExt, { pub fn new(file: F) -> Self { - let file_id = NEXT_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + let file_id = next_file_id(); FileBlockReader { file_id, file } } diff --git a/pageserver/src/tenant/ephemeral_file.rs b/pageserver/src/tenant/ephemeral_file.rs index 6979d19d33..ddce0cb152 100644 --- a/pageserver/src/tenant/ephemeral_file.rs +++ b/pageserver/src/tenant/ephemeral_file.rs @@ -23,19 +23,28 @@ use utils::id::{TenantId, TimelineId}; /// static EPHEMERAL_FILES: Lazy> = Lazy::new(|| { RwLock::new(EphemeralFiles { - next_file_id: 1, + next_file_id: FileId(1), files: HashMap::new(), }) }); -pub struct EphemeralFiles { - next_file_id: u64, +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct FileId(u64); - files: HashMap>, +impl std::fmt::Display for FileId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +pub struct EphemeralFiles { + next_file_id: FileId, + + files: HashMap>, } pub struct EphemeralFile { - file_id: u64, + file_id: FileId, _tenant_id: TenantId, _timeline_id: TimelineId, file: Arc, @@ -51,7 +60,7 @@ impl EphemeralFile { ) -> Result { let mut l = EPHEMERAL_FILES.write().unwrap(); let file_id = l.next_file_id; - l.next_file_id += 1; + l.next_file_id = FileId(l.next_file_id.0 + 1); let filename = conf .timeline_path(&tenant_id, &timeline_id) @@ -211,7 +220,7 @@ impl Drop for EphemeralFile { } } -pub fn writeback(file_id: u64, blkno: u32, buf: &[u8]) -> Result<(), io::Error> { +pub fn writeback(file_id: FileId, blkno: u32, buf: &[u8]) -> Result<(), io::Error> { if let Some(file) = EPHEMERAL_FILES.read().unwrap().files.get(&file_id) { match file.write_all_at(buf, blkno as u64 * PAGE_SZ as u64) { Ok(_) => Ok(()), From d3612ce26660750e2178a3ce30103106f993aaae Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Thu, 17 Aug 2023 11:47:31 +0300 Subject: [PATCH 15/40] delta_layer: Restore generic from last week (#5014) Restores #4937 work relating to the ability to use `ResidentDeltaLayer` (which is an Arc wrapper) in #4938 for the ValueRef's by removing the borrow from `ValueRef` and providing it from an upper layer. This should not have any functional changes, most importantly, the `main` will continue to use the borrowed `DeltaLayerInner`. It might be that I can change #4938 to be like this. If that is so, I'll gladly rip out the `Ref` and move the borrow back. But I'll first want to look at the current test failures. --- .../src/tenant/storage_layer/delta_layer.rs | 39 +++++++++++++++---- 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index 4324a6e9a0..bff42a1ec2 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -552,12 +552,17 @@ impl DeltaLayer { /// Loads all keys stored in the layer. Returns key, lsn, value size and value reference. /// /// The value can be obtained via the [`ValueRef::load`] function. 
- pub async fn load_keys(&self, ctx: &RequestContext) -> Result> { + pub(crate) async fn load_keys( + &self, + ctx: &RequestContext, + ) -> Result>>> { let inner = self .load(LayerAccessKind::KeyIter, ctx) .await .context("load delta layer keys")?; - DeltaLayerInner::load_keys(inner) + + let inner = Ref(&**inner); + DeltaLayerInner::load_keys(&inner) .await .context("Layer index is corrupted") } @@ -953,14 +958,14 @@ impl DeltaLayerInner { pub(super) async fn load_keys + Clone>( this: &T, - ) -> Result> { + ) -> Result>> { let dl = this.as_ref(); let file = &dl.file; let tree_reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new(dl.index_start_blk, dl.index_root_blk, file); - let mut all_keys: Vec = Vec::new(); + let mut all_keys: Vec> = Vec::new(); tree_reader .visit( @@ -970,7 +975,7 @@ impl DeltaLayerInner { let delta_key = DeltaKey::from_slice(key); let val_ref = ValueRef { blob_ref: BlobRef(value), - reader: BlockCursor::new(Adapter(dl)), + reader: BlockCursor::new(Adapter(this.clone())), }; let pos = BlobRef(value).pos(); if let Some(last) = all_keys.last_mut() { @@ -999,14 +1004,34 @@ impl DeltaLayerInner { } } +/// Cloneable borrow wrapper to make borrows behave like smart pointers. +/// +/// Shared references are trivially copyable. This wrapper avoids (confusion) to otherwise attempt +/// cloning DeltaLayerInner. +pub(crate) struct Ref(T); + +impl<'a, T> AsRef for Ref<&'a T> { + fn as_ref(&self) -> &T { + self.0 + } +} + +impl<'a, T> Clone for Ref<&'a T> { + fn clone(&self) -> Self { + *self + } +} + +impl<'a, T> Copy for Ref<&'a T> {} + /// A set of data associated with a delta layer key and its value -pub struct DeltaEntry<'a> { +pub struct DeltaEntry> { pub key: Key, pub lsn: Lsn, /// Size of the stored value pub size: u64, /// Reference to the on-disk value - pub val: ValueRef<&'a DeltaLayerInner>, + pub val: ValueRef, } /// Reference to an on-disk value From 786c7b3708a152ba44ba54272c5f320c50a56b16 Mon Sep 17 00:00:00 2001 From: Anastasia Lubennikova Date: Fri, 11 Aug 2023 15:41:37 +0300 Subject: [PATCH 16/40] Refactor remote extensions index download. Don't download ext_index.json from s3, but instead receive it as a part of spec from control plane. 
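For illustration, the index carried in the spec could look roughly like this (a made-up example: the field names mirror the `RemoteExtSpec` and `ExtensionData` shapes used in this patch, but the extension name, control-file contents and archive path are invented, and `<build-tag>` is a placeholder):

```json
"remote_extensions": {
  "public_extensions": ["anon"],
  "library_index": {
    "anon": "anon"
  },
  "extension_data": {
    "anon": {
      "control_data": {
        "anon.control": "default_version = '1.1.0'\nmodule_pathname = '$libdir/anon'\nrelocatable = false"
      },
      "archive_path": "<build-tag>/v15/extensions/anon.tar.zst"
    }
  }
}
```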
This eliminates s3 access for most compute starts, and also allows us to update extensions spec on the fly --- Cargo.lock | 2 + compute_tools/src/bin/compute_ctl.rs | 6 +- compute_tools/src/compute.rs | 116 +++++------------------ compute_tools/src/extension_server.rs | 94 ++++-------------- compute_tools/src/http/api.rs | 60 +++++++++--- control_plane/src/endpoint.rs | 2 +- libs/compute_api/Cargo.toml | 3 + libs/compute_api/src/responses.rs | 1 - libs/compute_api/src/spec.rs | 54 ++++++++++- libs/compute_api/tests/cluster_spec.json | 40 +++++++- 10 files changed, 188 insertions(+), 190 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 97515ca24d..23741e2a3d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -892,6 +892,8 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", + "regex", + "remote_storage", "serde", "serde_json", "serde_with", diff --git a/compute_tools/src/bin/compute_ctl.rs b/compute_tools/src/bin/compute_ctl.rs index ad747470c2..343bb41d3b 100644 --- a/compute_tools/src/bin/compute_ctl.rs +++ b/compute_tools/src/bin/compute_ctl.rs @@ -38,7 +38,7 @@ use std::fs::File; use std::panic; use std::path::Path; use std::process::exit; -use std::sync::{mpsc, Arc, Condvar, Mutex, OnceLock, RwLock}; +use std::sync::{mpsc, Arc, Condvar, Mutex, RwLock}; use std::{thread, time::Duration}; use anyhow::{Context, Result}; @@ -147,6 +147,7 @@ fn main() -> Result<()> { match spec_json { // First, try to get cluster spec from the cli argument Some(json) => { + info!("got spec from cli argument {}", json); spec = Some(serde_json::from_str(json)?); } None => { @@ -182,6 +183,7 @@ fn main() -> Result<()> { if let Some(spec) = spec { let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?; + info!("new pspec.spec: {:?}", pspec.spec); new_state.pspec = Some(pspec); spec_set = true; } else { @@ -196,9 +198,7 @@ fn main() -> Result<()> { state: Mutex::new(new_state), state_changed: Condvar::new(), ext_remote_storage, - ext_remote_paths: OnceLock::new(), ext_download_progress: RwLock::new(HashMap::new()), - library_index: OnceLock::new(), build_tag, }; let compute = Arc::new(compute_node); diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs index 03ffedc0e3..d9c71621c2 100644 --- a/compute_tools/src/compute.rs +++ b/compute_tools/src/compute.rs @@ -5,7 +5,7 @@ use std::os::unix::fs::PermissionsExt; use std::path::Path; use std::process::{Command, Stdio}; use std::str::FromStr; -use std::sync::{Condvar, Mutex, OnceLock, RwLock}; +use std::sync::{Condvar, Mutex, RwLock}; use std::time::Instant; use anyhow::{Context, Result}; @@ -14,7 +14,6 @@ use futures::future::join_all; use futures::stream::FuturesUnordered; use futures::StreamExt; use postgres::{Client, NoTls}; -use regex::Regex; use tokio; use tokio_postgres; use tracing::{error, info, instrument, warn}; @@ -60,10 +59,6 @@ pub struct ComputeNode { pub state_changed: Condvar, /// the S3 bucket that we search for extensions in pub ext_remote_storage: Option, - // (key: extension name, value: path to extension archive in remote storage) - pub ext_remote_paths: OnceLock>, - // (key: library name, value: name of extension containing this library) - pub library_index: OnceLock>, // key: ext_archive_name, value: started download time, download_completed? 
pub ext_download_progress: RwLock, bool)>>, pub build_tag: String, @@ -75,7 +70,6 @@ pub struct RemoteExtensionMetrics { num_ext_downloaded: u64, largest_ext_size: u64, total_ext_download_size: u64, - prep_extensions_ms: u64, } #[derive(Clone, Debug)] @@ -745,11 +739,19 @@ impl ComputeNode { pspec.timeline_id, ); + info!( + "start_compute spec.remote_extensions {:?}", + pspec.spec.remote_extensions + ); + // This part is sync, because we need to download // remote shared_preload_libraries before postgres start (if any) - { + if let Some(remote_extensions) = &pspec.spec.remote_extensions { + // First, create control files for all availale extensions + extension_server::create_control_files(remote_extensions, &self.pgbin); + let library_load_start_time = Utc::now(); - let remote_ext_metrics = self.prepare_preload_libraries(&compute_state)?; + let remote_ext_metrics = self.prepare_preload_libraries(&pspec.spec)?; let library_load_time = Utc::now() .signed_duration_since(library_load_start_time) @@ -761,7 +763,6 @@ impl ComputeNode { state.metrics.num_ext_downloaded = remote_ext_metrics.num_ext_downloaded; state.metrics.largest_ext_size = remote_ext_metrics.largest_ext_size; state.metrics.total_ext_download_size = remote_ext_metrics.total_ext_download_size; - state.metrics.prep_extensions_ms = remote_ext_metrics.prep_extensions_ms; info!( "Loading shared_preload_libraries took {:?}ms", library_load_time @@ -918,38 +919,11 @@ LIMIT 100", } } - // If remote extension storage is configured, - // download extension control files - pub async fn prepare_external_extensions(&self, compute_state: &ComputeState) -> Result<()> { - if let Some(ref ext_remote_storage) = self.ext_remote_storage { - let pspec = compute_state.pspec.as_ref().expect("spec must be set"); - let spec = &pspec.spec; - let custom_ext = spec.custom_extensions.clone().unwrap_or(Vec::new()); - info!("custom extensions: {:?}", &custom_ext); - - let (ext_remote_paths, library_index) = extension_server::get_available_extensions( - ext_remote_storage, - &self.pgbin, - &self.pgversion, - &custom_ext, - &self.build_tag, - ) - .await?; - self.ext_remote_paths - .set(ext_remote_paths) - .expect("this is the only time we set ext_remote_paths"); - self.library_index - .set(library_index) - .expect("this is the only time we set library_index"); - } - Ok(()) - } - // download an archive, unzip and place files in correct locations pub async fn download_extension( &self, - ext_name: &str, - is_library: bool, + real_ext_name: String, + ext_path: RemotePath, ) -> Result { let remote_storage = self .ext_remote_storage @@ -958,35 +932,6 @@ LIMIT 100", "Remote extensions storage is not configured", )))?; - let mut real_ext_name = ext_name; - if is_library { - // sometimes library names might have a suffix like - // library.so or library.so.3. 
We strip this off - // because library_index is based on the name without the file extension - let strip_lib_suffix = Regex::new(r"\.so.*").unwrap(); - let lib_raw_name = strip_lib_suffix.replace(real_ext_name, "").to_string(); - - real_ext_name = self - .library_index - .get() - .expect("must have already downloaded the library_index") - .get(&lib_raw_name) - .ok_or(DownloadError::BadInput(anyhow::anyhow!( - "library {} is not found", - lib_raw_name - )))?; - } - - let ext_path = &self - .ext_remote_paths - .get() - .expect("error accessing ext_remote_paths") - .get(real_ext_name) - .ok_or(DownloadError::BadInput(anyhow::anyhow!( - "real_ext_name {} is not found", - real_ext_name - )))?; - let ext_archive_name = ext_path.object_name().expect("bad path"); let mut first_try = false; @@ -1039,8 +984,8 @@ LIMIT 100", info!("downloading new extension {ext_archive_name}"); let download_size = extension_server::download_extension( - real_ext_name, - ext_path, + &real_ext_name, + &ext_path, remote_storage, &self.pgbin, ) @@ -1058,18 +1003,19 @@ LIMIT 100", #[tokio::main] pub async fn prepare_preload_libraries( &self, - compute_state: &ComputeState, + spec: &ComputeSpec, ) -> Result { if self.ext_remote_storage.is_none() { return Ok(RemoteExtensionMetrics { num_ext_downloaded: 0, largest_ext_size: 0, total_ext_download_size: 0, - prep_extensions_ms: 0, }); } - let pspec = compute_state.pspec.as_ref().expect("spec must be set"); - let spec = &pspec.spec; + let remote_extensions = spec + .remote_extensions + .as_ref() + .ok_or(anyhow::anyhow!("Remote extensions are not configured",))?; info!("parse shared_preload_libraries from spec.cluster.settings"); let mut libs_vec = Vec::new(); @@ -1081,6 +1027,7 @@ LIMIT 100", .collect(); } info!("parse shared_preload_libraries from provided postgresql.conf"); + // that is used in neon_local and python tests if let Some(conf) = &spec.cluster.postgresql_conf { let conf_lines = conf.split('\n').collect::>(); @@ -1101,30 +1048,16 @@ LIMIT 100", libs_vec.extend(preload_libs_vec); } - info!("Download ext_index.json, find the extension paths"); - let prep_ext_start_time = Utc::now(); - self.prepare_external_extensions(compute_state).await?; - let prep_ext_time_delta = Utc::now() - .signed_duration_since(prep_ext_start_time) - .to_std() - .unwrap() - .as_millis() as u64; - info!("Prepare extensions took {prep_ext_time_delta}ms"); - // Don't try to download libraries that are not in the index. // Assume that they are already present locally. 
- libs_vec.retain(|lib| { - self.library_index - .get() - .expect("error accessing ext_remote_paths") - .contains_key(lib) - }); + libs_vec.retain(|lib| remote_extensions.library_index.contains_key(lib)); info!("Downloading to shared preload libraries: {:?}", &libs_vec); let mut download_tasks = Vec::new(); for library in &libs_vec { - download_tasks.push(self.download_extension(library, true)); + let (ext_name, ext_path) = remote_extensions.get_ext(library, true)?; + download_tasks.push(self.download_extension(ext_name, ext_path)); } let results = join_all(download_tasks).await; @@ -1132,7 +1065,6 @@ LIMIT 100", num_ext_downloaded: 0, largest_ext_size: 0, total_ext_download_size: 0, - prep_extensions_ms: prep_ext_time_delta, }; for result in results { let download_size = match result { diff --git a/compute_tools/src/extension_server.rs b/compute_tools/src/extension_server.rs index 1b5e4cf974..cb54a603e0 100644 --- a/compute_tools/src/extension_server.rs +++ b/compute_tools/src/extension_server.rs @@ -73,10 +73,9 @@ More specifically, here is an example ext_index.json */ use anyhow::Context; use anyhow::{self, Result}; -use futures::future::join_all; +use compute_api::spec::RemoteExtSpec; use remote_storage::*; use serde_json; -use std::collections::HashMap; use std::io::Read; use std::num::{NonZeroU32, NonZeroUsize}; use std::path::Path; @@ -117,81 +116,6 @@ pub fn get_pg_version(pgbin: &str) -> String { panic!("Unsuported postgres version {human_version}"); } -// download control files for enabled_extensions -// return Hashmaps converting library names to extension names (library_index) -// and specifying the remote path to the archive for each extension name -pub async fn get_available_extensions( - remote_storage: &GenericRemoteStorage, - pgbin: &str, - pg_version: &str, - custom_extensions: &[String], - build_tag: &str, -) -> Result<(HashMap, HashMap)> { - let local_sharedir = Path::new(&get_pg_config("--sharedir", pgbin)).join("extension"); - let index_path = format!("{build_tag}/{pg_version}/ext_index.json"); - let index_path = RemotePath::new(Path::new(&index_path)).context("error forming path")?; - info!("download ext_index.json from: {:?}", &index_path); - - let mut download = remote_storage.download(&index_path).await?; - let mut ext_idx_buffer = Vec::new(); - download - .download_stream - .read_to_end(&mut ext_idx_buffer) - .await?; - info!("ext_index downloaded"); - - #[derive(Debug, serde::Deserialize)] - struct Index { - public_extensions: Vec, - library_index: HashMap, - extension_data: HashMap, - } - - #[derive(Debug, serde::Deserialize)] - struct ExtensionData { - control_data: HashMap, - archive_path: String, - } - - let ext_index_full = serde_json::from_slice::(&ext_idx_buffer)?; - let mut enabled_extensions = ext_index_full.public_extensions; - enabled_extensions.extend_from_slice(custom_extensions); - let mut library_index = ext_index_full.library_index; - let all_extension_data = ext_index_full.extension_data; - info!("library_index: {:?}", library_index); - - info!("enabled_extensions: {:?}", enabled_extensions); - let mut ext_remote_paths = HashMap::new(); - let mut file_create_tasks = Vec::new(); - for extension in enabled_extensions { - let ext_data = &all_extension_data[&extension]; - for (control_file, control_contents) in &ext_data.control_data { - let extension_name = control_file - .strip_suffix(".control") - .expect("control files must end in .control"); - let control_path = local_sharedir.join(control_file); - if !control_path.exists() { - 
ext_remote_paths.insert( - extension_name.to_string(), - RemotePath::from_string(&ext_data.archive_path)?, - ); - info!("writing file {:?}{:?}", control_path, control_contents); - file_create_tasks.push(tokio::fs::write(control_path, control_contents)); - } else { - warn!("control file {:?} exists both locally and remotely. ignoring the remote version.", control_file); - // also delete this from library index - library_index.retain(|_, value| value != extension_name); - } - } - } - let results = join_all(file_create_tasks).await; - for result in results { - result?; - } - info!("ext_remote_paths {:?}", ext_remote_paths); - Ok((ext_remote_paths, library_index)) -} - // download the archive for a given extension, // unzip it, and place files in the appropriate locations (share/lib) pub async fn download_extension( @@ -253,6 +177,22 @@ pub async fn download_extension( Ok(download_size) } +// Create extension control files from spec +pub fn create_control_files(remote_extensions: &RemoteExtSpec, pgbin: &str) { + let local_sharedir = Path::new(&get_pg_config("--sharedir", pgbin)).join("extension"); + for ext_data in remote_extensions.extension_data.values() { + for (control_name, control_content) in &ext_data.control_data { + let control_path = local_sharedir.join(control_name); + if !control_path.exists() { + info!("writing file {:?}{:?}", control_path, control_content); + std::fs::write(control_path, control_content).unwrap(); + } else { + warn!("control file {:?} exists both locally and remotely. ignoring the remote version.", control_path); + } + } + } +} + // This function initializes the necessary structs to use remote storage pub fn init_remote_storage(remote_ext_config: &str) -> anyhow::Result { #[derive(Debug, serde::Deserialize)] diff --git a/compute_tools/src/http/api.rs b/compute_tools/src/http/api.rs index 7713d2bb51..841e533a3a 100644 --- a/compute_tools/src/http/api.rs +++ b/compute_tools/src/http/api.rs @@ -13,7 +13,7 @@ use hyper::{Body, Method, Request, Response, Server, StatusCode}; use num_cpus; use serde_json; use tokio::task; -use tracing::{error, info}; +use tracing::{error, info, warn}; use tracing_utils::http::OtelName; fn status_response_from_state(state: &ComputeState) -> ComputeStatusResponse { @@ -126,6 +126,15 @@ async fn routes(req: Request, compute: &Arc) -> Response, compute: &Arc) -> Response Response::new(Body::from("OK")), + // debug only + info!("spec: {:?}", spec); + + let remote_extensions = match spec.remote_extensions.as_ref() { + Some(r) => r, + None => { + info!("no remote extensions spec was provided"); + let mut resp = Response::new(Body::from("no remote storage configured")); + *resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; + return resp; + } + }; + + remote_extensions.get_ext(&filename, is_library) + }; + + match ext { + Ok((ext_name, ext_path)) => { + match compute.download_extension(ext_name, ext_path).await { + Ok(_) => Response::new(Body::from("OK")), + Err(e) => { + error!("extension download failed: {}", e); + let mut resp = Response::new(Body::from(e.to_string())); + *resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; + resp + } + } + } Err(e) => { - error!("extension download failed: {}", e); - let mut resp = Response::new(Body::from(e.to_string())); + warn!("extension download failed to find extension: {}", e); + let mut resp = Response::new(Body::from("failed to find file")); *resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; resp } diff --git a/control_plane/src/endpoint.rs b/control_plane/src/endpoint.rs index 
6060799458..7ebcf98ab0 100644 --- a/control_plane/src/endpoint.rs +++ b/control_plane/src/endpoint.rs @@ -493,7 +493,7 @@ impl Endpoint { pageserver_connstring: Some(pageserver_connstring), safekeeper_connstrings, storage_auth_token: auth_token.clone(), - custom_extensions: Some(vec![]), + remote_extensions: None, }; let spec_path = self.endpoint_path().join("spec.json"); std::fs::write(spec_path, serde_json::to_string_pretty(&spec)?)?; diff --git a/libs/compute_api/Cargo.toml b/libs/compute_api/Cargo.toml index 428d031a93..b377bd2cce 100644 --- a/libs/compute_api/Cargo.toml +++ b/libs/compute_api/Cargo.toml @@ -10,6 +10,9 @@ chrono.workspace = true serde.workspace = true serde_with.workspace = true serde_json.workspace = true +regex.workspace = true utils = { path = "../utils" } +remote_storage = { version = "0.1", path = "../remote_storage/" } + workspace_hack.workspace = true diff --git a/libs/compute_api/src/responses.rs b/libs/compute_api/src/responses.rs index b9aff7282b..92bbf79cd4 100644 --- a/libs/compute_api/src/responses.rs +++ b/libs/compute_api/src/responses.rs @@ -107,7 +107,6 @@ pub struct ComputeMetrics { pub num_ext_downloaded: u64, pub largest_ext_size: u64, // these are measured in bytes pub total_ext_download_size: u64, - pub prep_extensions_ms: u64, } /// Response of the `/computes/{compute_id}/spec` control-plane API. diff --git a/libs/compute_api/src/spec.rs b/libs/compute_api/src/spec.rs index 293f6dc294..3da922e517 100644 --- a/libs/compute_api/src/spec.rs +++ b/libs/compute_api/src/spec.rs @@ -3,11 +3,16 @@ //! The spec.json file is used to pass information to 'compute_ctl'. It contains //! all the information needed to start up the right version of PostgreSQL, //! and connect it to the storage nodes. +use std::collections::HashMap; + use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr}; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; +use regex::Regex; +use remote_storage::RemotePath; + /// String type alias representing Postgres identifier and /// intended to be used for DB / role names. pub type PgIdent = String; @@ -61,8 +66,55 @@ pub struct ComputeSpec { /// the pageserver and safekeepers. pub storage_auth_token: Option, - // list of prefixes to search for custom extensions in remote extension storage + // information about available remote extensions + pub remote_extensions: Option, +} + +#[derive(Clone, Debug, Default, Deserialize, Serialize)] +pub struct RemoteExtSpec { + pub public_extensions: Option>, pub custom_extensions: Option>, + pub library_index: HashMap, + pub extension_data: HashMap, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ExtensionData { + pub control_data: HashMap, + pub archive_path: String, +} + +impl RemoteExtSpec { + pub fn get_ext( + &self, + ext_name: &str, + is_library: bool, + ) -> anyhow::Result<(String, RemotePath)> { + let mut real_ext_name = ext_name; + if is_library { + // sometimes library names might have a suffix like + // library.so or library.so.3. 
We strip this off + // because library_index is based on the name without the file extension + let strip_lib_suffix = Regex::new(r"\.so.*").unwrap(); + let lib_raw_name = strip_lib_suffix.replace(real_ext_name, "").to_string(); + + real_ext_name = self + .library_index + .get(&lib_raw_name) + .ok_or(anyhow::anyhow!("library {} is not found", lib_raw_name))?; + } + + match self.extension_data.get(real_ext_name) { + Some(ext_data) => Ok(( + real_ext_name.to_string(), + RemotePath::from_string(&ext_data.archive_path)?, + )), + None => Err(anyhow::anyhow!( + "real_ext_name {} is not found", + real_ext_name + )), + } + } } #[serde_as] diff --git a/libs/compute_api/tests/cluster_spec.json b/libs/compute_api/tests/cluster_spec.json index 8f81e7b3bd..96db13a5da 100644 --- a/libs/compute_api/tests/cluster_spec.json +++ b/libs/compute_api/tests/cluster_spec.json @@ -205,5 +205,43 @@ "name": "zenith new", "new_name": "zenith \"new\"" } - ] + ], + "remote_extensions": { + "library_index": { + "anon": "anon", + "postgis-3": "postgis", + "libpgrouting-3.4": "postgis", + "postgis_raster-3": "postgis", + "postgis_sfcgal-3": "postgis", + "postgis_topology-3": "postgis", + "address_standardizer-3": "postgis" + }, + "extension_data": { + "anon": { + "archive_path": "5834329303/v15/extensions/anon.tar.zst", + "control_data": { + "anon.control": "# PostgreSQL Anonymizer (anon) extension\ncomment = ''Data anonymization tools''\ndefault_version = ''1.1.0''\ndirectory=''extension/anon''\nrelocatable = false\nrequires = ''pgcrypto''\nsuperuser = false\nmodule_pathname = ''$libdir/anon''\ntrusted = true\n" + } + }, + "postgis": { + "archive_path": "5834329303/v15/extensions/postgis.tar.zst", + "control_data": { + "postgis.control": "# postgis extension\ncomment = ''PostGIS geometry and geography spatial types and functions''\ndefault_version = ''3.3.2''\nmodule_pathname = ''$libdir/postgis-3''\nrelocatable = false\ntrusted = true\n", + "pgrouting.control": "# pgRouting Extension\ncomment = ''pgRouting Extension''\ndefault_version = ''3.4.2''\nmodule_pathname = ''$libdir/libpgrouting-3.4''\nrelocatable = true\nrequires = ''plpgsql''\nrequires = ''postgis''\ntrusted = true\n", + "postgis_raster.control": "# postgis_raster extension\ncomment = ''PostGIS raster types and functions''\ndefault_version = ''3.3.2''\nmodule_pathname = ''$libdir/postgis_raster-3''\nrelocatable = false\nrequires = postgis\ntrusted = true\n", + "postgis_sfcgal.control": "# postgis topology extension\ncomment = ''PostGIS SFCGAL functions''\ndefault_version = ''3.3.2''\nrelocatable = true\nrequires = postgis\ntrusted = true\n", + "postgis_topology.control": "# postgis topology extension\ncomment = ''PostGIS topology spatial types and functions''\ndefault_version = ''3.3.2''\nrelocatable = false\nschema = topology\nrequires = postgis\ntrusted = true\n", + "address_standardizer.control": "# address_standardizer extension\ncomment = ''Used to parse an address into constituent elements. 
Generally used to support geocoding address normalization step.''\ndefault_version = ''3.3.2''\nrelocatable = true\ntrusted = true\n", + "postgis_tiger_geocoder.control": "# postgis tiger geocoder extension\ncomment = ''PostGIS tiger geocoder and reverse geocoder''\ndefault_version = ''3.3.2''\nrelocatable = false\nschema = tiger\nrequires = ''postgis,fuzzystrmatch''\nsuperuser= false\ntrusted = true\n", + "address_standardizer_data_us.control": "# address standardizer us dataset\ncomment = ''Address Standardizer US dataset example''\ndefault_version = ''3.3.2''\nrelocatable = true\ntrusted = true\n" + } + } + }, + "custom_extensions": [ + "anon" + ], + "public_extensions": [ + "postgis" + ] + } } From 957af049c2a213999ce6e11dea7d480742feef7a Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Thu, 17 Aug 2023 12:07:25 +0200 Subject: [PATCH 17/40] ephemeral file: refactor write_blob impl to concentrate mutable state (#5004) Before this patch, we had the `off` and `blknum` as function-wide mutable state. Now it's contained in the `Writer` struct. The use of `push_bytes` instead of index-based filling of the buffer also makes it easier to reason about what's going on. This is prep for https://github.com/neondatabase/neon/pull/4994 --- pageserver/src/tenant/ephemeral_file.rs | 100 +++++++++++++++--------- 1 file changed, 65 insertions(+), 35 deletions(-) diff --git a/pageserver/src/tenant/ephemeral_file.rs b/pageserver/src/tenant/ephemeral_file.rs index ddce0cb152..237c17d852 100644 --- a/pageserver/src/tenant/ephemeral_file.rs +++ b/pageserver/src/tenant/ephemeral_file.rs @@ -102,7 +102,10 @@ impl EphemeralFile { Ok(()) } - fn get_buf_for_write(&self, blkno: u32) -> Result { + fn get_buf_for_write( + &self, + blkno: u32, + ) -> Result, io::Error> { // Look up the right page let cache = page_cache::get(); let mut write_guard = match cache @@ -137,50 +140,77 @@ pub fn is_ephemeral_file(filename: &str) -> bool { impl BlobWriter for EphemeralFile { fn write_blob(&mut self, srcbuf: &[u8]) -> Result { + struct Writer<'a> { + ephemeral_file: &'a mut EphemeralFile, + /// The block to which the next [`push_bytes`] will write. + blknum: u32, + /// The offset inside the block identified by [`blknum`] to which [`push_bytes`] will write. + off: usize, + /// Used by [`push_bytes`] to memoize the page cache write guard across calls to it. + memo_page_guard: MemoizedPageWriteGuard, + } + struct MemoizedPageWriteGuard { + guard: page_cache::PageWriteGuard<'static>, + /// The block number of the page in `guard`. 
+ blknum: u32, + } + impl<'a> Writer<'a> { + fn new(ephemeral_file: &'a mut EphemeralFile) -> io::Result> { + let blknum = (ephemeral_file.size / PAGE_SZ as u64) as u32; + Ok(Writer { + blknum, + off: (ephemeral_file.size % PAGE_SZ as u64) as usize, + memo_page_guard: MemoizedPageWriteGuard { + guard: ephemeral_file.get_buf_for_write(blknum)?, + blknum, + }, + ephemeral_file, + }) + } + #[inline(always)] + fn push_bytes(&mut self, src: &[u8]) -> Result<(), io::Error> { + // `src_remaining` is the remaining bytes to be written + let mut src_remaining = src; + while !src_remaining.is_empty() { + let page = if self.memo_page_guard.blknum == self.blknum { + &mut self.memo_page_guard.guard + } else { + self.memo_page_guard.guard = + self.ephemeral_file.get_buf_for_write(self.blknum)?; + self.memo_page_guard.blknum = self.blknum; + &mut self.memo_page_guard.guard + }; + let dst_remaining = &mut page[self.off..]; + let n = min(dst_remaining.len(), src_remaining.len()); + dst_remaining[..n].copy_from_slice(&src_remaining[..n]); + self.off += n; + src_remaining = &src_remaining[n..]; + if self.off == PAGE_SZ { + // This block is done, move to next one. + self.blknum += 1; + self.off = 0; + } + } + Ok(()) + } + } + let pos = self.size; - - let mut blknum = (self.size / PAGE_SZ as u64) as u32; - let mut off = (pos % PAGE_SZ as u64) as usize; - - let mut buf = self.get_buf_for_write(blknum)?; + let mut writer = Writer::new(self)?; // Write the length field if srcbuf.len() < 0x80 { - buf[off] = srcbuf.len() as u8; - off += 1; + // short one-byte length header + let len_buf = [srcbuf.len() as u8]; + writer.push_bytes(&len_buf)?; } else { let mut len_buf = u32::to_be_bytes(srcbuf.len() as u32); len_buf[0] |= 0x80; - let thislen = PAGE_SZ - off; - if thislen < 4 { - // it needs to be split across pages - buf[off..(off + thislen)].copy_from_slice(&len_buf[..thislen]); - blknum += 1; - buf = self.get_buf_for_write(blknum)?; - buf[0..4 - thislen].copy_from_slice(&len_buf[thislen..]); - off = 4 - thislen; - } else { - buf[off..off + 4].copy_from_slice(&len_buf); - off += 4; - } + writer.push_bytes(&len_buf)?; } // Write the payload - let mut buf_remain = srcbuf; - while !buf_remain.is_empty() { - let mut page_remain = PAGE_SZ - off; - if page_remain == 0 { - blknum += 1; - buf = self.get_buf_for_write(blknum)?; - off = 0; - page_remain = PAGE_SZ; - } - let this_blk_len = min(page_remain, buf_remain.len()); - buf[off..(off + this_blk_len)].copy_from_slice(&buf_remain[..this_blk_len]); - off += this_blk_len; - buf_remain = &buf_remain[this_blk_len..]; - } - drop(buf); + writer.push_bytes(srcbuf)?; if srcbuf.len() < 0x80 { self.size += 1; From c8094ee51e4cf99e5dbc7f0f036fab335b999ff2 Mon Sep 17 00:00:00 2001 From: Alexander Bayandin Date: Thu, 17 Aug 2023 11:46:00 +0100 Subject: [PATCH 18/40] test_compatibility: run amcheck unconditionally (#4985) ## Problem The previous version of neon (that we use in the forward compatibility test) has installed `amcheck` extension now. We can run `pg_amcheck` unconditionally. 
## Summary of changes - Run `pg_amcheck` in compatibility tests unconditionally --- test_runner/regress/test_compatibility.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/test_runner/regress/test_compatibility.py b/test_runner/regress/test_compatibility.py index fa386ee75a..6979b99708 100644 --- a/test_runner/regress/test_compatibility.py +++ b/test_runner/regress/test_compatibility.py @@ -394,13 +394,7 @@ def check_neon_works( test_output_dir / "dump-from-wal.filediff", ) - # TODO: Run pg_amcheck unconditionally after the next release - try: - pg_bin.run(["psql", connstr, "--command", "CREATE EXTENSION IF NOT EXISTS amcheck"]) - except subprocess.CalledProcessError: - log.info("Extension amcheck is not available, skipping pg_amcheck") - else: - pg_bin.run_capture(["pg_amcheck", connstr, "--install-missing", "--verbose"]) + pg_bin.run_capture(["pg_amcheck", connstr, "--install-missing", "--verbose"]) # Check that we can interract with the data pg_bin.run_capture(["pgbench", "--time=10", "--progress=2", connstr]) From d8b0a298b769cdc44410b9eb73207d6312649d7a Mon Sep 17 00:00:00 2001 From: Dmitry Rodionov Date: Thu, 17 Aug 2023 13:46:49 +0300 Subject: [PATCH 19/40] Do not attach deleted tenants (#5008) Rather temporary solution before proper: https://github.com/neondatabase/neon/issues/5006 It requires more plumbing so lets not attach deleted tenants first and then implement resume. Additionally fix `assert_prefix_empty`. It had a buggy prefix calculation, and since we always asserted for absence of stuff it worked. Here I started to assert for presence of stuff too and it failed. Added more "presence" asserts to other places to be confident that it works. Resolves [#5016](https://github.com/neondatabase/neon/issues/5016) --- pageserver/src/tenant/delete.rs | 44 ++++-- pageserver/src/tenant/mgr.rs | 8 +- test_runner/fixtures/pageserver/utils.py | 36 ++++- test_runner/fixtures/remote_storage.py | 5 +- test_runner/regress/test_tenant_delete.py | 157 +++++++++++++++++++- test_runner/regress/test_timeline_delete.py | 36 ++++- 6 files changed, 249 insertions(+), 37 deletions(-) diff --git a/pageserver/src/tenant/delete.rs b/pageserver/src/tenant/delete.rs index 4f34f3c113..1f03ed495a 100644 --- a/pageserver/src/tenant/delete.rs +++ b/pageserver/src/tenant/delete.rs @@ -238,6 +238,30 @@ async fn cleanup_remaining_fs_traces( Ok(()) } +pub(crate) async fn remote_delete_mark_exists( + conf: &PageServerConf, + tenant_id: &TenantId, + remote_storage: &GenericRemoteStorage, +) -> anyhow::Result { + // If remote storage is there we rely on it + let remote_mark_path = remote_tenant_delete_mark_path(conf, tenant_id).context("path")?; + + let result = backoff::retry( + || async { remote_storage.download(&remote_mark_path).await }, + |e| matches!(e, DownloadError::NotFound), + SHOULD_RESUME_DELETION_FETCH_MARK_ATTEMPTS, + SHOULD_RESUME_DELETION_FETCH_MARK_ATTEMPTS, + "fetch_tenant_deletion_mark", + ) + .await; + + match result { + Ok(_) => Ok(true), + Err(DownloadError::NotFound) => Ok(false), + Err(e) => Err(anyhow::anyhow!(e)).context("remote_delete_mark_exists")?, + } +} + /// Orchestrates tenant shut down of all tasks, removes its in-memory structures, /// and deletes its data from both disk and s3. 
/// The sequence of steps: @@ -372,22 +396,10 @@ impl DeleteTenantFlow { None => return Ok(None), }; - // If remote storage is there we rely on it - let remote_mark_path = remote_tenant_delete_mark_path(conf, &tenant_id)?; - - let result = backoff::retry( - || async { remote_storage.download(&remote_mark_path).await }, - |e| matches!(e, DownloadError::NotFound), - SHOULD_RESUME_DELETION_FETCH_MARK_ATTEMPTS, - SHOULD_RESUME_DELETION_FETCH_MARK_ATTEMPTS, - "fetch_tenant_deletion_mark", - ) - .await; - - match result { - Ok(_) => Ok(acquire(tenant)), - Err(DownloadError::NotFound) => Ok(None), - Err(e) => Err(anyhow::anyhow!(e)).context("should_resume_deletion")?, + if remote_delete_mark_exists(conf, &tenant_id, remote_storage).await? { + Ok(acquire(tenant)) + } else { + Ok(None) } } diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index ae6d237066..bb8a0d7089 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -27,7 +27,7 @@ use crate::{InitializationOrder, IGNORED_TENANT_FILE_NAME}; use utils::fs_ext::PathExt; use utils::id::{TenantId, TimelineId}; -use super::delete::DeleteTenantError; +use super::delete::{remote_delete_mark_exists, DeleteTenantError}; use super::timeline::delete::DeleteTimelineFlow; /// The tenants known to the pageserver. @@ -591,6 +591,12 @@ pub async fn attach_tenant( remote_storage: GenericRemoteStorage, ctx: &RequestContext, ) -> Result<(), TenantMapInsertError> { + // Temporary solution, proper one would be to resume deletion, but that needs more plumbing around Tenant::load/Tenant::attach + // Corresponding issue https://github.com/neondatabase/neon/issues/5006 + if remote_delete_mark_exists(conf, &tenant_id, &remote_storage).await? { + return Err(anyhow::anyhow!("Tenant is marked as deleted on remote storage").into()); + } + tenant_map_insert(tenant_id, || { let tenant_dir = create_tenant_files(conf, tenant_conf, &tenant_id, CreateTenantFilesMode::Attach)?; // TODO: tenant directory remains on disk if we bail out from here on. diff --git a/test_runner/fixtures/pageserver/utils.py b/test_runner/fixtures/pageserver/utils.py index a2a49b8a6e..b61878c2a6 100644 --- a/test_runner/fixtures/pageserver/utils.py +++ b/test_runner/fixtures/pageserver/utils.py @@ -1,6 +1,8 @@ import time from typing import TYPE_CHECKING, Any, Dict, Optional +from mypy_boto3_s3.type_defs import ListObjectsV2OutputTypeDef + from fixtures.log_helper import log from fixtures.pageserver.http import PageserverApiException, PageserverHttpClient from fixtures.remote_storage import RemoteStorageKind, S3Storage @@ -230,6 +232,24 @@ if TYPE_CHECKING: def assert_prefix_empty(neon_env_builder: "NeonEnvBuilder", prefix: Optional[str] = None): + response = list_prefix(neon_env_builder, prefix) + objects = response.get("Contents") + assert ( + response["KeyCount"] == 0 + ), f"remote dir with prefix {prefix} is not empty after deletion: {objects}" + + +def assert_prefix_not_empty(neon_env_builder: "NeonEnvBuilder", prefix: Optional[str] = None): + response = list_prefix(neon_env_builder, prefix) + assert response["KeyCount"] != 0, f"remote dir with prefix {prefix} is empty: {response}" + + +def list_prefix( + neon_env_builder: "NeonEnvBuilder", prefix: Optional[str] = None +) -> ListObjectsV2OutputTypeDef: + """ + Note that this function takes into account prefix_in_bucket. + """ # For local_fs we need to properly handle empty directories, which we currently dont, so for simplicity stick to s3 api. 
assert neon_env_builder.remote_storage_kind in ( RemoteStorageKind.MOCK_S3, @@ -239,15 +259,21 @@ def assert_prefix_empty(neon_env_builder: "NeonEnvBuilder", prefix: Optional[str assert isinstance(neon_env_builder.remote_storage, S3Storage) assert neon_env_builder.remote_storage_client is not None + prefix_in_bucket = neon_env_builder.remote_storage.prefix_in_bucket or "" + if not prefix: + prefix = prefix_in_bucket + else: + # real s3 tests have uniqie per test prefix + # mock_s3 tests use special pageserver prefix for pageserver stuff + prefix = "/".join((prefix_in_bucket, prefix)) + # Note that this doesnt use pagination, so list is not guaranteed to be exhaustive. response = neon_env_builder.remote_storage_client.list_objects_v2( + Delimiter="/", Bucket=neon_env_builder.remote_storage.bucket_name, - Prefix=prefix or neon_env_builder.remote_storage.prefix_in_bucket or "", + Prefix=prefix, ) - objects = response.get("Contents") - assert ( - response["KeyCount"] == 0 - ), f"remote dir with prefix {prefix} is not empty after deletion: {objects}" + return response def wait_tenant_status_404( diff --git a/test_runner/fixtures/remote_storage.py b/test_runner/fixtures/remote_storage.py index ada2d42347..320e658639 100644 --- a/test_runner/fixtures/remote_storage.py +++ b/test_runner/fixtures/remote_storage.py @@ -92,8 +92,11 @@ def available_s3_storages() -> List[RemoteStorageKind]: class LocalFsStorage: root: Path + def tenant_path(self, tenant_id: TenantId) -> Path: + return self.root / "tenants" / str(tenant_id) + def timeline_path(self, tenant_id: TenantId, timeline_id: TimelineId) -> Path: - return self.root / "tenants" / str(tenant_id) / "timelines" / str(timeline_id) + return self.tenant_path(tenant_id) / "timelines" / str(timeline_id) def index_path(self, tenant_id: TenantId, timeline_id: TimelineId) -> Path: return self.timeline_path(tenant_id, timeline_id) / TIMELINE_INDEX_PART_FILE_NAME diff --git a/test_runner/regress/test_tenant_delete.py b/test_runner/regress/test_tenant_delete.py index f0f6cc743c..24d64f373b 100644 --- a/test_runner/regress/test_tenant_delete.py +++ b/test_runner/regress/test_tenant_delete.py @@ -1,5 +1,7 @@ import enum import os +import shutil +from pathlib import Path import pytest from fixtures.log_helper import log @@ -13,13 +15,18 @@ from fixtures.pageserver.http import PageserverApiException from fixtures.pageserver.utils import ( MANY_SMALL_LAYERS_TENANT_CONFIG, assert_prefix_empty, + assert_prefix_not_empty, poll_for_remote_storage_iterations, tenant_delete_wait_completed, wait_tenant_status_404, wait_until_tenant_active, wait_until_tenant_state, ) -from fixtures.remote_storage import RemoteStorageKind, available_remote_storages +from fixtures.remote_storage import ( + RemoteStorageKind, + available_remote_storages, + available_s3_storages, +) from fixtures.types import TenantId from fixtures.utils import run_pg_bench_small @@ -64,6 +71,17 @@ def test_tenant_delete_smoke( run_pg_bench_small(pg_bin, endpoint.connstr()) wait_for_last_flush_lsn(env, endpoint, tenant=tenant_id, timeline=timeline_id) + if remote_storage_kind in available_s3_storages(): + assert_prefix_not_empty( + neon_env_builder, + prefix="/".join( + ( + "tenants", + str(tenant_id), + ) + ), + ) + parent = timeline iterations = poll_for_remote_storage_iterations(remote_storage_kind) @@ -73,7 +91,7 @@ def test_tenant_delete_smoke( tenant_path = env.tenant_dir(tenant_id=tenant_id) assert not tenant_path.exists() - if remote_storage_kind in [RemoteStorageKind.MOCK_S3, 
RemoteStorageKind.REAL_S3]: + if remote_storage_kind in available_s3_storages(): assert_prefix_empty( neon_env_builder, prefix="/".join( @@ -189,6 +207,17 @@ def test_delete_tenant_exercise_crash_safety_failpoints( else: last_flush_lsn_upload(env, endpoint, tenant_id, timeline_id) + if remote_storage_kind in available_s3_storages(): + assert_prefix_not_empty( + neon_env_builder, + prefix="/".join( + ( + "tenants", + str(tenant_id), + ) + ), + ) + ps_http.configure_failpoints((failpoint, "return")) iterations = poll_for_remote_storage_iterations(remote_storage_kind) @@ -241,8 +270,12 @@ def test_delete_tenant_exercise_crash_safety_failpoints( tenant_delete_wait_completed(ps_http, tenant_id, iterations=iterations) - # Check remote is impty - if remote_storage_kind is RemoteStorageKind.MOCK_S3: + tenant_dir = env.tenant_dir(tenant_id) + # Check local is empty + assert not tenant_dir.exists() + + # Check remote is empty + if remote_storage_kind in available_s3_storages(): assert_prefix_empty( neon_env_builder, prefix="/".join( @@ -253,10 +286,118 @@ def test_delete_tenant_exercise_crash_safety_failpoints( ), ) - tenant_dir = env.tenant_dir(tenant_id) - # Check local is empty - assert not tenant_dir.exists() + +# TODO resume deletion (https://github.com/neondatabase/neon/issues/5006) +@pytest.mark.parametrize("remote_storage_kind", available_remote_storages()) +def test_deleted_tenant_ignored_on_attach( + neon_env_builder: NeonEnvBuilder, + remote_storage_kind: RemoteStorageKind, + pg_bin: PgBin, +): + neon_env_builder.enable_remote_storage( + remote_storage_kind=remote_storage_kind, + test_name="test_deleted_tenant_ignored_on_attach", + ) + + env = neon_env_builder.init_start(initial_tenant_conf=MANY_SMALL_LAYERS_TENANT_CONFIG) + + tenant_id = env.initial_tenant + + ps_http = env.pageserver.http_client() + # create two timelines + for timeline in ["first", "second"]: + timeline_id = env.neon_cli.create_timeline(timeline, tenant_id=tenant_id) + with env.endpoints.create_start(timeline, tenant_id=tenant_id) as endpoint: + run_pg_bench_small(pg_bin, endpoint.connstr()) + wait_for_last_flush_lsn(env, endpoint, tenant=tenant_id, timeline=timeline_id) + + # sanity check, data should be there + if remote_storage_kind in available_s3_storages(): + assert_prefix_not_empty( + neon_env_builder, + prefix="/".join( + ( + "tenants", + str(tenant_id), + ) + ), + ) + + # failpoint before we remove index_part from s3 + failpoint = "timeline-delete-before-index-delete" + ps_http.configure_failpoints((failpoint, "return")) + + env.pageserver.allowed_errors.extend( + ( + # allow errors caused by failpoints + f".*failpoint: {failpoint}", + # It appears when we stopped flush loop during deletion (attempt) and then pageserver is stopped + ".*freeze_and_flush_on_shutdown.*failed to freeze and flush: cannot flush frozen layers when flush_loop is not running, state is Exited", + # error from http response is also logged + ".*InternalServerError\\(Tenant is marked as deleted on remote storage.*", + '.*shutdown_pageserver{exit_code=0}: stopping left-over name="remote upload".*', + ) + ) + + iterations = poll_for_remote_storage_iterations(remote_storage_kind) + + ps_http.tenant_delete(tenant_id) + + tenant_info = wait_until_tenant_state( + pageserver_http=ps_http, + tenant_id=tenant_id, + expected_state="Broken", + iterations=iterations, + ) + + if remote_storage_kind in available_s3_storages(): + assert_prefix_not_empty( + neon_env_builder, + prefix="/".join( + ( + "tenants", + str(tenant_id), + ) + ), + ) + + reason = 
tenant_info["state"]["data"]["reason"] + # failpoint may not be the only error in the stack + assert reason.endswith(f"failpoint: {failpoint}"), reason + + # now we stop pageserver and remove local tenant state + env.endpoints.stop_all() + env.pageserver.stop() + + dir_to_clear = Path(env.repo_dir) / "tenants" + shutil.rmtree(dir_to_clear) + os.mkdir(dir_to_clear) + + env.pageserver.start() + + # now we call attach + with pytest.raises( + PageserverApiException, match="Tenant is marked as deleted on remote storage" + ): + ps_http.tenant_attach(tenant_id=tenant_id) + + # delete should be resumed (not yet) + # wait_tenant_status_404(ps_http, tenant_id, iterations) + + # we shouldn've created tenant dir on disk + tenant_path = env.tenant_dir(tenant_id=tenant_id) + assert not tenant_path.exists() + + if remote_storage_kind in available_s3_storages(): + assert_prefix_not_empty( + neon_env_builder, + prefix="/".join( + ( + "tenants", + str(tenant_id), + ) + ), + ) # TODO test concurrent deletions with "hang" failpoint -# TODO test tenant delete continues after attach diff --git a/test_runner/regress/test_timeline_delete.py b/test_runner/regress/test_timeline_delete.py index a48c2186de..7d2d3304e2 100644 --- a/test_runner/regress/test_timeline_delete.py +++ b/test_runner/regress/test_timeline_delete.py @@ -18,6 +18,7 @@ from fixtures.neon_fixtures import ( from fixtures.pageserver.http import PageserverApiException from fixtures.pageserver.utils import ( assert_prefix_empty, + assert_prefix_not_empty, poll_for_remote_storage_iterations, timeline_delete_wait_completed, wait_for_last_record_lsn, @@ -30,6 +31,7 @@ from fixtures.remote_storage import ( LocalFsStorage, RemoteStorageKind, available_remote_storages, + available_s3_storages, ) from fixtures.types import Lsn, TenantId, TimelineId from fixtures.utils import query_scalar, wait_until @@ -212,6 +214,19 @@ def test_delete_timeline_exercise_crash_safety_failpoints( else: last_flush_lsn_upload(env, endpoint, env.initial_tenant, timeline_id) + if remote_storage_kind in available_s3_storages(): + assert_prefix_not_empty( + neon_env_builder, + prefix="/".join( + ( + "tenants", + str(env.initial_tenant), + "timelines", + str(timeline_id), + ) + ), + ) + env.pageserver.allowed_errors.append(f".*{timeline_id}.*failpoint: {failpoint}") # It appears when we stopped flush loop during deletion and then pageserver is stopped env.pageserver.allowed_errors.append( @@ -298,7 +313,7 @@ def test_delete_timeline_exercise_crash_safety_failpoints( ps_http, env.initial_tenant, timeline_id, iterations=iterations ) - # Check remote is impty + # Check remote is empty if remote_storage_kind is RemoteStorageKind.MOCK_S3: assert_prefix_empty( neon_env_builder, @@ -739,6 +754,19 @@ def test_timeline_delete_works_for_remote_smoke( timeline_ids.append(timeline_id) + for timeline_id in timeline_ids: + assert_prefix_not_empty( + neon_env_builder, + prefix="/".join( + ( + "tenants", + str(env.initial_tenant), + "timelines", + str(timeline_id), + ) + ), + ) + for timeline_id in reversed(timeline_ids): # note that we need to finish previous deletion before scheduling next one # otherwise we can get an "HasChildren" error if deletion is not fast enough (real_s3) @@ -758,11 +786,7 @@ def test_timeline_delete_works_for_remote_smoke( # for some reason the check above doesnt immediately take effect for the below. # Assume it is mock server inconsistency and check twice. 
- wait_until( - 2, - 0.5, - lambda: assert_prefix_empty(neon_env_builder), - ) + wait_until(2, 0.5, lambda: assert_prefix_empty(neon_env_builder)) def test_delete_orphaned_objects( From 3e4710c59e63171cc355aff5feaf8dac7275fa0c Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Thu, 17 Aug 2023 12:05:54 +0100 Subject: [PATCH 20/40] proxy: add more sasl logs (#5012) ## Problem A customer is having trouble connecting to neon from their production environment. The logs show a mix of "Internal error" and "authentication protocol violation" but not the full error ## Summary of changes Make sure we don't miss any logs during SASL/SCRAM --- proxy/src/auth/backend/classic.rs | 13 ++++++++++++- proxy/src/sasl/stream.rs | 6 +++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/proxy/src/auth/backend/classic.rs b/proxy/src/auth/backend/classic.rs index 46bd215f3b..15d6f88203 100644 --- a/proxy/src/auth/backend/classic.rs +++ b/proxy/src/auth/backend/classic.rs @@ -36,7 +36,18 @@ pub(super) async fn authenticate( AuthInfo::Scram(secret) => { info!("auth endpoint chooses SCRAM"); let scram = auth::Scram(&secret); - let client_key = match flow.begin(scram).await?.authenticate().await? { + + let auth_flow = flow.begin(scram).await.map_err(|error| { + warn!(?error, "error sending scram acknowledgement"); + error + })?; + + let auth_outcome = auth_flow.authenticate().await.map_err(|error| { + warn!(?error, "error processing scram messages"); + error + })?; + + let client_key = match auth_outcome { sasl::Outcome::Success(key) => key, sasl::Outcome::Failure(reason) => { info!("auth backend failed with an error: {reason}"); diff --git a/proxy/src/sasl/stream.rs b/proxy/src/sasl/stream.rs index b24cc4bf44..9115b0f61a 100644 --- a/proxy/src/sasl/stream.rs +++ b/proxy/src/sasl/stream.rs @@ -4,6 +4,7 @@ use super::{messages::ServerMessage, Mechanism}; use crate::stream::PqStream; use std::io; use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::info; /// Abstracts away all peculiarities of the libpq's protocol. pub struct SaslStream<'a, S> { @@ -68,7 +69,10 @@ impl SaslStream<'_, S> { ) -> super::Result> { loop { let input = self.recv().await?; - let step = mechanism.exchange(input)?; + let step = mechanism.exchange(input).map_err(|error| { + info!(?error, "error during SASL exchange"); + error + })?; use super::Step; return Ok(match step { From 64fc7eafcd865c806e02a547835c82296f5addf9 Mon Sep 17 00:00:00 2001 From: Dmitry Rodionov Date: Thu, 17 Aug 2023 15:37:28 +0300 Subject: [PATCH 21/40] Increase timeout once again. (#5021) When failpoint is early in deletion process it takes longer to complete after failpoint is removed. 
Example was: https://neon-github-public-dev.s3.amazonaws.com/reports/main/5889544346/index.html#suites/3556ed71f2d69272a7014df6dcb02317/49826c68ce8492b1 --- test_runner/fixtures/pageserver/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_runner/fixtures/pageserver/utils.py b/test_runner/fixtures/pageserver/utils.py index b61878c2a6..5acd6be9fa 100644 --- a/test_runner/fixtures/pageserver/utils.py +++ b/test_runner/fixtures/pageserver/utils.py @@ -315,4 +315,4 @@ MANY_SMALL_LAYERS_TENANT_CONFIG = { def poll_for_remote_storage_iterations(remote_storage_kind: RemoteStorageKind) -> int: - return 30 if remote_storage_kind is RemoteStorageKind.REAL_S3 else 10 + return 40 if remote_storage_kind is RemoteStorageKind.REAL_S3 else 10 From 6af5f9bfe0cf4e8b3544ee27290303bce2c4dc1e Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Thu, 17 Aug 2023 17:30:25 +0300 Subject: [PATCH 22/40] fix: format context (#5022) We return an error with unformatted `{timeline_id}`. --- pageserver/src/tenant.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 524e3a60a1..cedb381ccc 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -1104,8 +1104,9 @@ impl Tenant { { match e { LoadLocalTimelineError::Load(source) => { - return Err(anyhow::anyhow!(source) - .context("Failed to load local timeline: {timeline_id}")) + return Err(anyhow::anyhow!(source)).with_context(|| { + format!("Failed to load local timeline: {timeline_id}") + }) } LoadLocalTimelineError::ResumeDeletion(source) => { // Make sure resumed deletion wont fail loading for entire tenant. From 67af24191ee0026018e1da47a0697f5ad53d6d1a Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Thu, 17 Aug 2023 19:27:30 +0300 Subject: [PATCH 23/40] test: cleanup remote_timeline_client tests (#5013) I will have to change these as I change remote_timeline_client api in #4938. So a bit of cleanup, handle my comments which were just resolved during initial review. 
Cleanup: - use unwrap in tests instead of mixed `?` and `unwrap` - use `Handle` instead of `&'static Reactor` to make the RemoteTimelineClient more natural - use arrays in tests - use plain `#[tokio::test]` --- .../src/tenant/remote_timeline_client.rs | 154 +++++++++--------- 1 file changed, 77 insertions(+), 77 deletions(-) diff --git a/pageserver/src/tenant/remote_timeline_client.rs b/pageserver/src/tenant/remote_timeline_client.rs index f17d0f6b4d..8a50b0d268 100644 --- a/pageserver/src/tenant/remote_timeline_client.rs +++ b/pageserver/src/tenant/remote_timeline_client.rs @@ -222,7 +222,6 @@ use std::sync::{Arc, Mutex}; use remote_storage::{DownloadError, GenericRemoteStorage, RemotePath}; use std::ops::DerefMut; -use tokio::runtime::Runtime; use tracing::{debug, error, info, instrument, warn}; use tracing::{info_span, Instrument}; use utils::lsn::Lsn; @@ -311,7 +310,7 @@ pub enum PersistIndexPartWithDeletedFlagError { pub struct RemoteTimelineClient { conf: &'static PageServerConf, - runtime: &'static Runtime, + runtime: tokio::runtime::Handle, tenant_id: TenantId, timeline_id: TimelineId, @@ -338,7 +337,7 @@ impl RemoteTimelineClient { ) -> RemoteTimelineClient { RemoteTimelineClient { conf, - runtime: &BACKGROUND_RUNTIME, + runtime: BACKGROUND_RUNTIME.handle().to_owned(), tenant_id, timeline_id, storage_impl: remote_storage, @@ -994,7 +993,7 @@ impl RemoteTimelineClient { let tenant_id = self.tenant_id; let timeline_id = self.timeline_id; task_mgr::spawn( - self.runtime.handle(), + &self.runtime, TaskKind::RemoteUploadTask, Some(self.tenant_id), Some(self.timeline_id), @@ -1347,7 +1346,7 @@ mod tests { context::RequestContext, tenant::{ harness::{TenantHarness, TIMELINE_ID}, - Tenant, + Tenant, Timeline, }, DEFAULT_PG_VERSION, }; @@ -1356,7 +1355,6 @@ mod tests { collections::HashSet, path::{Path, PathBuf}, }; - use tokio::runtime::EnterGuard; use utils::lsn::Lsn; pub(super) fn dummy_contents(name: &str) -> Vec { @@ -1406,35 +1404,25 @@ mod tests { } struct TestSetup { - runtime: &'static tokio::runtime::Runtime, - entered_runtime: EnterGuard<'static>, harness: TenantHarness, tenant: Arc, + timeline: Arc, tenant_ctx: RequestContext, remote_fs_dir: PathBuf, client: Arc, } impl TestSetup { - fn new(test_name: &str) -> anyhow::Result { + async fn new(test_name: &str) -> anyhow::Result { // Use a current-thread runtime in the test - let runtime = Box::leak(Box::new( - tokio::runtime::Builder::new_current_thread() - .enable_all() - .build()?, - )); - let entered_runtime = runtime.enter(); - let test_name = Box::leak(Box::new(format!("remote_timeline_client__{test_name}"))); let harness = TenantHarness::create(test_name)?; - let (tenant, ctx) = runtime.block_on(harness.load()); + let (tenant, ctx) = harness.load().await; + // create an empty timeline directory - let _ = runtime.block_on(tenant.create_test_timeline( - TIMELINE_ID, - Lsn(8), - DEFAULT_PG_VERSION, - &ctx, - ))?; + let timeline = tenant + .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx) + .await?; let remote_fs_dir = harness.conf.workdir.join("remote_fs"); std::fs::create_dir_all(remote_fs_dir)?; @@ -1456,7 +1444,7 @@ mod tests { let client = Arc::new(RemoteTimelineClient { conf: harness.conf, - runtime, + runtime: tokio::runtime::Handle::current(), tenant_id: harness.tenant_id, timeline_id: TIMELINE_ID, storage_impl: storage, @@ -1468,10 +1456,9 @@ mod tests { }); Ok(Self { - runtime, - entered_runtime, harness, tenant, + timeline, tenant_ctx: ctx, remote_fs_dir, client, @@ -1480,8 +1467,8 @@ mod tests { } 
// Test scheduling - #[test] - fn upload_scheduling() -> anyhow::Result<()> { + #[tokio::test] + async fn upload_scheduling() { // Test outline: // // Schedule upload of a bunch of layers. Check that they are started immediately, not queued @@ -1497,25 +1484,26 @@ mod tests { // Schedule index upload. Check that it's queued let TestSetup { - runtime, - entered_runtime: _entered_runtime, harness, tenant: _tenant, + timeline: _timeline, tenant_ctx: _tenant_ctx, remote_fs_dir, client, - } = TestSetup::new("upload_scheduling").unwrap(); + } = TestSetup::new("upload_scheduling").await.unwrap(); let timeline_path = harness.timeline_path(&TIMELINE_ID); println!("workdir: {}", harness.conf.workdir.display()); let remote_timeline_dir = - remote_fs_dir.join(timeline_path.strip_prefix(&harness.conf.workdir)?); + remote_fs_dir.join(timeline_path.strip_prefix(&harness.conf.workdir).unwrap()); println!("remote_timeline_dir: {}", remote_timeline_dir.display()); let metadata = dummy_metadata(Lsn(0x10)); - client.init_upload_queue_for_empty_remote(&metadata)?; + client + .init_upload_queue_for_empty_remote(&metadata) + .unwrap(); // Create a couple of dummy files, schedule upload for them let layer_file_name_1: LayerFileName = "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(); @@ -1524,26 +1512,32 @@ mod tests { let content_1 = dummy_contents("foo"); let content_2 = dummy_contents("bar"); let content_3 = dummy_contents("baz"); - std::fs::write( - timeline_path.join(layer_file_name_1.file_name()), - &content_1, - )?; - std::fs::write( - timeline_path.join(layer_file_name_2.file_name()), - &content_2, - )?; - std::fs::write(timeline_path.join(layer_file_name_3.file_name()), content_3)?; - client.schedule_layer_file_upload( - &layer_file_name_1, - &LayerFileMetadata::new(content_1.len() as u64), - )?; - client.schedule_layer_file_upload( - &layer_file_name_2, - &LayerFileMetadata::new(content_2.len() as u64), - )?; + for (filename, content) in [ + (&layer_file_name_1, &content_1), + (&layer_file_name_2, &content_2), + (&layer_file_name_3, &content_3), + ] { + std::fs::write(timeline_path.join(filename.file_name()), content).unwrap(); + } + + client + .schedule_layer_file_upload( + &layer_file_name_1, + &LayerFileMetadata::new(content_1.len() as u64), + ) + .unwrap(); + client + .schedule_layer_file_upload( + &layer_file_name_2, + &LayerFileMetadata::new(content_2.len() as u64), + ) + .unwrap(); // Check that they are started immediately, not queued + // + // this works because we running within block_on, so any futures are now queued up until + // our next await point. { let mut guard = client.upload_queue.lock().unwrap(); let upload_queue = guard.initialized_mut().unwrap(); @@ -1557,7 +1551,9 @@ mod tests { // Schedule upload of index. 
Check that it is queued let metadata = dummy_metadata(Lsn(0x20)); - client.schedule_index_upload_for_metadata_update(&metadata)?; + client + .schedule_index_upload_for_metadata_update(&metadata) + .unwrap(); { let mut guard = client.upload_queue.lock().unwrap(); let upload_queue = guard.initialized_mut().unwrap(); @@ -1566,7 +1562,7 @@ mod tests { } // Wait for the uploads to finish - runtime.block_on(client.wait_completion())?; + client.wait_completion().await.unwrap(); { let mut guard = client.upload_queue.lock().unwrap(); let upload_queue = guard.initialized_mut().unwrap(); @@ -1576,7 +1572,7 @@ mod tests { } // Download back the index.json, and check that the list of files is correct - let index_part = match runtime.block_on(client.download_index_file())? { + let index_part = match client.download_index_file().await.unwrap() { MaybeDeletedIndexPart::IndexPart(index_part) => index_part, MaybeDeletedIndexPart::Deleted(_) => panic!("unexpectedly got deleted index part"), }; @@ -1588,17 +1584,19 @@ mod tests { &layer_file_name_2.file_name(), ], ); - let downloaded_metadata = index_part.parse_metadata()?; + let downloaded_metadata = index_part.parse_metadata().unwrap(); assert_eq!(downloaded_metadata, metadata); // Schedule upload and then a deletion. Check that the deletion is queued - let content_baz = dummy_contents("baz"); - std::fs::write(timeline_path.join("baz"), &content_baz)?; - client.schedule_layer_file_upload( - &layer_file_name_3, - &LayerFileMetadata::new(content_baz.len() as u64), - )?; - client.schedule_layer_file_deletion(&[layer_file_name_1.clone()])?; + client + .schedule_layer_file_upload( + &layer_file_name_3, + &LayerFileMetadata::new(content_3.len() as u64), + ) + .unwrap(); + client + .schedule_layer_file_deletion(&[layer_file_name_1.clone()]) + .unwrap(); { let mut guard = client.upload_queue.lock().unwrap(); let upload_queue = guard.initialized_mut().unwrap(); @@ -1620,7 +1618,7 @@ mod tests { ); // Finish them - runtime.block_on(client.wait_completion())?; + client.wait_completion().await.unwrap(); assert_remote_files( &[ @@ -1630,23 +1628,24 @@ mod tests { ], &remote_timeline_dir, ); - - Ok(()) } - #[test] - fn bytes_unfinished_gauge_for_layer_file_uploads() -> anyhow::Result<()> { + #[tokio::test] + async fn bytes_unfinished_gauge_for_layer_file_uploads() { // Setup let TestSetup { - runtime, harness, + tenant: _tenant, + timeline: _timeline, client, .. 
- } = TestSetup::new("metrics")?; + } = TestSetup::new("metrics").await.unwrap(); let metadata = dummy_metadata(Lsn(0x10)); - client.init_upload_queue_for_empty_remote(&metadata)?; + client + .init_upload_queue_for_empty_remote(&metadata) + .unwrap(); let timeline_path = harness.timeline_path(&TIMELINE_ID); @@ -1655,7 +1654,8 @@ mod tests { std::fs::write( timeline_path.join(layer_file_name_1.file_name()), &content_1, - )?; + ) + .unwrap(); #[derive(Debug, PartialEq)] struct BytesStartedFinished { @@ -1681,14 +1681,16 @@ mod tests { let init = get_bytes_started_stopped(); - client.schedule_layer_file_upload( - &layer_file_name_1, - &LayerFileMetadata::new(content_1.len() as u64), - )?; + client + .schedule_layer_file_upload( + &layer_file_name_1, + &LayerFileMetadata::new(content_1.len() as u64), + ) + .unwrap(); let pre = get_bytes_started_stopped(); - runtime.block_on(client.wait_completion())?; + client.wait_completion().await.unwrap(); let post = get_bytes_started_stopped(); @@ -1716,7 +1718,5 @@ mod tests { finished: Some(content_1.len()) } ); - - Ok(()) } } From ec10838aa4cbc0ece2d7874c7d55151dfbd50118 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Fri, 18 Aug 2023 11:44:08 +0100 Subject: [PATCH 24/40] proxy: pool connection logs (#5020) ## Problem Errors and notices that happen during a pooled connection lifecycle have no session identifiers ## Summary of changes Using a watch channel, we set the session ID whenever it changes. This way we can see the status of a connection for that session Also, adding a connection id to be able to search the entire connection lifecycle --- proxy/src/auth/backend/classic.rs | 1 - proxy/src/http/conn_pool.rs | 107 +++++++++++++++++++++++------- proxy/src/http/sql_over_http.rs | 17 +++-- proxy/src/http/websocket.rs | 4 +- 4 files changed, 95 insertions(+), 34 deletions(-) diff --git a/proxy/src/auth/backend/classic.rs b/proxy/src/auth/backend/classic.rs index 15d6f88203..9a056f1445 100644 --- a/proxy/src/auth/backend/classic.rs +++ b/proxy/src/auth/backend/classic.rs @@ -62,7 +62,6 @@ pub(super) async fn authenticate( } }; - info!("compute node's state has likely changed; requesting a wake-up"); let mut num_retries = 0; let mut node = loop { let wake_res = api.wake_compute(extra, creds).await; diff --git a/proxy/src/http/conn_pool.rs b/proxy/src/http/conn_pool.rs index 9bba846d57..180d10940c 100644 --- a/proxy/src/http/conn_pool.rs +++ b/proxy/src/http/conn_pool.rs @@ -1,16 +1,21 @@ use anyhow::Context; use async_trait::async_trait; use dashmap::DashMap; +use futures::future::poll_fn; use parking_lot::RwLock; use pbkdf2::{ password_hash::{PasswordHashString, PasswordHasher, PasswordVerifier, SaltString}, Params, Pbkdf2, }; use pq_proto::StartupMessageParams; -use std::fmt; use std::sync::atomic::{self, AtomicUsize}; use std::{collections::HashMap, sync::Arc}; +use std::{ + fmt, + task::{ready, Poll}, +}; use tokio::time; +use tokio_postgres::AsyncMessage; use crate::{auth, console}; use crate::{compute, config}; @@ -19,8 +24,8 @@ use super::sql_over_http::MAX_RESPONSE_SIZE; use crate::proxy::ConnectMechanism; -use tracing::error; -use tracing::info; +use tracing::{error, warn}; +use tracing::{info, info_span, Instrument}; pub const APP_NAME: &str = "sql_over_http"; const MAX_CONNS_PER_ENDPOINT: usize = 20; @@ -48,7 +53,7 @@ impl fmt::Display for ConnInfo { } struct ConnPoolEntry { - conn: tokio_postgres::Client, + conn: Client, _last_access: std::time::Instant, } @@ -110,8 +115,9 @@ impl GlobalConnPool { &self, conn_info: &ConnInfo, force_new: 
bool, - ) -> anyhow::Result { - let mut client: Option = None; + session_id: uuid::Uuid, + ) -> anyhow::Result { + let mut client: Option = None; let mut hash_valid = false; if !force_new { @@ -153,16 +159,17 @@ impl GlobalConnPool { // ok return cached connection if found and establish a new one otherwise let new_client = if let Some(client) = client { - if client.is_closed() { + if client.inner.is_closed() { info!("pool: cached connection '{conn_info}' is closed, opening a new one"); - connect_to_compute(self.proxy_config, conn_info).await + connect_to_compute(self.proxy_config, conn_info, session_id).await } else { info!("pool: reusing connection '{conn_info}'"); + client.session.send(session_id)?; return Ok(client); } } else { info!("pool: opening a new connection '{conn_info}'"); - connect_to_compute(self.proxy_config, conn_info).await + connect_to_compute(self.proxy_config, conn_info, session_id).await }; match &new_client { @@ -201,11 +208,7 @@ impl GlobalConnPool { new_client } - pub async fn put( - &self, - conn_info: &ConnInfo, - client: tokio_postgres::Client, - ) -> anyhow::Result<()> { + pub async fn put(&self, conn_info: &ConnInfo, client: Client) -> anyhow::Result<()> { let pool = self.get_or_create_endpoint_pool(&conn_info.hostname); // return connection to the pool @@ -282,11 +285,12 @@ impl GlobalConnPool { struct TokioMechanism<'a> { conn_info: &'a ConnInfo, + session_id: uuid::Uuid, } #[async_trait] impl ConnectMechanism for TokioMechanism<'_> { - type Connection = tokio_postgres::Client; + type Connection = Client; type ConnectError = tokio_postgres::Error; type Error = anyhow::Error; @@ -295,7 +299,7 @@ impl ConnectMechanism for TokioMechanism<'_> { node_info: &console::CachedNodeInfo, timeout: time::Duration, ) -> Result { - connect_to_compute_once(node_info, self.conn_info, timeout).await + connect_to_compute_once(node_info, self.conn_info, timeout, self.session_id).await } fn update_connect_config(&self, _config: &mut compute::ConnCfg) {} @@ -308,7 +312,8 @@ impl ConnectMechanism for TokioMechanism<'_> { async fn connect_to_compute( config: &config::ProxyConfig, conn_info: &ConnInfo, -) -> anyhow::Result { + session_id: uuid::Uuid, +) -> anyhow::Result { let tls = config.tls_config.as_ref(); let common_names = tls.and_then(|tls| tls.common_names.clone()); @@ -339,17 +344,27 @@ async fn connect_to_compute( .await? 
.context("missing cache entry from wake_compute")?; - crate::proxy::connect_to_compute(&TokioMechanism { conn_info }, node_info, &extra, &creds).await + crate::proxy::connect_to_compute( + &TokioMechanism { + conn_info, + session_id, + }, + node_info, + &extra, + &creds, + ) + .await } async fn connect_to_compute_once( node_info: &console::CachedNodeInfo, conn_info: &ConnInfo, timeout: time::Duration, -) -> Result { + mut session: uuid::Uuid, +) -> Result { let mut config = (*node_info.config).clone(); - let (client, connection) = config + let (client, mut connection) = config .user(&conn_info.username) .password(&conn_info.password) .dbname(&conn_info.dbname) @@ -358,11 +373,53 @@ async fn connect_to_compute_once( .connect(tokio_postgres::NoTls) .await?; - tokio::spawn(async move { - if let Err(e) = connection.await { - error!("connection error: {}", e); - } + let (tx, mut rx) = tokio::sync::watch::channel(session); + + let conn_id = uuid::Uuid::new_v4(); + let span = info_span!(parent: None, "connection", %conn_info, %conn_id); + span.in_scope(|| { + info!(%session, "new connection"); }); - Ok(client) + tokio::spawn( + poll_fn(move |cx| { + if matches!(rx.has_changed(), Ok(true)) { + session = *rx.borrow_and_update(); + info!(%session, "changed session"); + } + + let message = ready!(connection.poll_message(cx)); + + match message { + Some(Ok(AsyncMessage::Notice(notice))) => { + info!(%session, "notice: {}", notice); + Poll::Pending + } + Some(Ok(AsyncMessage::Notification(notif))) => { + warn!(%session, pid = notif.process_id(), channel = notif.channel(), "notification received"); + Poll::Pending + } + Some(Ok(_)) => { + warn!(%session, "unknown message"); + Poll::Pending + } + Some(Err(e)) => { + error!(%session, "connection error: {}", e); + Poll::Ready(()) + } + None => Poll::Ready(()), + } + }) + .instrument(span) + ); + + Ok(Client { + inner: client, + session: tx, + }) +} + +pub struct Client { + pub inner: tokio_postgres::Client, + session: tokio::sync::watch::Sender, } diff --git a/proxy/src/http/sql_over_http.rs b/proxy/src/http/sql_over_http.rs index 33375e63e9..4470996c04 100644 --- a/proxy/src/http/sql_over_http.rs +++ b/proxy/src/http/sql_over_http.rs @@ -16,6 +16,7 @@ use tokio_postgres::types::Type; use tokio_postgres::GenericClient; use tokio_postgres::IsolationLevel; use tokio_postgres::Row; +use tracing::Instrument; use url::Url; use super::conn_pool::ConnInfo; @@ -181,6 +182,7 @@ pub async fn handle( request: Request, sni_hostname: Option, conn_pool: Arc, + session_id: uuid::Uuid, ) -> anyhow::Result<(Value, HashMap)> { // // Determine the destination and connection params @@ -230,18 +232,18 @@ pub async fn handle( let body = hyper::body::to_bytes(request.into_body()).await?; let payload: Payload = serde_json::from_slice(&body)?; - let mut client = conn_pool.get(&conn_info, !allow_pool).await?; + let mut client = conn_pool.get(&conn_info, !allow_pool, session_id).await?; // // Now execute the query and return the result // let result = match payload { - Payload::Single(query) => query_to_json(&client, query, raw_output, array_mode) + Payload::Single(query) => query_to_json(&client.inner, query, raw_output, array_mode) .await .map(|x| (x, HashMap::default())), Payload::Batch(batch_query) => { let mut results = Vec::new(); - let mut builder = client.build_transaction(); + let mut builder = client.inner.build_transaction(); if let Some(isolation_level) = txn_isolation_level { builder = builder.isolation_level(isolation_level); } @@ -285,9 +287,12 @@ pub async fn handle( 
if allow_pool { // return connection to the pool - tokio::task::spawn(async move { - let _ = conn_pool.put(&conn_info, client).await; - }); + tokio::task::spawn( + async move { + let _ = conn_pool.put(&conn_info, client).await; + } + .in_current_span(), + ); } result diff --git a/proxy/src/http/websocket.rs b/proxy/src/http/websocket.rs index 794da17929..ba158dfca3 100644 --- a/proxy/src/http/websocket.rs +++ b/proxy/src/http/websocket.rs @@ -203,7 +203,7 @@ async fn ws_handler( // TODO: that deserves a refactor as now this function also handles http json client besides websockets. // Right now I don't want to blow up sql-over-http patch with file renames and do that as a follow up instead. } else if request.uri().path() == "/sql" && request.method() == Method::POST { - let result = sql_over_http::handle(request, sni_hostname, conn_pool) + let result = sql_over_http::handle(request, sni_hostname, conn_pool, session_id) .instrument(info_span!("sql-over-http")) .await; let status_code = match result { @@ -307,7 +307,7 @@ pub async fn task_main( ws_handler(req, config, conn_pool, cancel_map, session_id, sni_name) .instrument(info_span!( "ws-client", - session = format_args!("{session_id}") + session = %session_id )) .await } From f4da010aeedb206efce39ad3483ce994993916f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Fri, 18 Aug 2023 16:36:31 +0200 Subject: [PATCH 25/40] Make the compaction warning more tolerant (#5024) ## Problem The performance benchmark in `test_runner/performance/test_layer_map.py` is currently failing due to the warning added in #4888. ## Summary of changes The test mentioned has a `compaction_target_size` of 8192, which is just one page size. This is an unattainable goal, as we generate at least three pages: one for the header, one for the b-tree (minimally sized ones have just the root node in a single page), one for the data. Therefore, we add two pages to the warning limit. The warning text becomes a bit less accurate but I think this is okay. --- pageserver/src/tenant/timeline.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 502e5ed44e..e21d594cb9 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -3778,7 +3778,10 @@ impl Timeline { // Sync layers if !new_layers.is_empty() { // Print a warning if the created layer is larger than double the target size - let warn_limit = target_file_size * 2; + // Add two pages for potential overhead. This should in theory be already + // accounted for in the target calculation, but for very small targets, + // we still might easily hit the limit otherwise. + let warn_limit = target_file_size * 2 + page_cache::PAGE_SZ as u64 * 2; for layer in new_layers.iter() { if layer.desc.file_size > warn_limit { warn!( From 0b90411380776f9a6193c33dfd69f0c1541df58b Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Fri, 18 Aug 2023 17:48:55 +0200 Subject: [PATCH 26/40] Fix safekeeper recovery with auth (#5035) Fix missing a password in walrcv_connect for a safekeeper recovery. Add a test which restarts endpoint and triggers a recovery. 
--- pgxn/neon/libpqwalproposer.c | 2 +- pgxn/neon/walproposer.c | 16 ++++++++++++- test_runner/regress/test_wal_acceptor.py | 29 ++++++++++++++++++++++++ 3 files changed, 45 insertions(+), 2 deletions(-) diff --git a/pgxn/neon/libpqwalproposer.c b/pgxn/neon/libpqwalproposer.c index ed3b8a817f..ce9a1475d3 100644 --- a/pgxn/neon/libpqwalproposer.c +++ b/pgxn/neon/libpqwalproposer.c @@ -74,7 +74,7 @@ walprop_connect_start(char *conninfo, char *password) if (password) { keywords[n] = "password"; - values[n] = neon_auth_token; + values[n] = password; n++; } keywords[n] = "dbname"; diff --git a/pgxn/neon/walproposer.c b/pgxn/neon/walproposer.c index 807fd5c91b..d9999ef2b1 100644 --- a/pgxn/neon/walproposer.c +++ b/pgxn/neon/walproposer.c @@ -1393,8 +1393,22 @@ WalProposerRecovery(int donor, TimeLineID timeline, XLogRecPtr startpos, XLogRec char *err; WalReceiverConn *wrconn; WalRcvStreamOptions options; + char conninfo[MAXCONNINFO]; - wrconn = walrcv_connect(safekeeper[donor].conninfo, false, "wal_proposer_recovery", &err); + if (!neon_auth_token) + { + memcpy(conninfo, safekeeper[donor].conninfo, MAXCONNINFO); + } + else + { + int written = 0; + + written = snprintf((char *) conninfo, MAXCONNINFO, "password=%s %s", neon_auth_token, safekeeper[donor].conninfo); + if (written > MAXCONNINFO || written < 0) + elog(FATAL, "could not append password to the safekeeper connection string"); + } + + wrconn = walrcv_connect(conninfo, false, "wal_proposer_recovery", &err); if (!wrconn) { ereport(WARNING, diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index 6695819899..c471b18db7 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -981,6 +981,35 @@ def test_sk_auth(neon_env_builder: NeonEnvBuilder): connector.safe_psql("IDENTIFY_SYSTEM", port=sk.port.pg_tenant_only, password=tenant_token) +# Try restarting endpoint with enabled auth. +def test_restart_endpoint(neon_env_builder: NeonEnvBuilder): + neon_env_builder.auth_enabled = True + neon_env_builder.num_safekeepers = 3 + env = neon_env_builder.init_start() + + env.neon_cli.create_branch("test_sk_auth_restart_endpoint") + endpoint = env.endpoints.create_start("test_sk_auth_restart_endpoint") + + with closing(endpoint.connect()) as conn: + with conn.cursor() as cur: + cur.execute("create table t(i int)") + + # Restarting endpoints and random safekeepers, to trigger recovery. + for _i in range(3): + random_sk = random.choice(env.safekeepers) + random_sk.stop() + + with closing(endpoint.connect()) as conn: + with conn.cursor() as cur: + start = random.randint(1, 100000) + end = start + random.randint(1, 10000) + cur.execute("insert into t select generate_series(%s,%s)", (start, end)) + + endpoint.stop() + random_sk.start() + endpoint.start() + + class SafekeeperEnv: def __init__( self, From 0a082aee77986326dac6fe6284edb1fd3ccf31a1 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Fri, 18 Aug 2023 19:36:25 +0300 Subject: [PATCH 27/40] test: allow race with flush and stopped queue (#5027) A lucky race can happen with the shutdown order I guess right now. Seen in [test_tenant_delete_smoke]. The message is not the greatest to match against. 
[test_tenant_delete_smoke]: https://neon-github-public-dev.s3.amazonaws.com/reports/main/5892262320/index.html#suites/3556ed71f2d69272a7014df6dcb02317/189a0d1245fb5a8c --- test_runner/regress/test_tenant_delete.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test_runner/regress/test_tenant_delete.py b/test_runner/regress/test_tenant_delete.py index 24d64f373b..5ffb713bba 100644 --- a/test_runner/regress/test_tenant_delete.py +++ b/test_runner/regress/test_tenant_delete.py @@ -48,6 +48,11 @@ def test_tenant_delete_smoke( env = neon_env_builder.init_start() + # lucky race with stopping from flushing a layer we fail to schedule any uploads + env.pageserver.allowed_errors.append( + ".*layer flush task.+: could not flush frozen layer: update_metadata_file" + ) + ps_http = env.pageserver.http_client() # first try to delete non existing tenant From 7a63685cde1a514c75c2a3799e3f3d0e8d1e8a99 Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Fri, 18 Aug 2023 19:31:03 +0200 Subject: [PATCH 28/40] simplify page-caching of EphemeralFile (#4994) (This PR is the successor of https://github.com/neondatabase/neon/pull/4984 ) ## Summary The current way in which `EphemeralFile` uses `PageCache` complicates the Pageserver code base to a degree that isn't worth it. This PR refactors how we cache `EphemeralFile` contents, by exploiting the append-only nature of `EphemeralFile`. The result is that `PageCache` only holds `ImmutableFilePage` and `MaterializedPage`. These types of pages are read-only and evictable without write-back. This allows us to remove the writeback code from `PageCache`, also eliminating an entire failure mode. Futher, many great open-source libraries exist to solve the problem of a read-only cache, much better than our `page_cache.rs` (e.g., better replacement policy, less global locking). With this PR, we can now explore using them. ## Problem & Analysis Before this PR, `PageCache` had three types of pages: * `ImmutableFilePage`: caches Delta / Image layer file contents * `MaterializedPage`: caches results of Timeline::get (page materialization) * `EphemeralPage`: caches `EphemeralFile` contents `EphemeralPage` is quite different from `ImmutableFilePage` and `MaterializedPage`: * Immutable and materialized pages are for the acceleration of (future) reads of the same data using `PAGE_CACHE_SIZE * PAGE_SIZE` bytes of DRAM. * Ephemeral pages are a write-back cache of `EphemeralFile` contents, i.e., if there is pressure in the page cache, we spill `EphemeralFile` contents to disk. `EphemeralFile` is only used by `InMemoryLayer`, for the following purposes: * **write**: when filling up the `InMemoryLayer`, via `impl BlobWriter for EphemeralFile` * **read**: when doing **page reconstruction** for a page@lsn that isn't written to disk * **read**: when writing L0 layer files, we re-read the `InMemoryLayer` and put the contents into the L0 delta writer (**`create_delta_layer`**). This happens every 10min or when InMemoryLayer reaches 256MB in size. The access patterns of the `InMemoryLayer` use case are as follows: * **write**: via `BlobWriter`, strictly append-only * **read for page reconstruction**: via `BlobReader`, random * **read for `create_delta_layer`**: via `BlobReader`, dependent on data, but generally random. Why? 
* in classical LSM terms, this function is what writes the memory-resident `C0` tree into the disk-resident `C1` tree * in our system, though, the values of InMemoryLayer are stored in an EphemeralFile, and hence they are not guaranteed to be memory-resident * the function reads `Value`s in `Key, LSN` order, which is `!=` insert order What do these `EphemeralFile`-level access patterns mean for the page cache? * **write**: * the common case is that `Value` is a WAL record, and if it isn't a full-page-image WAL record, then it's smaller than `PAGE_SIZE` * So, the `EphemeralPage` pages act as a buffer for these `< PAGE_CACHE` sized writes. * If there's no page cache eviction between subsequent `InMemoryLayer::put_value` calls, the `EphemeralPage` is still resident, so the page cache avoids doing a `write` system call. * In practice, a busy page server will have page cache evictions because we only configure 64MB of page cache size. * **reads for page reconstruction**: read acceleration, just as for the other page types. * **reads for `create_delta_layer`**: * The `Value` reads happen through a `BlockCursor`, which optimizes the case of repeated reads from the same page. * So, the best case is that subsequent values are located on the same page; hence `BlockCursor`s buffer is maximally effective. * The worst case is that each `Value` is on a different page; hence the `BlockCursor`'s 1-page-sized buffer is ineffective. * The best case translates into `256MB/PAGE_SIZE` page cache accesses, one per page. * the worst case translates into `#Values` page cache accesses * again, the page cache accesses must be assumed to be random because the `Value`s aren't accessed in insertion order but `Key, LSN` order. ## Summary of changes Preliminaries for this PR were: - #5003 - #5004 - #5005 - uncommitted microbenchmark in #5011 Based on the observations outlined above, this PR makes the following changes: * Rip out `EphemeralPage` from `page_cache.rs` * Move the `block_io::FileId` to `page_cache::FileId` * Add a `PAGE_SIZE`d buffer to the `EphemeralPage` struct. It's called `mutable_tail`. * Change `write_blob` to use `mutable_tail` for the write buffering instead of a page cache page. * if `mutable_tail` is full, it writes it out to disk, zeroes it out, and re-uses it. * There is explicitly no double-buffering, so that memory allocation per `EphemeralFile` instance is fixed. * Change `read_blob` to return different `BlockLease` variants depending on `blknum` * for the `blknum` that corresponds to the `mutable_tail`, return a ref to it * Rust borrowing rules prevent `write_blob` calls while refs are outstanding. * for all non-tail blocks, return a page-cached `ImmutablePage` * It is safe to page-cache these as ImmutablePage because EphemeralFile is append-only. ## Performance How doe the changes above affect performance? M claim is: not significantly. * **write path**: * before this PR, the `EphemeralFile::write_blob` didn't issue its own `write` system calls. * If there were enough free pages, it didn't issue *any* `write` system calls. * If it had to evict other `EphemeralPage`s to get pages a page for its writes (`get_buf_for_write`), the page cache code would implicitly issue the writeback of victim pages as needed. * With this PR, `EphemeralFile::write_blob` *always* issues *all* of its *own* `write` system calls. * Also, the writes are explicit instead of implicit through page cache write back, which will help #4743 * The perf impact of always doing the writes is the CPU overhead and syscall latency. 
* Before this PR, we might have never issued them if there were enough free pages. * We don't issue `fsync` and can expect the writes to only hit the kernel page cache. * There is also an advantage in issuing the writes directly: the perf impact is paid by the tenant that caused the writes, instead of whatever tenant evicts the `EphemeralPage`. * **reads for page reconstruction**: no impact. * The `write_blob` function pre-warms the page cache when it writes the `mutable_tail` to disk. * So, the behavior is the same as with the EphemeralPages before this PR. * **reads for `create_delta_layer`**: no impact. * Same argument as for page reconstruction. * Note for the future: * going through the page cache likely causes read amplification here. Why? * Due to the `Key,Lsn`-ordered access pattern, we don't read all the values in the page before moving to the next page. In the worst case, we might read the same page multiple times to read different `Values` from it. * So, it might be better to bypass the page cache here. * Idea drafts: * bypass PS page cache + prefetch pipeline + iovec-based IO * bypass PS page cache + use `copy_file_range` to copy from ephemeral file into the L0 delta file, without going through user space --- pageserver/src/page_cache.rs | 205 ++++---------- pageserver/src/tenant.rs | 3 - pageserver/src/tenant/block_io.rs | 32 +-- pageserver/src/tenant/ephemeral_file.rs | 262 +++++++----------- .../tenant/storage_layer/inmemory_layer.rs | 4 +- 5 files changed, 174 insertions(+), 332 deletions(-) diff --git a/pageserver/src/page_cache.rs b/pageserver/src/page_cache.rs index 8306ce4636..e1e696ddad 100644 --- a/pageserver/src/page_cache.rs +++ b/pageserver/src/page_cache.rs @@ -10,6 +10,42 @@ //! PostgreSQL buffer size, and a Slot struct for each buffer to contain //! information about what's stored in the buffer. //! +//! # Types Of Pages +//! +//! [`PageCache`] only supports immutable pages. +//! Hence there is no need to worry about coherency. +//! +//! Two types of pages are supported: +//! +//! * **Materialized pages**, filled & used by page reconstruction +//! * **Immutable File pages**, filled & used by [`crate::tenant::block_io`] and [`crate::tenant::ephemeral_file`]. +//! +//! Note that [`crate::tenant::ephemeral_file::EphemeralFile`] is generally mutable, but, it's append-only. +//! It uses the page cache only for the blocks that are already fully written and immutable. +//! +//! # Filling The Page Cache +//! +//! Page cache maps from a cache key to a buffer slot. +//! The cache key uniquely identifies the piece of data that is being cached. +//! +//! The cache key for **materialized pages** is [`TenantId`], [`TimelineId`], [`Key`], and [`Lsn`]. +//! Use [`PageCache::memorize_materialized_page`] and [`PageCache::lookup_materialized_page`] for fill & access. +//! +//! The cache key for **immutable file** pages is [`FileId`] and a block number. +//! Users of page cache that wish to page-cache an arbitrary (immutable!) on-disk file do the following: +//! * Have a mechanism to deterministically associate the on-disk file with a [`FileId`]. +//! * Get a [`FileId`] using [`next_file_id`]. +//! * Use the mechanism to associate the on-disk file with the returned [`FileId`]. +//! * Use [`PageCache::read_immutable_buf`] to get a [`ReadBufResult`]. +//! * If the page was already cached, it'll be the [`ReadBufResult::Found`] variant that contains +//! a read guard for the page. Just use it. +//! 
* If the page was not cached, it'll be the [`ReadBufResult::NotFound`] variant that contains +//! a write guard for the page. Fill the page with the contents of the on-disk file. +//! Then call [`PageWriteGuard::mark_valid`] to mark the page as valid. +//! Then try again to [`PageCache::read_immutable_buf`]. +//! Unless there's high cache pressure, the page should now be cached. +//! (TODO: allow downgrading the write guard to a read guard to ensure forward progress.) +//! //! # Locking //! //! There are two levels of locking involved: There's one lock for the "mapping" @@ -40,20 +76,18 @@ use std::{ collections::{hash_map::Entry, HashMap}, convert::TryInto, sync::{ - atomic::{AtomicU8, AtomicUsize, Ordering}, + atomic::{AtomicU64, AtomicU8, AtomicUsize, Ordering}, RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockError, }, }; use anyhow::Context; use once_cell::sync::OnceCell; -use tracing::error; use utils::{ id::{TenantId, TimelineId}, lsn::Lsn, }; -use crate::tenant::{block_io, ephemeral_file, writeback_ephemeral_file}; use crate::{metrics::PageCacheSizeMetrics, repository::Key}; static PAGE_CACHE: OnceCell = OnceCell::new(); @@ -87,6 +121,17 @@ pub fn get() -> &'static PageCache { pub const PAGE_SZ: usize = postgres_ffi::BLCKSZ as usize; const MAX_USAGE_COUNT: u8 = 5; +/// See module-level comment. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct FileId(u64); + +static NEXT_ID: AtomicU64 = AtomicU64::new(1); + +/// See module-level comment. +pub fn next_file_id() -> FileId { + FileId(NEXT_ID.fetch_add(1, Ordering::Relaxed)) +} + /// /// CacheKey uniquely identifies a "thing" to cache in the page cache. /// @@ -97,12 +142,8 @@ enum CacheKey { hash_key: MaterializedPageHashKey, lsn: Lsn, }, - EphemeralPage { - file_id: ephemeral_file::FileId, - blkno: u32, - }, ImmutableFilePage { - file_id: block_io::FileId, + file_id: FileId, blkno: u32, }, } @@ -128,7 +169,6 @@ struct Slot { struct SlotInner { key: Option, buf: &'static mut [u8; PAGE_SZ], - dirty: bool, } impl Slot { @@ -177,9 +217,7 @@ pub struct PageCache { /// can have a separate mapping map, next to this field. materialized_page_map: RwLock>>, - ephemeral_page_map: RwLock>, - - immutable_page_map: RwLock>, + immutable_page_map: RwLock>, /// The actual buffers with their metadata. slots: Box<[Slot]>, @@ -258,14 +296,6 @@ impl PageWriteGuard<'_> { ); self.valid = true; } - pub fn mark_dirty(&mut self) { - // only ephemeral pages can be dirty ATM. - assert!(matches!( - self.inner.key, - Some(CacheKey::EphemeralPage { .. }) - )); - self.inner.dirty = true; - } } impl Drop for PageWriteGuard<'_> { @@ -280,7 +310,6 @@ impl Drop for PageWriteGuard<'_> { let self_key = self.inner.key.as_ref().unwrap(); PAGE_CACHE.get().unwrap().remove_mapping(self_key); self.inner.key = None; - self.inner.dirty = false; } } } @@ -388,62 +417,16 @@ impl PageCache { Ok(()) } - // Section 1.2: Public interface functions for working with Ephemeral pages. + // Section 1.2: Public interface functions for working with immutable file pages. 
- pub fn read_ephemeral_buf( - &self, - file_id: ephemeral_file::FileId, - blkno: u32, - ) -> anyhow::Result { - let mut cache_key = CacheKey::EphemeralPage { file_id, blkno }; - - self.lock_for_read(&mut cache_key) - } - - pub fn write_ephemeral_buf( - &self, - file_id: ephemeral_file::FileId, - blkno: u32, - ) -> anyhow::Result { - let cache_key = CacheKey::EphemeralPage { file_id, blkno }; - - self.lock_for_write(&cache_key) - } - - /// Immediately drop all buffers belonging to given file, without writeback - pub fn drop_buffers_for_ephemeral(&self, drop_file_id: ephemeral_file::FileId) { - for slot_idx in 0..self.slots.len() { - let slot = &self.slots[slot_idx]; - - let mut inner = slot.inner.write().unwrap(); - if let Some(key) = &inner.key { - match key { - CacheKey::EphemeralPage { file_id, blkno: _ } if *file_id == drop_file_id => { - // remove mapping for old buffer - self.remove_mapping(key); - inner.key = None; - inner.dirty = false; - } - _ => {} - } - } - } - } - - // Section 1.3: Public interface functions for working with immutable file pages. - - pub fn read_immutable_buf( - &self, - file_id: block_io::FileId, - blkno: u32, - ) -> anyhow::Result { + pub fn read_immutable_buf(&self, file_id: FileId, blkno: u32) -> anyhow::Result { let mut cache_key = CacheKey::ImmutableFilePage { file_id, blkno }; self.lock_for_read(&mut cache_key) } - /// Immediately drop all buffers belonging to given file, without writeback - pub fn drop_buffers_for_immutable(&self, drop_file_id: block_io::FileId) { + /// Immediately drop all buffers belonging to given file + pub fn drop_buffers_for_immutable(&self, drop_file_id: FileId) { for slot_idx in 0..self.slots.len() { let slot = &self.slots[slot_idx]; @@ -456,7 +439,6 @@ impl PageCache { // remove mapping for old buffer self.remove_mapping(key); inner.key = None; - inner.dirty = false; } _ => {} } @@ -534,10 +516,6 @@ impl PageCache { CacheKey::MaterializedPage { .. } => { unreachable!("Materialized pages use lookup_materialized_page") } - CacheKey::EphemeralPage { .. } => ( - &crate::metrics::PAGE_CACHE.read_accesses_ephemeral, - &crate::metrics::PAGE_CACHE.read_hits_ephemeral, - ), CacheKey::ImmutableFilePage { .. } => ( &crate::metrics::PAGE_CACHE.read_accesses_immutable, &crate::metrics::PAGE_CACHE.read_hits_immutable, @@ -578,7 +556,6 @@ impl PageCache { // Make the slot ready let slot = &self.slots[slot_idx]; inner.key = Some(cache_key.clone()); - inner.dirty = false; slot.usage_count.store(1, Ordering::Relaxed); return Ok(ReadBufResult::NotFound(PageWriteGuard { @@ -640,7 +617,6 @@ impl PageCache { // Make the slot ready let slot = &self.slots[slot_idx]; inner.key = Some(cache_key.clone()); - inner.dirty = false; slot.usage_count.store(1, Ordering::Relaxed); return Ok(WriteBufResult::NotFound(PageWriteGuard { @@ -679,10 +655,6 @@ impl PageCache { *lsn = version.lsn; Some(version.slot_idx) } - CacheKey::EphemeralPage { file_id, blkno } => { - let map = self.ephemeral_page_map.read().unwrap(); - Some(*map.get(&(*file_id, *blkno))?) - } CacheKey::ImmutableFilePage { file_id, blkno } => { let map = self.immutable_page_map.read().unwrap(); Some(*map.get(&(*file_id, *blkno))?) @@ -706,10 +678,6 @@ impl PageCache { None } } - CacheKey::EphemeralPage { file_id, blkno } => { - let map = self.ephemeral_page_map.read().unwrap(); - Some(*map.get(&(*file_id, *blkno))?) - } CacheKey::ImmutableFilePage { file_id, blkno } => { let map = self.immutable_page_map.read().unwrap(); Some(*map.get(&(*file_id, *blkno))?) 
@@ -743,12 +711,6 @@ impl PageCache { panic!("could not find old key in mapping") } } - CacheKey::EphemeralPage { file_id, blkno } => { - let mut map = self.ephemeral_page_map.write().unwrap(); - map.remove(&(*file_id, *blkno)) - .expect("could not find old key in mapping"); - self.size_metrics.current_bytes_ephemeral.sub_page_sz(1); - } CacheKey::ImmutableFilePage { file_id, blkno } => { let mut map = self.immutable_page_map.write().unwrap(); map.remove(&(*file_id, *blkno)) @@ -788,17 +750,7 @@ impl PageCache { } } } - CacheKey::EphemeralPage { file_id, blkno } => { - let mut map = self.ephemeral_page_map.write().unwrap(); - match map.entry((*file_id, *blkno)) { - Entry::Occupied(entry) => Some(*entry.get()), - Entry::Vacant(entry) => { - entry.insert(slot_idx); - self.size_metrics.current_bytes_ephemeral.add_page_sz(1); - None - } - } - } + CacheKey::ImmutableFilePage { file_id, blkno } => { let mut map = self.immutable_page_map.write().unwrap(); match map.entry((*file_id, *blkno)) { @@ -849,25 +801,8 @@ impl PageCache { } }; if let Some(old_key) = &inner.key { - if inner.dirty { - if let Err(err) = Self::writeback(old_key, inner.buf) { - // Writing the page to disk failed. - // - // FIXME: What to do here, when? We could propagate the error to the - // caller, but victim buffer is generally unrelated to the original - // call. It can even belong to a different tenant. Currently, we - // report the error to the log and continue the clock sweep to find - // a different victim. But if the problem persists, the page cache - // could fill up with dirty pages that we cannot evict, and we will - // loop retrying the writebacks indefinitely. - error!("writeback of buffer {:?} failed: {}", old_key, err); - continue; - } - } - // remove mapping for old buffer self.remove_mapping(old_key); - inner.dirty = false; inner.key = None; } return Ok((slot_idx, inner)); @@ -875,28 +810,6 @@ impl PageCache { } } - fn writeback(cache_key: &CacheKey, buf: &[u8]) -> Result<(), std::io::Error> { - match cache_key { - CacheKey::MaterializedPage { - hash_key: _, - lsn: _, - } => Err(std::io::Error::new( - std::io::ErrorKind::Other, - "unexpected dirty materialized page", - )), - CacheKey::EphemeralPage { file_id, blkno } => { - writeback_ephemeral_file(*file_id, *blkno, buf) - } - CacheKey::ImmutableFilePage { - file_id: _, - blkno: _, - } => Err(std::io::Error::new( - std::io::ErrorKind::Other, - "unexpected dirty immutable page", - )), - } - } - /// Initialize a new page cache /// /// This should be called only once at page server startup. 
@@ -907,7 +820,6 @@ impl PageCache { let size_metrics = &crate::metrics::PAGE_CACHE_SIZE; size_metrics.max_bytes.set_page_sz(num_pages); - size_metrics.current_bytes_ephemeral.set_page_sz(0); size_metrics.current_bytes_immutable.set_page_sz(0); size_metrics.current_bytes_materialized_page.set_page_sz(0); @@ -917,11 +829,7 @@ impl PageCache { let buf: &mut [u8; PAGE_SZ] = chunk.try_into().unwrap(); Slot { - inner: RwLock::new(SlotInner { - key: None, - buf, - dirty: false, - }), + inner: RwLock::new(SlotInner { key: None, buf }), usage_count: AtomicU8::new(0), } }) @@ -929,7 +837,6 @@ impl PageCache { Self { materialized_page_map: Default::default(), - ephemeral_page_map: Default::default(), immutable_page_map: Default::default(), slots, next_evict_slot: AtomicUsize::new(0), diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index cedb381ccc..309020391f 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -136,9 +136,6 @@ pub use timeline::{ LocalLayerInfoForDiskUsageEviction, LogicalSizeCalculationCause, PageReconstructError, Timeline, }; -// re-export this function so that page_cache.rs can use it. -pub use crate::tenant::ephemeral_file::writeback as writeback_ephemeral_file; - // re-export for use in remote_timeline_client.rs pub use crate::tenant::metadata::save_metadata; diff --git a/pageserver/src/tenant/block_io.rs b/pageserver/src/tenant/block_io.rs index 3cc4e61a95..503e5bd4e6 100644 --- a/pageserver/src/tenant/block_io.rs +++ b/pageserver/src/tenant/block_io.rs @@ -6,7 +6,6 @@ use crate::page_cache::{self, PageReadGuard, ReadBufResult, PAGE_SZ}; use bytes::Bytes; use std::ops::{Deref, DerefMut}; use std::os::unix::fs::FileExt; -use std::sync::atomic::AtomicU64; /// This is implemented by anything that can read 8 kB (PAGE_SZ) /// blocks, using the page cache @@ -43,37 +42,34 @@ where } } -/// A block accessible for reading -/// -/// During builds with `#[cfg(test)]`, this is a proper enum -/// with two variants to support testing code. During normal -/// builds, it just has one variant and is thus a cheap newtype -/// wrapper of [`PageReadGuard`] -pub enum BlockLease { +/// Reference to an in-memory copy of an immutable on-disk block. +pub enum BlockLease<'a> { PageReadGuard(PageReadGuard<'static>), + EphemeralFileMutableTail(&'a [u8; PAGE_SZ]), #[cfg(test)] Rc(std::rc::Rc<[u8; PAGE_SZ]>), } -impl From> for BlockLease { - fn from(value: PageReadGuard<'static>) -> Self { +impl From> for BlockLease<'static> { + fn from(value: PageReadGuard<'static>) -> BlockLease<'static> { BlockLease::PageReadGuard(value) } } #[cfg(test)] -impl From> for BlockLease { +impl<'a> From> for BlockLease<'a> { fn from(value: std::rc::Rc<[u8; PAGE_SZ]>) -> Self { BlockLease::Rc(value) } } -impl Deref for BlockLease { +impl<'a> Deref for BlockLease<'a> { type Target = [u8; PAGE_SZ]; fn deref(&self) -> &Self::Target { match self { BlockLease::PageReadGuard(v) => v.deref(), + BlockLease::EphemeralFileMutableTail(v) => v, #[cfg(test)] BlockLease::Rc(v) => v.deref(), } @@ -116,13 +112,6 @@ where self.reader.read_blk(blknum) } } -static NEXT_ID: AtomicU64 = AtomicU64::new(1); -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct FileId(u64); - -fn next_file_id() -> FileId { - FileId(NEXT_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed)) -} /// An adapter for reading a (virtual) file using the page cache. /// @@ -132,7 +121,7 @@ pub struct FileBlockReader { pub file: F, /// Unique ID of this file, used as key in the page cache. 
- file_id: FileId, + file_id: page_cache::FileId, } impl FileBlockReader @@ -140,7 +129,7 @@ where F: FileExt, { pub fn new(file: F) -> Self { - let file_id = next_file_id(); + let file_id = page_cache::next_file_id(); FileBlockReader { file_id, file } } @@ -157,7 +146,6 @@ where F: FileExt, { fn read_blk(&self, blknum: u32) -> Result { - // Look up the right page let cache = page_cache::get(); loop { match cache diff --git a/pageserver/src/tenant/ephemeral_file.rs b/pageserver/src/tenant/ephemeral_file.rs index 237c17d852..5de9c24d90 100644 --- a/pageserver/src/tenant/ephemeral_file.rs +++ b/pageserver/src/tenant/ephemeral_file.rs @@ -2,54 +2,31 @@ //! used to keep in-memory layers spilled on disk. use crate::config::PageServerConf; -use crate::page_cache::{self, ReadBufResult, WriteBufResult, PAGE_SZ}; +use crate::page_cache::{self, PAGE_SZ}; use crate::tenant::blob_io::BlobWriter; use crate::tenant::block_io::{BlockLease, BlockReader}; use crate::virtual_file::VirtualFile; -use once_cell::sync::Lazy; use std::cmp::min; -use std::collections::HashMap; use std::fs::OpenOptions; use std::io::{self, ErrorKind}; use std::ops::DerefMut; use std::os::unix::prelude::FileExt; use std::path::PathBuf; -use std::sync::{Arc, RwLock}; +use std::sync::atomic::AtomicU64; use tracing::*; use utils::id::{TenantId, TimelineId}; -/// -/// This is the global cache of file descriptors (File objects). -/// -static EPHEMERAL_FILES: Lazy> = Lazy::new(|| { - RwLock::new(EphemeralFiles { - next_file_id: FileId(1), - files: HashMap::new(), - }) -}); - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct FileId(u64); - -impl std::fmt::Display for FileId { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -pub struct EphemeralFiles { - next_file_id: FileId, - - files: HashMap>, -} - pub struct EphemeralFile { - file_id: FileId, + page_cache_file_id: page_cache::FileId, + _tenant_id: TenantId, _timeline_id: TimelineId, - file: Arc, - - pub size: u64, + file: VirtualFile, + size: u64, + /// An ephemeral file is append-only. + /// We keep the last page, which can still be modified, in [`Self::mutable_tail`]. + /// The other pages, which can no longer be modified, are accessed through the page cache. + mutable_tail: [u8; PAGE_SZ], } impl EphemeralFile { @@ -58,74 +35,31 @@ impl EphemeralFile { tenant_id: TenantId, timeline_id: TimelineId, ) -> Result { - let mut l = EPHEMERAL_FILES.write().unwrap(); - let file_id = l.next_file_id; - l.next_file_id = FileId(l.next_file_id.0 + 1); + static NEXT_FILENAME: AtomicU64 = AtomicU64::new(1); + let filename_disambiguator = + NEXT_FILENAME.fetch_add(1, std::sync::atomic::Ordering::Relaxed); let filename = conf .timeline_path(&tenant_id, &timeline_id) - .join(PathBuf::from(format!("ephemeral-{}", file_id))); + .join(PathBuf::from(format!("ephemeral-{filename_disambiguator}"))); let file = VirtualFile::open_with_options( &filename, OpenOptions::new().read(true).write(true).create(true), )?; - let file_rc = Arc::new(file); - l.files.insert(file_id, file_rc.clone()); Ok(EphemeralFile { - file_id, + page_cache_file_id: page_cache::next_file_id(), _tenant_id: tenant_id, _timeline_id: timeline_id, - file: file_rc, + file, size: 0, + mutable_tail: [0u8; PAGE_SZ], }) } - fn fill_buffer(&self, buf: &mut [u8], blkno: u32) -> Result<(), io::Error> { - let mut off = 0; - while off < PAGE_SZ { - let n = self - .file - .read_at(&mut buf[off..], blkno as u64 * PAGE_SZ as u64 + off as u64)?; - - if n == 0 { - // Reached EOF. 
Fill the rest of the buffer with zeros. - const ZERO_BUF: [u8; PAGE_SZ] = [0u8; PAGE_SZ]; - - buf[off..].copy_from_slice(&ZERO_BUF[off..]); - break; - } - - off += n; - } - Ok(()) - } - - fn get_buf_for_write( - &self, - blkno: u32, - ) -> Result, io::Error> { - // Look up the right page - let cache = page_cache::get(); - let mut write_guard = match cache - .write_ephemeral_buf(self.file_id, blkno) - .map_err(|e| to_io_error(e, "Failed to write ephemeral buf"))? - { - WriteBufResult::Found(guard) => guard, - WriteBufResult::NotFound(mut guard) => { - // Read the page from disk into the buffer - // TODO: if we're overwriting the whole page, no need to read it in first - self.fill_buffer(guard.deref_mut(), blkno)?; - guard.mark_valid(); - - // And then fall through to modify it. - guard - } - }; - write_guard.mark_dirty(); - - Ok(write_guard) + pub(crate) fn size(&self) -> u64 { + self.size } } @@ -146,49 +80,74 @@ impl BlobWriter for EphemeralFile { blknum: u32, /// The offset inside the block identified by [`blknum`] to which [`push_bytes`] will write. off: usize, - /// Used by [`push_bytes`] to memoize the page cache write guard across calls to it. - memo_page_guard: MemoizedPageWriteGuard, - } - struct MemoizedPageWriteGuard { - guard: page_cache::PageWriteGuard<'static>, - /// The block number of the page in `guard`. - blknum: u32, } impl<'a> Writer<'a> { fn new(ephemeral_file: &'a mut EphemeralFile) -> io::Result> { - let blknum = (ephemeral_file.size / PAGE_SZ as u64) as u32; Ok(Writer { - blknum, + blknum: (ephemeral_file.size / PAGE_SZ as u64) as u32, off: (ephemeral_file.size % PAGE_SZ as u64) as usize, - memo_page_guard: MemoizedPageWriteGuard { - guard: ephemeral_file.get_buf_for_write(blknum)?, - blknum, - }, ephemeral_file, }) } #[inline(always)] fn push_bytes(&mut self, src: &[u8]) -> Result<(), io::Error> { - // `src_remaining` is the remaining bytes to be written let mut src_remaining = src; while !src_remaining.is_empty() { - let page = if self.memo_page_guard.blknum == self.blknum { - &mut self.memo_page_guard.guard - } else { - self.memo_page_guard.guard = - self.ephemeral_file.get_buf_for_write(self.blknum)?; - self.memo_page_guard.blknum = self.blknum; - &mut self.memo_page_guard.guard - }; - let dst_remaining = &mut page[self.off..]; + let dst_remaining = &mut self.ephemeral_file.mutable_tail[self.off..]; let n = min(dst_remaining.len(), src_remaining.len()); dst_remaining[..n].copy_from_slice(&src_remaining[..n]); self.off += n; src_remaining = &src_remaining[n..]; if self.off == PAGE_SZ { - // This block is done, move to next one. - self.blknum += 1; - self.off = 0; + match self.ephemeral_file.file.write_all_at( + &self.ephemeral_file.mutable_tail, + self.blknum as u64 * PAGE_SZ as u64, + ) { + Ok(_) => { + // Pre-warm the page cache with what we just wrote. + // This isn't necessary for coherency/correctness, but it's how we've always done it. + let cache = page_cache::get(); + match cache.read_immutable_buf( + self.ephemeral_file.page_cache_file_id, + self.blknum, + ) { + Ok(page_cache::ReadBufResult::Found(_guard)) => { + // This function takes &mut self, so, it shouldn't be possible to reach this point. 
+ unreachable!("we just wrote blknum {} and this function takes &mut self, so, no concurrent read_blk is possible", self.blknum); + } + Ok(page_cache::ReadBufResult::NotFound(mut write_guard)) => { + let buf: &mut [u8] = write_guard.deref_mut(); + debug_assert_eq!(buf.len(), PAGE_SZ); + buf.copy_from_slice(&self.ephemeral_file.mutable_tail); + write_guard.mark_valid(); + // pre-warm successful + } + Err(e) => { + error!("ephemeral_file write_blob failed to get immutable buf to pre-warm page cache: {e:?}"); + // fail gracefully, it's not the end of the world if we can't pre-warm the cache here + } + } + // Zero the buffer for re-use. + // Zeroing is critical for correcntess because the write_blob code below + // and similarly read_blk expect zeroed pages. + self.ephemeral_file.mutable_tail.fill(0); + // This block is done, move to next one. + self.blknum += 1; + self.off = 0; + } + Err(e) => { + return Err(std::io::Error::new( + ErrorKind::Other, + // order error before path because path is long and error is short + format!( + "ephemeral_file: write_blob: write-back full tail blk #{}: {:#}: {}", + self.blknum, + e, + self.ephemeral_file.file.path.display(), + ), + )); + } + } } } Ok(()) @@ -227,10 +186,7 @@ impl Drop for EphemeralFile { fn drop(&mut self) { // drop all pages from page cache let cache = page_cache::get(); - cache.drop_buffers_for_ephemeral(self.file_id); - - // remove entry from the hash map - EPHEMERAL_FILES.write().unwrap().files.remove(&self.file_id); + cache.drop_buffers_for_immutable(self.page_cache_file_id); // unlink the file let res = std::fs::remove_file(&self.file.path); @@ -250,54 +206,48 @@ impl Drop for EphemeralFile { } } -pub fn writeback(file_id: FileId, blkno: u32, buf: &[u8]) -> Result<(), io::Error> { - if let Some(file) = EPHEMERAL_FILES.read().unwrap().files.get(&file_id) { - match file.write_all_at(buf, blkno as u64 * PAGE_SZ as u64) { - Ok(_) => Ok(()), - Err(e) => Err(io::Error::new( - ErrorKind::Other, - format!( - "failed to write back to ephemeral file at {} error: {}", - file.path.display(), - e - ), - )), - } - } else { - Err(io::Error::new( - ErrorKind::Other, - "could not write back page, not found in ephemeral files hash", - )) - } -} - impl BlockReader for EphemeralFile { fn read_blk(&self, blknum: u32) -> Result { - // Look up the right page - let cache = page_cache::get(); - loop { - match cache - .read_ephemeral_buf(self.file_id, blknum) - .map_err(|e| to_io_error(e, "Failed to read ephemeral buf"))? - { - ReadBufResult::Found(guard) => return Ok(guard.into()), - ReadBufResult::NotFound(mut write_guard) => { - // Read the page from disk into the buffer - self.fill_buffer(write_guard.deref_mut(), blknum)?; - write_guard.mark_valid(); + let flushed_blknums = 0..self.size / PAGE_SZ as u64; + if flushed_blknums.contains(&(blknum as u64)) { + let cache = page_cache::get(); + loop { + match cache + .read_immutable_buf(self.page_cache_file_id, blknum) + .map_err(|e| { + std::io::Error::new( + std::io::ErrorKind::Other, + // order path before error because error is anyhow::Error => might have many contexts + format!( + "ephemeral file: read immutable page #{}: {}: {:#}", + blknum, + self.file.path.display(), + e, + ), + ) + })? 
{ + page_cache::ReadBufResult::Found(guard) => { + return Ok(BlockLease::PageReadGuard(guard)) + } + page_cache::ReadBufResult::NotFound(mut write_guard) => { + let buf: &mut [u8] = write_guard.deref_mut(); + debug_assert_eq!(buf.len(), PAGE_SZ); + self.file + .read_exact_at(&mut buf[..], blknum as u64 * PAGE_SZ as u64)?; + write_guard.mark_valid(); - // Swap for read lock - continue; - } - }; + // Swap for read lock + continue; + } + }; + } + } else { + debug_assert_eq!(blknum as u64, self.size / PAGE_SZ as u64); + Ok(BlockLease::EphemeralFileMutableTail(&self.mutable_tail)) } } } -fn to_io_error(e: anyhow::Error, context: &str) -> io::Error { - io::Error::new(ErrorKind::Other, format!("{context}: {e:#}")) -} - #[cfg(test)] mod tests { use super::*; diff --git a/pageserver/src/tenant/storage_layer/inmemory_layer.rs b/pageserver/src/tenant/storage_layer/inmemory_layer.rs index aa9d0884e0..d3ec78887d 100644 --- a/pageserver/src/tenant/storage_layer/inmemory_layer.rs +++ b/pageserver/src/tenant/storage_layer/inmemory_layer.rs @@ -230,11 +230,11 @@ impl std::fmt::Display for InMemoryLayer { impl InMemoryLayer { /// - /// Get layer size on the disk + /// Get layer size. /// pub async fn size(&self) -> Result { let inner = self.inner.read().await; - Ok(inner.file.size) + Ok(inner.file.size()) } /// From ed5bce7cba07a1a7f924152afd1c4e1f65b6c2e0 Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Fri, 18 Aug 2023 19:34:29 +0200 Subject: [PATCH 29/40] rfcs: archive my MVCC S3 Notion Proposal (#5040) This is a copy from the [original Notion page](https://www.notion.so/neondatabase/Proposal-Pageserver-MVCC-S3-Storage-8a424c0c7ec5459e89d3e3f00e87657c?pvs=4), taken on 2023-08-16. This is for archival mostly. The RFC that we're likely to go with is https://github.com/neondatabase/neon/pull/4919. --- docs/rfcs/026-pageserver-s3-mvcc.md | 316 ++++++++++++++++++++++++++++ 1 file changed, 316 insertions(+) create mode 100644 docs/rfcs/026-pageserver-s3-mvcc.md diff --git a/docs/rfcs/026-pageserver-s3-mvcc.md b/docs/rfcs/026-pageserver-s3-mvcc.md new file mode 100644 index 0000000000..2a8c925781 --- /dev/null +++ b/docs/rfcs/026-pageserver-s3-mvcc.md @@ -0,0 +1,316 @@ +This is a copy from the [original Notion page](https://www.notion.so/neondatabase/Proposal-Pageserver-MVCC-S3-Storage-8a424c0c7ec5459e89d3e3f00e87657c?pvs=4), taken on 2023-08-16. + +This is for archival mostly. +The RFC that we're likely to go with is https://github.com/neondatabase/neon/pull/4919. + +--- + +# Proposal: Pageserver MVCC S3 Storage + +tl;dr: this proposal enables Control Plane to attach a tenant to a new pageserver without being 100% certain that it has been detached from the old pageserver. This enables us to automate failover if a pageserver dies (no human in the loop). + +# Problem Statement + +The current Neon architecture requires the Control Plane to guarantee that a tenant is only attached to one pageserver at a time. If a tenant is attached to multiple pageservers simultaneously, the pageservers will overwrite each other’s changes in S3 for that tenant, resulting in data loss for that tenant. + +The above imposes limitations on tenant relocation and future designs for high availability. For instance, Control Plane cannot relocate a tenant to another pageserver before it is 100% certain that the tenant is detached from the source pageserver. 
If the source pageserver is unresponsive, the tenant detach procedure cannot proceed, and Control Plane has no choice but to wait for either the source to become responsive again, or rely on a node failure detection mechanism to detect that the source pageserver is dead, and give permission to skip the detachment step. Either way, the tenant is unavailable for an extended period, and we have no means to improve it in the current architecture. + +Note that there is no 100% correct node failure detection mechanism, and even techniques to accelerate failure detection, such as ********************************shoot-the-other-node-in-the-head,******************************** have their limits. So, we currently rely on humans as node failure detectors: they get alerted via PagerDuty, assess the situation under high stress, and make the decision. If they make the wrong call, or the apparent dead pageserver somehow resurrects later, we’ll have data loss. + +Also, by relying on humans, we’re [incurring needless unscalable toil](https://sre.google/sre-book/eliminating-toil/): as Neon grows, pageserver failures will become more and more frequent because our fleet grows. Each instance will need quick response time to minimize downtime for the affected tenants, which implies higher toil, higher resulting attrition, and/or higher personnel cost. + +Lastly, there are foreseeable needs by operation and product such as zero-downtime relocation and automatic failover/HA. For such features, the ability to have a tenant purposefully or accidentally attached to more than one pageserver will greatly reduce risk of data loss, and improve availability. + +# High-Level Idea + +The core idea is to evolve the per-Tenant S3 state to an MVCC-like scheme, allowing multiple pageservers to operate on the same tenant S3 state without interference. To make changes to S3, pageservers acquire long-running transactions from Control Plane. After opening a transaction, Pageservers make PUTs directly against S3, but they keys include the transaction ID, so overwrites never happen. Periodically, pageservers talk back to Control Plane to commit their transaction. This is where Control Plane enforces strict linearizability, favoring availability over work-conservation: commit is only granted if no transaction started after the one that’s requesting commit. Garbage collection is done through deadlists, and it’s simplified tremendously by above commit grant/reject policy. + +Minimal changes are required for safekeepers to allow WAL for a single timeline be consumed by more than one pageserver without premature truncation. + +**Above scheme makes it safe to attach tenants without a 100% correct node failure detection mechanism. Further, it makes it safe to interleave tenant-attachment to pageservers, unlocking new capabilities for (internal) product features:** + +- **Fast, Zero-Toil Failover on Network Partitions or Instance Failure**: if a pageserver is not reachable (network partition, hardware failure, overload) we want to spread its attached tenants to new pageservers to restore availability, within the range of *seconds*. We cannot afford gracious timeouts to maximize the probability that the unreachable pageserver has ceased writing to S3. This proposal enables us to attach the tenants to the replacement pageservers, and redirect their computes, without having to wait for confirmation that the unreachable pageserver has ceased writing to S3. 
+- **************************************Zero-Downtime Relocation:************************************** we want to be able to relocate tenants to different pageservers with minimized availability or a latency impact. This proposal enables us to attach the relocating Tenant to the destination Pageserver before detaching it from the source Pageserver. This can help minimize downtime because we can wait for the destination to catch up on WAL processing before redirecting Computes. + +# Design + +The core idea is to evolve the per-Tenant S3 state to a per-tenant MVCC-like scheme. + +To make S3 changes for a given tenant, Pageserver requests a transaction ID from control plane for that tenant. Without a transaction ID, Pageserver does not write to S3. + +Once Pageserver received a transaction ID it is allowed to produce new objects and overwrite objects created in this transaction. Pageserver is not allowed to delete any objects; instead, it marks the object as deleted by appending the key to the transaction’s deadlist for later deletion. Commits of transactions are serialized through Control Plane: when Pageserver wants to commit a transaction, it sends an RPC to Control Plane. Control Plane responds with a commit grant or commit reject message. Commit grant means that the transaction’s changes are now visible to subsequent transactions. Commit reject means that the transaction’s changes are not and never will be visible to another Pageserver instance, and the rejected Pageserver is to cease further activity on that tenant. + +## ****************************************************Commit grant/reject policy**************************************************** + +For the purposes of Pageserver, we want **linearizability** of a tenant’s S3 state. Since our transactions are scoped per tenant, it is sufficient for linearizability to grant commit if and only if no other transaction has been started since the commit-requesting transaction started. + +For example, consider the case of a single tenant, attached to Pageserver A. Pageserver A has an open transaction but becomes unresponsive. Control Plane decides to relocate the tenant to another Pageserver B. It need *not* wait for A to be 100%-certainly down before B can start uploading to S3 for that tenant. Instead, B can start a new transaction right away, make progress, and get commit grants; What about A? The transaction is RejectPending in Control Plane until A eventually becomes responsive again, tries to commit, gets a rejection, acknowledges it, and thus its transaction becomes RejectAcknowledge. If A is definitively dead, operator can also force-transition from state RejectPending to RejectAcknowledged. But critically, Control Plane doesn’t have for A’s transaction to become RejectAcknowledge before attaching the tenant to B. + +```mermaid +sequenceDiagram + + participant CP + participant A + participant S3 + participant B + + CP -->> A: attach tenant + activate A + A -->> CP: start txn + CP -->> A: txn=23, last_committed_txn=22 + + + Note over CP,A: network partition + CP --x A: heartbeat + CP --x A: heartbeat + + Note over CP: relocate tenant to avoid downtime + CP -->> B: attach tenant + activate B + B -->> CP: start txn + Note over CP: mark A's txn 23 as RejectPending + CP -->> B: txn=24, last-committed txn is 22 + B -->> S3: PUT X.layer.24
PUT index_part.json.24 referencing X.layer.24 + B -->> CP: request commit + CP -->> B: granted + B -->> CP: start txn + CP -->> B: txn=25, last_committed_txn=22 + + A -->> S3: PUT Y.layer.23
PUT index_part.json.23 referencing Y.layer.23 + A --x CP: request commit + A --x CP: request commit + + Note over CP,A: partition is over + + A -->> CP: request commit + + Note over CP: most recently started txn is 25, not 23, reject + + CP -->> A: reject + A -->> CP: acknowledge reject + + Note over CP: mark A's txn 23 as RejectAcknowledged + + deactivate A + + B -->> S3: PUT 000-FFF_X-Y.layer.**************25**************
... + + deactivate B + + +``` + +If a Pageserver gets a rejection to a commit request, it acknowledges rejection and cedes further S3 uploads for the tenant, until it receives a `/detach` request for the tenant (control plane has most likely attached the tenant to another pageserver in the meantime). + +In practice, Control Plane will probably extend the commit grant/reject schema above, taking into account the pageserver to which it last attached the tenant. In the above example, Control Plane could remember that the pageserver that is supposed to host the tenant is pageserver B, and reject start-txn and commit requests from pageserver A. It would also use such requests from A as a signal that A is reachable again, and retry the `/detach` . + + + +## ********************Visibility******************** + +We mentioned earlier that once a transaction commits, its changes are visible to subsequent transactions. But how does a given transaction know where to look for the data? There is no longer a single `index_part.json` per timeline, or a single `timelines/:timeline_id` prefix to look for; they’re all multi-versioned, suffixed by the txn number. +The solution is: at transaction start, Pageserver receives the last-committed transaction ID from Control Plane (`last_committed_txn` in the diagram). last_commited_txn is the upper bound for what is visible for the current transaction. Control Plane keeps track of each open transaction’s last_committed_txn for purposes of garbage collection (see later paragraph). +Equipped with last_committed_txn, Pageserver then discovers + +- the current index part of a timeline at `tenants/:tenant_id/timelines/:timeline_id/index_part.json.$last_committed_txn`. The `index_part.json.$last_committed_txn` has the exact same contents as the current architecture’s index_part.json, i.e. full list of layers. +- the list of existent timelines as part of the `attach` RPC from CP; + +There is no other S3 state per tenant, so, that’s all the visibility required. +An alternative to receiving the list of existent timelines from CP is to introduce a proper **********SetOfTimelines********** object in S3, and multi-version it just like above. For example, we could have a `tenants/:tenant_id/timelines.json.$txn` file that references `index_part.json.$last_committed_txn` . It can be added later if more separation between CP and PS is desired. + +So, the only MVCC’ed object types in this proposal are LayerFile and IndexPart (=individual timeline), but not the SetOfTimelines in a given tenant. Is this a problem? For example, the Pageserver’s garbage collection code needs to know the full set of timelines of a tenant. Otherwise it’ll make incorrect decisions. What if Pageserver A knows about timelines {R,S}, but another Pageserver B created an additional branch T, so, its set of timelines is {R,S,T}. Both pageservers will run GC code, and so, PS A may decide to delete a layer that’s still needed for branch T. Not a problem with this propsoal, because the effect of GC (i.e., layer deletion) is properly MVCC’ed. + +## Longevity Of Transactions & Availability + +Pageserver depends on Control Plane to start a new transaction. If ControlPlane is down, no new transactions can be started. + +Pageservers commit transactions based on a maximum amount of uncommitted changes that have accumulated in S3. A lower maximum increases dependence and load on ControlPlane which decreases availability. 
A higher maximum risks losing more work in the event of failover; the work will have to be re-done in a new transaction on the new node. + +Pageservers are persist the open txn id in local storage, so that they can resume the transaction after restart, without dependence on Control Plane. + +## **Operations** + +********PUTs:******** + +- **layer files** + - current architecture: layer files are supposed to be write-once, but actually, there are edge-cases where we PUT the same layer file name twice; namely if we PUT the file to S3 but crash before uploading the index part that references it; then detach + attach, and re-run compaction, which is non-deterministic. + - this proposal: with transactions, we can now upload layers and index_part.json concurrently, just need to make sure layer file upload is done before we request txn commit. +- **index part** upload: `index_part.json.$txn` may be created and subsequently overwritten multiple times in a transaction; it is an availability/work-loss trade-off how often to request a commit from CP. + +**************DELETEs**************: for deletion, we maintain a deadlist per transaction. It is located at `tenants/:tenant_id/deadlist/deadlist.json.$txn`. It is PUT once before the pageserver requests requests commit, and not changed after sending request to commit. An object created in the current txn need not (but can) be on the deadlist — it can be DELETEd immediately because it’s not visible to other transactions. An example use case would be an L0 layer that gets compacted within one transaction; or, if we ever start MVCC’ing the set of timelines of a tenant, a short-lived branch that is created & destroyed within one transaction. + + + +### Rationale For Deadlist.json + +Given that this proposal only MVCC’s layers and indexparts, one may ask why the deadlist isn’t part of indexpart. The reason is to not lose generality: the deadlist is just a list of keys; it is not necessary to understand the data format of the versioned object to process the deadlist. This is important for garbage collection / vacuuming, which we’ll come to in the next section. + +## Garbage Collection / Vacuuming + +After a transaction has reached reject-acknowledged state, Control Plane initiates a garbage collection procedure for the aborted transaction. + +Control Plane is in the unique position about transaction states. Here is a sketch of the exact transaction states and what Control Plane keeps track of. + +``` +struct Tenant { + ... + + txns: HashMap, + // the most recently started txn's id; only most recently sarted can win + next_winner_txn: Option, +} +struct Transaction { + id: TxnId, // immutable + last_committed_txn: TxnId, // immutable; the most recent txn in state `Committed` + // when self was started + pageserver_id: PageserverId, + state: enum { + Open, + Committed, + RejectPending, + RejectAcknowledged, // invariant: we know all S3 activity has ceded + GarbageCollected, + } +} +``` + +Object creations & deletions by a rejected transaction have never been visible to other transactions. That is true for both RejectPending and RejectAcknowledged states. The difference is that, in RejectPending, the pageserver may still be uploading to S3, whereas in RejectAcknowledged, Control Plane can be certain that all S3 activity in the name of that transaction has ceded. So, once a transaction reaches state RejectAcknowledged state, it is safe to DELETE all objects created by that transaction, and discard the transaction’s deadlists. 
A transaction T in state Committed has subsequent transactions that may or may not reference the objects it created. None of the subsequent transactions can reference the objects on T's deadlist, though, as per the Deadlist Invariant (see the previous section).

So, for garbage collection, we need to assess transactions in states Committed and RejectAcknowledged:

- Committed: delete the objects on the deadlist.
    - We don't need a LIST request here; the deadlist is sufficient. So it's really cheap.
    - This is **not true MVCC garbage collection**; by deleting the objects on Committed transaction T's deadlist, we might delete data referenced by other transactions that were concurrent with T, i.e., that started while T was still open. However, the fact that T committed means that the other transactions are RejectPending or RejectAcknowledged, so they don't matter. Pageservers executing these doomed RejectPending transactions must handle 404s for GETs gracefully, e.g., by trying to commit the txn so they observe the rejection they're destined to get anyway. 404s for RejectAcknowledged are handled below.
- RejectAcknowledged: delete all objects created in that txn, and discard its deadlists.
    - 404s / object-already-deleted responses must be expected, because of Committed garbage collection (see above).
    - How to get the list of objects created in a txn? An open but solvable design question. Ideas:
        - **Brute force**: within the tenant prefix, search for all keys ending in `.$txn` and delete them.
        - **WAL for PUTs**: before a txn PUTs an object, it logs to S3, or some other equally durable storage, that it's going to do so. If we log to S3, this means an additional WAL PUT per "real" PUT.
        - **LIST with reorg'ed S3 layout (preferred right now)**: lay out the S3 key space such that `$txn` comes first, i.e., `tenants/:tenant_id/$txn/timelines/:timeline_id/*.json.$txn`. That way, when we need to GC a RejectAcknowledged txn, we just LIST the entire `tenants/:tenant_id/$txn` prefix and delete it. The cost of GC for RejectAcknowledged transactions is thus proportional to the number of objects created in that transaction.

## Branches

This proposal only MVCC's layer files and index_part.json, but leaves the tenant object not MVCC'ed. We argued earlier that it's fine to ignore this for now, because

1. Control Plane can act as the source of truth for the set of timelines, and
2. the only operation that makes decisions based on the "set of timelines" is GC, which in turn only does layer deletions, and layer deletions ***are*** properly MVCC'ed.

Now that we've introduced garbage collection, let's elaborate a little more on (2). Recall our example from earlier: Pageserver A knows about timelines {R,S}, but another Pageserver B created an additional branch T, so its set of timelines is {R,S,T}. Both pageservers will run GC code, and so PS A may decide to delete a layer that's still needed for branch T.

How does the MVCC'ing of layer files protect us here? If A decides to delete that layer, the deletion is just on A's transaction's deadlist; the layer is still present in S3 and usable by B. If A commits first, B won't be able to commit, and the layers in timeline T will be vacuumed. If B commits first, A's deadlist is discarded and the layer continues to exist.
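To make the commit arbitration behind this concrete, here is a minimal sketch, assuming hypothetical names that mirror the `next_winner_txn` field in the state-machine sketch earlier:

```
// Illustrative sketch, not actual Control Plane code: only the most
// recently started transaction of a tenant (`next_winner_txn`) is
// granted a commit; every other transaction is doomed to rejection.

enum CommitResult {
    Granted,  // txn transitions to Committed
    Rejected, // txn transitions to RejectPending until the pageserver acks
}

fn handle_commit_request(requesting_txn: u64, next_winner_txn: Option<u64>) -> CommitResult {
    match next_winner_txn {
        Some(winner) if winner == requesting_txn => CommitResult::Granted,
        // A newer txn has been started (e.g. the tenant was attached
        // elsewhere), or no txn is open at all: reject.
        _ => CommitResult::Rejected,
    }
}
```

The loser observes the rejection at its next commit attempt and ceases uploads, which is exactly what makes the concurrent GC decisions above safe.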
## Safekeeper Changes

We need to teach the safekeepers that there can be multiple pageservers requesting WAL for the same timeline, in order to prevent premature WAL truncation.

In the current architecture, the Safekeeper service assumes a single Pageserver and is allowed to prune WAL older than that Pageserver's `remote_consistent_lsn`, which it learns through the walreceiver protocol.

So, if we have a tenant attached to two pageservers at the same time, they will both try to stream WAL, and the Safekeeper will get confused about which connection's `remote_consistent_lsn` to use as the basis for WAL pruning.

What do we need to change to make it work? We need to make sure that the Safekeepers only prune WAL up to the `remote_consistent_lsn` of the last-committed transaction.

The straightforward way to get there is to redesign WAL pruning as follows:

1. Pageserver reports `remote_consistent_lsn` as part of transaction commit to Control Plane.
2. Control Plane makes sure the transaction state update is persisted.
3. Control Plane (asynchronously to transaction commit) reconciles with Safekeepers to ensure WAL pruning happens.

The above requires non-trivial changes, but in light of other planned projects such as restore-tenant-from-safekeeper-wal-backups, I think Control Plane will need to get involved in WAL pruning anyway.

# How This Proposal Unlocks Future Features

Let us revisit the example from the introduction where we were thinking about handling network partitions. Network partitions need to be solved first, because they're unavoidable in distributed systems. We did that. Now let's see how we can solve actual product problems.

## Fast, Zero-Toil Failover on Network Partitions or Instance Failure

The "Problem Statement" section outlined the current architecture's problems with regard to network partitions or instance failure: it requires a 100%-correct node-dead detector to make decisions, which doesn't exist in reality. We rely instead on human toil: an on-call engineer has to inspect the situation and make a decision, which may be incorrect and in any case takes time on the order of minutes, which means equivalent downtime for users.

With this proposal, automatic failover for pageservers is trivial.

If a pageserver is unresponsive from Control Plane's / Compute's perspective, Control Plane does the following:

- attach all tenants of the unresponsive pageserver to new pageservers;
- switch over these tenants' computes immediately.

At this point, availability is restored and user pain relieved.

What's left is to somehow close the doomed transaction of the unresponsive pageserver, so that it becomes RejectAcknowledged and GC can make progress. Since S3 is cheap, we can afford to wait a really long time here, especially if we put a soft bound on the amount of data a transaction may produce before it must commit. Procedure:

1. Ensure the unresponsive pageserver is taken out of rotation for new attachments. That should probably happen as part of the routine above.
2. Have a human operator investigate and decide what to do (next morning, NO ONCALL ALERT):
    1. Inspect the instance, investigate logs, understand the root cause.
    2. Try to re-establish connectivity between the pageserver and Control Plane so that the pageserver can retry commits, get rejected, and ack the rejection ⇒ enable GC.
    3. Use the procedure below to decommission the pageserver.

### Decommissioning A Pageserver (Dead or Alive-but-Unresponsive)

The solution, enabled by this proposal:

1. Ensure that pageserver's S3 credentials are revoked so that it cannot make new uploads, which wouldn't be tracked anywhere.
2. Let enough time pass for the S3 credential revocation to propagate. Amazon doesn't give a guarantee here; as stated earlier, we can easily afford to wait.
3. Mark all Open and RejectPending transactions of that pageserver as RejectAcknowledged.

Revocation of the S3 credentials is required so that, once we transition all the transactions of that pageserver to RejectAcknowledged, one garbage-collection pass is guaranteed to delete all objects that will ever exist for that pageserver. That way, we need not check **GarbageCollected** transactions ever again.

## Workflow: Zero-Downtime Relocation

With zero-downtime relocation, the goal is to have the target pageserver warmed up, i.e., at the same `last_record_lsn` as the source pageserver, before switching over Computes from the source to the target pageserver.

With this proposal, it works like so:

1. Grant the source pageserver its last open transaction. This one is doomed to be rejected later, unless the relocation fails.
2. Grant the target pageserver its first open transaction.
3. Have the target pageserver catch up on WAL, streaming from the last-committed txn's `remote_consistent_lsn` onwards.
4. Once the target pageserver reports a `last_record_lsn` close enough to the source pageserver's, the target pageserver requests commit.
5. Drain compute traffic from the source to the target pageserver. (The source can still answer requests until it tries to commit and gets rejected, so this will be quite smooth.)

Note that as soon as we complete step (4), the source pageserver's transaction is doomed to be rejected later. Conversely, if the target can't catch up fast enough, the source will make a transaction commit earlier. This will generally happen if there is a lot of write traffic coming in. The design space for making things smooth here is large, but well explored in other areas of computing, e.g., VM live migration. We have all the important policy levers at hand, e.g.,

- delaying source commits if we see the target making progress
- slowing down source consumption (needs some signalling mechanism)
- slowing down compute WAL generation
- …

It doesn't really matter which; what's important is that two pageservers can overlap.

# Additional Trade-Offs / Remarks Brought Up During Peer Review

This proposal was read and discussed by @Stas and @Dmitry Rodionov prior to publishing it with the broader team. (This does not mean they endorse this proposal!)

Issues that we discussed:

1. **Frequency of transactions:** If even idle tenants commit every 10 min or so, that's quite a lot of load on Control Plane. Can we minimize it by equating the transaction commit period with the attachment period, i.e. start a txn on attach, commit on detach?
    1. Would be nice, but if a tenant is attached for 1 month and then the PS dies, we lose 1 month of work.
    2. ⇒ My solution to this problem: adjusted this proposal to make transaction commit frequency proportional to the amount of uncommitted data.
        1. It's OK to spend resources on active users; they pay us money to do it!
        2. The amount of work per transaction is minimal.
            1. In the current Control Plane, it's a small database transaction that is very unlikely to conflict with other transactions.
            2. I have few concerns about the scalability of the commit workload on the CP side, because it's trivially horizontally scalable by sharding by tenant.
            3. There's no super-stringent availability requirement on Control Plane: if a txn can't commit because it can't reach the CP, the PS can continue & retry in the background, speculating that it's CP downtime and not a PS-partitioned-off scenario.
            4. Without a stringent availability requirement, there's flexibility for future changes to the CP-side implementation.
2. **Does this proposal address mirroring / no-performance-degradation failover?**
    1. No, it doesn't. It only provides the building block for attaching a tenant to a new pageserver without having to worry whether the tenant is detached on the old pageserver.
    2. A simple scheme to build no-performance-degradation failover on top of this proposal is to have an asynchronous read-only replica of a tenant on another pageserver in the same region.
    3. Another, more ambitious scheme to get no performance degradation would be [One-Pager: Layer File Spreading (Christian)](https://www.notion.so/One-Pager-Layer-File-Spreading-Christian-eb6b64182a214e11b3fceceee688d843?pvs=21); this proposal would be used in layer file spreading for risk-free automation of TenantLeader failover, which hasn't been addressed there.
    4. Either way, failover would restart from an older S3 state and need to re-ingest WAL before being able to serve recently written pages.
        1. Is that a show-stopper? I think not.
        2. Is it suboptimal? Absolutely: if a pageserver instance fails, all its tenants will be distributed among the remaining pageservers (OK), and all these tenants will ask the safekeepers for WAL at the same time (BAD). So pageserver instance failure will cause a load spike on safekeepers.
            1. Personally, I think that's an OK trade-off to make.
            2. There are countless options to avoid / mitigate the load spike, e.g., proactively streaming WAL to the standby read-only replica.
3. **Does this proposal allow multiple writers for a tenant?**
    1. In abstract terms, this proposal provides a linearized history for a given S3 prefix.
    2. In concrete terms, this proposal provides a linearized history per tenant.
    3. There can be multiple writers at a given time, but only one of them will win and become part of the linearized history.
4. **Alternative ideas mentioned during meetings that should be turned into a written proposal like this one:**
    1. @Dmitry Rodionov: having linearized storage of index_part.json in some database that allows serializable transactions / atomic compare-and-swap PUT
    2. @Dmitry Rodionov :
    3. @Stas: something like this scheme, but somehow find a way to equate attachment duration with transaction duration, without losing work if the pageserver dies months after attachment.
From f6c671c1406ce8373bf7b3cdf9a9d9f4c33b53cf Mon Sep 17 00:00:00 2001 From: Dmitry Rodionov Date: Fri, 18 Aug 2023 20:48:33 +0300 Subject: [PATCH 30/40] resume timeline deletions on attach (#5030) closes [#5036](https://github.com/neondatabase/neon/issues/5036) --- pageserver/src/tenant.rs | 31 ++++- test_runner/regress/test_timeline_delete.py | 125 +++++++++++++++++++- 2 files changed, 150 insertions(+), 6 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 309020391f..eaaa0f4a74 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -616,6 +616,9 @@ impl Tenant { .instrument(info_span!("download_index_part", %timeline_id)), ); } + + let mut timelines_to_resume_deletions = vec![]; + // Wait for all the download tasks to complete & collect results. let mut remote_index_and_client = HashMap::new(); let mut timeline_ancestors = HashMap::new(); @@ -632,9 +635,12 @@ impl Tenant { ); remote_index_and_client.insert(timeline_id, (index_part, client)); } - MaybeDeletedIndexPart::Deleted(_) => { - info!("timeline {} is deleted, skipping", timeline_id); - continue; + MaybeDeletedIndexPart::Deleted(index_part) => { + info!( + "timeline {} is deleted, picking to resume deletion", + timeline_id + ); + timelines_to_resume_deletions.push((timeline_id, index_part, client)); } } } @@ -659,6 +665,25 @@ impl Tenant { })?; } + // Walk through deleted timelines, resume deletion + for (timeline_id, index_part, remote_timeline_client) in timelines_to_resume_deletions { + remote_timeline_client + .init_upload_queue_stopped_to_continue_deletion(&index_part) + .context("init queue stopped") + .map_err(LoadLocalTimelineError::ResumeDeletion)?; + + DeleteTimelineFlow::resume_deletion( + Arc::clone(self), + timeline_id, + &index_part.parse_metadata().context("parse_metadata")?, + Some(remote_timeline_client), + None, + ) + .await + .context("resume_deletion") + .map_err(LoadLocalTimelineError::ResumeDeletion)?; + } + std::fs::remove_file(&marker_file) .with_context(|| format!("unlink attach marker file {}", marker_file.display()))?; crashsafe::fsync(marker_file.parent().expect("marker file has parent dir")) diff --git a/test_runner/regress/test_timeline_delete.py b/test_runner/regress/test_timeline_delete.py index 7d2d3304e2..3647442da3 100644 --- a/test_runner/regress/test_timeline_delete.py +++ b/test_runner/regress/test_timeline_delete.py @@ -17,6 +17,7 @@ from fixtures.neon_fixtures import ( ) from fixtures.pageserver.http import PageserverApiException from fixtures.pageserver.utils import ( + MANY_SMALL_LAYERS_TENANT_CONFIG, assert_prefix_empty, assert_prefix_not_empty, poll_for_remote_storage_iterations, @@ -34,7 +35,7 @@ from fixtures.remote_storage import ( available_s3_storages, ) from fixtures.types import Lsn, TenantId, TimelineId -from fixtures.utils import query_scalar, wait_until +from fixtures.utils import query_scalar, run_pg_bench_small, wait_until def test_timeline_delete(neon_simple_env: NeonEnv): @@ -208,7 +209,7 @@ def test_delete_timeline_exercise_crash_safety_failpoints( timeline_id = env.neon_cli.create_timeline("delete") with env.endpoints.create_start("delete") as endpoint: # generate enough layers - pg_bin.run(["pgbench", "-i", "-I dtGvp", "-s1", endpoint.connstr()]) + run_pg_bench_small(pg_bin, endpoint.connstr()) if remote_storage_kind is RemoteStorageKind.NOOP: wait_for_last_flush_lsn(env, endpoint, env.initial_tenant, timeline_id) else: @@ -812,7 +813,7 @@ def test_delete_orphaned_objects( timeline_id = 
env.neon_cli.create_timeline("delete") with env.endpoints.create_start("delete") as endpoint: # generate enough layers - pg_bin.run(["pgbench", "-i", "-I dtGvp", "-s1", endpoint.connstr()]) + run_pg_bench_small(pg_bin, endpoint.connstr()) last_flush_lsn_upload(env, endpoint, env.initial_tenant, timeline_id) # write orphaned file that is missing from the index @@ -848,3 +849,121 @@ def test_delete_orphaned_objects( ) assert env.remote_storage.index_path(env.initial_tenant, timeline_id).exists() + + +@pytest.mark.parametrize("remote_storage_kind", available_remote_storages()) +def test_timeline_delete_resumed_on_attach( + neon_env_builder: NeonEnvBuilder, + remote_storage_kind: RemoteStorageKind, + pg_bin: PgBin, +): + neon_env_builder.enable_remote_storage( + remote_storage_kind=remote_storage_kind, + test_name="test_deleted_tenant_ignored_on_attach", + ) + + env = neon_env_builder.init_start(initial_tenant_conf=MANY_SMALL_LAYERS_TENANT_CONFIG) + + tenant_id = env.initial_tenant + + ps_http = env.pageserver.http_client() + + timeline_id = env.neon_cli.create_timeline("delete") + with env.endpoints.create_start("delete") as endpoint: + # generate enough layers + run_pg_bench_small(pg_bin, endpoint.connstr()) + last_flush_lsn_upload(env, endpoint, env.initial_tenant, timeline_id) + + if remote_storage_kind in available_s3_storages(): + assert_prefix_not_empty( + neon_env_builder, + prefix="/".join( + ( + "tenants", + str(env.initial_tenant), + "timelines", + str(timeline_id), + ) + ), + ) + + # failpoint before we remove index_part from s3 + failpoint = "timeline-delete-during-rm" + ps_http.configure_failpoints((failpoint, "return")) + + env.pageserver.allowed_errors.extend( + ( + # allow errors caused by failpoints + f".*failpoint: {failpoint}", + # It appears when we stopped flush loop during deletion (attempt) and then pageserver is stopped + ".*freeze_and_flush_on_shutdown.*failed to freeze and flush: cannot flush frozen layers when flush_loop is not running, state is Exited", + # error from http response is also logged + ".*InternalServerError\\(Tenant is marked as deleted on remote storage.*", + # Polling after attach may fail with this + f".*InternalServerError\\(Tenant {tenant_id} is not active.*", + '.*shutdown_pageserver{exit_code=0}: stopping left-over name="remote upload".*', + ) + ) + + iterations = poll_for_remote_storage_iterations(remote_storage_kind) + + ps_http.timeline_delete(tenant_id, timeline_id) + + timeline_info = wait_until_timeline_state( + pageserver_http=ps_http, + tenant_id=env.initial_tenant, + timeline_id=timeline_id, + expected_state="Broken", + iterations=iterations, + ) + + reason = timeline_info["state"]["Broken"]["reason"] + log.info(f"timeline broken: {reason}") + + # failpoint may not be the only error in the stack + assert reason.endswith(f"failpoint: {failpoint}"), reason + + if remote_storage_kind in available_s3_storages(): + assert_prefix_not_empty( + neon_env_builder, + prefix="/".join( + ( + "tenants", + str(tenant_id), + "timelines", + str(timeline_id), + ) + ), + ) + + # now we stop pageserver and remove local tenant state + env.endpoints.stop_all() + env.pageserver.stop() + + dir_to_clear = Path(env.repo_dir) / "tenants" + shutil.rmtree(dir_to_clear) + os.mkdir(dir_to_clear) + + env.pageserver.start() + + # now we call attach + ps_http.tenant_attach(tenant_id=tenant_id) + + # delete should be resumed + wait_timeline_detail_404(ps_http, env.initial_tenant, timeline_id, iterations=iterations) + + tenant_path = env.timeline_dir(tenant_id=tenant_id, 
timeline_id=timeline_id) + assert not tenant_path.exists() + + if remote_storage_kind in available_s3_storages(): + assert_prefix_empty( + neon_env_builder, + prefix="/".join( + ( + "tenants", + str(timeline_id), + "timelines", + str(timeline_id), + ) + ), + ) From 30888a24d9b6f5c9be812088002ac2383b230cf1 Mon Sep 17 00:00:00 2001 From: Dmitry Rodionov Date: Fri, 18 Aug 2023 20:49:11 +0300 Subject: [PATCH 31/40] Avoid flakiness in test_timeline_delete_fail_before_local_delete (#5032) The problem was that timeline detail can return timelines in not only active state. And by the time request comes timeline deletion can still be in progress if we're unlucky (test execution happened to be slower for some reason) Reference for failed test run https://neon-github-public-dev.s3.amazonaws.com/reports/pr-5022/5891420105/index.html#suites/f588e0a787c49e67b29490359c589fae/dab036e9bd673274 The error was `Exception: detail succeeded (it should return 404)` reported by @koivunej --- pageserver/src/http/routes.rs | 1 - test_runner/regress/test_timeline_delete.py | 10 +--------- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 1e8dada85e..450eb8072f 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -517,7 +517,6 @@ async fn timeline_delete_handler( .instrument(info_span!("timeline_delete", %tenant_id, %timeline_id)) .await?; - // FIXME: needs to be an error for console to retry it. Ideally Accepted should be used and retried until 404. json_response(StatusCode::ACCEPTED, ()) } diff --git a/test_runner/regress/test_timeline_delete.py b/test_runner/regress/test_timeline_delete.py index 3647442da3..e641065c43 100644 --- a/test_runner/regress/test_timeline_delete.py +++ b/test_runner/regress/test_timeline_delete.py @@ -488,15 +488,7 @@ def test_timeline_delete_fail_before_local_delete(neon_env_builder: NeonEnvBuild # Wait for tenant to finish loading. wait_until_tenant_active(ps_http, tenant_id=env.initial_tenant, iterations=10, period=1) - try: - data = ps_http.timeline_detail(env.initial_tenant, leaf_timeline_id) - log.debug(f"detail {data}") - except PageserverApiException as e: - log.debug(e) - if e.status_code != 404: - raise - else: - raise Exception("detail succeeded (it should return 404)") + wait_timeline_detail_404(ps_http, env.initial_tenant, leaf_timeline_id, iterations=4) assert ( not leaf_timeline_path.exists() From 5c6a692cf1818482682a70be391f30d6c89c65d7 Mon Sep 17 00:00:00 2001 From: Felix Prasanna <91577249+fprasx@users.noreply.github.com> Date: Fri, 18 Aug 2023 14:29:20 -0400 Subject: [PATCH 32/40] bump `VM_BUILDER_VERSION` to v0.16.2 (#5031) A very slight change that allows us to configure the UID of the neon-postgres cgroup owner. We start postgres in this cgroup so we can scale it with the cgroups v2 api. Currently, the control plane overwrites the entrypoint set by `vm-builder`, so `compute_ctl` (and thus postgres), is not started in the neon-postgres cgroup. Having `compute_ctl` start postgres in the cgroup should fix this. However, at the moment appears like it does not have the correct permissions. Configuring the neon-postgres UID to `postgres` (which is the UID `compute_ctl` runs under) should hopefully fix this. See #4920 - the PR to modify `compute_ctl` to start postgres in the cgorup. See: neondatabase/autoscaling#480, neondatabase/autoscaling#477. 
Both these PR's are part of an effort to increase `vm-builder`'s configurability and allow us to adjust it as we integrate in the monitor. --- .github/workflows/build_and_test.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index a8eab7a86f..f014f51fa9 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -780,7 +780,7 @@ jobs: run: shell: sh -eu {0} env: - VM_BUILDER_VERSION: v0.15.4 + VM_BUILDER_VERSION: v0.16.2 steps: - name: Checkout @@ -801,7 +801,12 @@ jobs: - name: Build vm image run: | - ./vm-builder -enable-file-cache -src=369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} -dst=369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} + ./vm-builder \ + -enable-file-cache \ + -enable-monitor \ + -enable-informant \ + -src=369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} \ + -dst=369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} - name: Pushing vm-compute-node image run: | From 368ee6c8cafbd1c30bc4ddc5c013b745e1ac8b92 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Sat, 19 Aug 2023 01:01:44 +0300 Subject: [PATCH 33/40] refactor: failpoint support (#5033) - move them to pageserver which is the only dependant on the crate fail - "move" the exported macro to the new module - support at init time the same failpoints as runtime Found while debugging test failures and making tests more repeatable by allowing "exit" from pageserver start via environment variables. Made those changes to `test_gc_cutoff.py`. --------- Co-authored-by: Christian Schwarz --- libs/utils/src/lib.rs | 38 ------------ pageserver/src/bin/pageserver.rs | 4 +- pageserver/src/failpoint_support.rs | 86 +++++++++++++++++++++++++++ pageserver/src/http/routes.rs | 9 +-- pageserver/src/lib.rs | 2 + pageserver/src/tenant.rs | 8 ++- pageserver/src/walingest.rs | 2 +- test_runner/regress/test_gc_cutoff.py | 17 +++--- 8 files changed, 105 insertions(+), 61 deletions(-) create mode 100644 pageserver/src/failpoint_support.rs diff --git a/libs/utils/src/lib.rs b/libs/utils/src/lib.rs index 5c8e01f41a..638dba427b 100644 --- a/libs/utils/src/lib.rs +++ b/libs/utils/src/lib.rs @@ -68,44 +68,6 @@ pub mod completion; /// Reporting utilities pub mod error; -mod failpoint_macro_helpers { - - /// use with fail::cfg("$name", "return(2000)") - /// - /// The effect is similar to a "sleep(2000)" action, i.e. we sleep for the - /// specified time (in milliseconds). The main difference is that we use async - /// tokio sleep function. Another difference is that we print lines to the log, - /// which can be useful in tests to check that the failpoint was hit. - #[macro_export] - macro_rules! failpoint_sleep_millis_async { - ($name:literal) => {{ - // If the failpoint is used with a "return" action, set should_sleep to the - // returned value (as string). Otherwise it's set to None. - let should_sleep = (|| { - ::fail::fail_point!($name, |x| x); - ::std::option::Option::None - })(); - - // Sleep if the action was a returned value - if let ::std::option::Option::Some(duration_str) = should_sleep { - $crate::failpoint_sleep_helper($name, duration_str).await - } - }}; - } - - // Helper function used by the macro. 
(A function has nicer scoping so we - // don't need to decorate everything with "::") - pub async fn failpoint_sleep_helper(name: &'static str, duration_str: String) { - let millis = duration_str.parse::().unwrap(); - let d = std::time::Duration::from_millis(millis); - - tracing::info!("failpoint {:?}: sleeping for {:?}", name, d); - tokio::time::sleep(d).await; - tracing::info!("failpoint {:?}: sleep done", name); - } -} -pub use failpoint_macro_helpers::failpoint_sleep_helper; - /// This is a shortcut to embed git sha into binaries and avoid copying the same build script to all packages /// /// we have several cases: diff --git a/pageserver/src/bin/pageserver.rs b/pageserver/src/bin/pageserver.rs index e0c969279e..635e12e8fe 100644 --- a/pageserver/src/bin/pageserver.rs +++ b/pageserver/src/bin/pageserver.rs @@ -6,7 +6,7 @@ use std::{env, ops::ControlFlow, path::Path, str::FromStr}; use anyhow::{anyhow, Context}; use clap::{Arg, ArgAction, Command}; -use fail::FailScenario; + use metrics::launch_timestamp::{set_launch_timestamp_metric, LaunchTimestamp}; use pageserver::disk_usage_eviction_task::{self, launch_disk_usage_global_eviction_task}; use pageserver::metrics::{STARTUP_DURATION, STARTUP_IS_LOADING}; @@ -121,7 +121,7 @@ fn main() -> anyhow::Result<()> { } // Initialize up failpoints support - let scenario = FailScenario::setup(); + let scenario = pageserver::failpoint_support::init(); // Basic initialization of things that don't change after startup virtual_file::init(conf.max_file_descriptors); diff --git a/pageserver/src/failpoint_support.rs b/pageserver/src/failpoint_support.rs new file mode 100644 index 0000000000..2190eba18a --- /dev/null +++ b/pageserver/src/failpoint_support.rs @@ -0,0 +1,86 @@ +/// use with fail::cfg("$name", "return(2000)") +/// +/// The effect is similar to a "sleep(2000)" action, i.e. we sleep for the +/// specified time (in milliseconds). The main difference is that we use async +/// tokio sleep function. Another difference is that we print lines to the log, +/// which can be useful in tests to check that the failpoint was hit. +#[macro_export] +macro_rules! __failpoint_sleep_millis_async { + ($name:literal) => {{ + // If the failpoint is used with a "return" action, set should_sleep to the + // returned value (as string). Otherwise it's set to None. + let should_sleep = (|| { + ::fail::fail_point!($name, |x| x); + ::std::option::Option::None + })(); + + // Sleep if the action was a returned value + if let ::std::option::Option::Some(duration_str) = should_sleep { + $crate::failpoint_support::failpoint_sleep_helper($name, duration_str).await + } + }}; +} +pub use __failpoint_sleep_millis_async as sleep_millis_async; + +// Helper function used by the macro. (A function has nicer scoping so we +// don't need to decorate everything with "::") +#[doc(hidden)] +pub(crate) async fn failpoint_sleep_helper(name: &'static str, duration_str: String) { + let millis = duration_str.parse::().unwrap(); + let d = std::time::Duration::from_millis(millis); + + tracing::info!("failpoint {:?}: sleeping for {:?}", name, d); + tokio::time::sleep(d).await; + tracing::info!("failpoint {:?}: sleep done", name); +} + +pub fn init() -> fail::FailScenario<'static> { + // The failpoints lib provides support for parsing the `FAILPOINTS` env var. + // We want non-default behavior for `exit`, though, so, we handle it separately. + // + // Format for FAILPOINTS is "name=actions" separated by ";". 
+ let actions = std::env::var("FAILPOINTS"); + if actions.is_ok() { + std::env::remove_var("FAILPOINTS"); + } else { + // let the library handle non-utf8, or nothing for not present + } + + let scenario = fail::FailScenario::setup(); + + if let Ok(val) = actions { + val.split(';') + .enumerate() + .map(|(i, s)| s.split_once('=').ok_or((i, s))) + .for_each(|res| { + let (name, actions) = match res { + Ok(t) => t, + Err((i, s)) => { + panic!( + "startup failpoints: missing action on the {}th failpoint; try `{s}=return`", + i + 1, + ); + } + }; + if let Err(e) = apply_failpoint(name, actions) { + panic!("startup failpoints: failed to apply failpoint {name}={actions}: {e}"); + } + }); + } + + scenario +} + +pub(crate) fn apply_failpoint(name: &str, actions: &str) -> Result<(), String> { + if actions == "exit" { + fail::cfg_callback(name, exit_failpoint) + } else { + fail::cfg(name, actions) + } +} + +#[inline(never)] +fn exit_failpoint() { + tracing::info!("Exit requested by failpoint"); + std::process::exit(1); +} diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 450eb8072f..f86657fa77 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -979,14 +979,7 @@ async fn failpoints_handler( // We recognize one extra "action" that's not natively recognized // by the failpoints crate: exit, to immediately kill the process - let cfg_result = if fp.actions == "exit" { - fail::cfg_callback(fp.name, || { - info!("Exit requested by failpoint"); - std::process::exit(1); - }) - } else { - fail::cfg(fp.name, &fp.actions) - }; + let cfg_result = crate::failpoint_support::apply_failpoint(&fp.name, &fp.actions); if let Err(err_msg) = cfg_result { return Err(ApiError::BadRequest(anyhow!( diff --git a/pageserver/src/lib.rs b/pageserver/src/lib.rs index 4ff8ffcc4f..cb20caba1f 100644 --- a/pageserver/src/lib.rs +++ b/pageserver/src/lib.rs @@ -21,6 +21,8 @@ pub mod walingest; pub mod walrecord; pub mod walredo; +pub mod failpoint_support; + use std::path::Path; use crate::task_mgr::TaskKind; diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index eaaa0f4a74..09d4f49d18 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -689,7 +689,7 @@ impl Tenant { crashsafe::fsync(marker_file.parent().expect("marker file has parent dir")) .context("fsync tenant directory after unlinking attach marker file")?; - utils::failpoint_sleep_millis_async!("attach-before-activate"); + crate::failpoint_support::sleep_millis_async!("attach-before-activate"); info!("Done"); @@ -1098,7 +1098,7 @@ impl Tenant { debug!("loading tenant task"); - utils::failpoint_sleep_millis_async!("before-loading-tenant"); + crate::failpoint_support::sleep_millis_async!("before-loading-tenant"); // Load in-memory state to reflect the local files on disk // @@ -2438,7 +2438,9 @@ impl Tenant { .refresh_gc_info_internal(target_timeline_id, horizon, pitr, ctx) .await?; - utils::failpoint_sleep_millis_async!("gc_iteration_internal_after_getting_gc_timelines"); + crate::failpoint_support::sleep_millis_async!( + "gc_iteration_internal_after_getting_gc_timelines" + ); // If there is nothing to GC, we don't want any messages in the INFO log. if !gc_timelines.is_empty() { diff --git a/pageserver/src/walingest.rs b/pageserver/src/walingest.rs index 8d4c1842bd..340b75877d 100644 --- a/pageserver/src/walingest.rs +++ b/pageserver/src/walingest.rs @@ -312,7 +312,7 @@ impl<'a> WalIngest<'a> { // particular point in the WAL. 
For more fine-grained control, // we could peek into the message and only pause if it contains // a particular string, for example, but this is enough for now. - utils::failpoint_sleep_millis_async!("wal-ingest-logical-message-sleep"); + crate::failpoint_support::sleep_millis_async!("wal-ingest-logical-message-sleep"); } } diff --git a/test_runner/regress/test_gc_cutoff.py b/test_runner/regress/test_gc_cutoff.py index f58abb4575..be3355f5cc 100644 --- a/test_runner/regress/test_gc_cutoff.py +++ b/test_runner/regress/test_gc_cutoff.py @@ -12,13 +12,8 @@ from fixtures.neon_fixtures import NeonEnvBuilder, PgBin # test anyway, so it doesn't need any special attention here. @pytest.mark.timeout(600) def test_gc_cutoff(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin): - env = neon_env_builder.init_start() - - pageserver_http = env.pageserver.http_client() - - # Use aggressive GC and checkpoint settings, so that we also exercise GC during the test - tenant_id, _ = env.neon_cli.create_tenant( - conf={ + env = neon_env_builder.init_start( + initial_tenant_conf={ "gc_period": "10 s", "gc_horizon": f"{1024 ** 2}", "checkpoint_distance": f"{1024 ** 2}", @@ -29,6 +24,11 @@ def test_gc_cutoff(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin): "image_creation_threshold": "2", } ) + + pageserver_http = env.pageserver.http_client() + + # Use aggressive GC and checkpoint settings, so that we also exercise GC during the test + tenant_id = env.initial_tenant endpoint = env.endpoints.create_start("main", tenant_id=tenant_id) connstr = endpoint.connstr(options="-csynchronous_commit=off") pg_bin.run_capture(["pgbench", "-i", "-s10", connstr]) @@ -39,5 +39,4 @@ def test_gc_cutoff(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin): with pytest.raises(subprocess.SubprocessError): pg_bin.run_capture(["pgbench", "-P1", "-N", "-c5", "-T500", "-Mprepared", connstr]) env.pageserver.stop() - env.pageserver.start() - pageserver_http.configure_failpoints(("after-timeline-gc-removed-layers", "exit")) + env.pageserver.start(extra_env_vars={"FAILPOINTS": "after-timeline-gc-removed-layers=exit"}) From a23b0773f126fd46acb6da8ffadfdd4fe5d92a6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Sat, 19 Aug 2023 00:56:03 +0200 Subject: [PATCH 34/40] Fix DeltaLayer dumping (#5045) ## Problem Before, DeltaLayer dumping (via `cargo run --release -p pagectl -- print-layer-file` ) would crash as one can't call `Handle::block_on` in an async executor thread. ## Summary of changes Avoid the problem by using `DeltaLayerInner::load_keys` to load the keys into RAM (which we already do during compaction), and then load the values one by one during dumping. 
--- pageserver/src/tenant.rs | 25 +++++++ .../src/tenant/storage_layer/delta_layer.rs | 75 ++++++++----------- 2 files changed, 57 insertions(+), 43 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 09d4f49d18..8ca2c4e01a 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -3951,6 +3951,31 @@ mod tests { Ok(()) } + #[tokio::test] + async fn delta_layer_dumping() -> anyhow::Result<()> { + let (tenant, ctx) = TenantHarness::create("test_layer_dumping")?.load().await; + let tline = tenant + .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) + .await?; + make_some_layers(tline.as_ref(), Lsn(0x20)).await?; + + let layer_map = tline.layers.read().await; + let level0_deltas = layer_map.layer_map().get_level0_deltas()?; + + assert!(!level0_deltas.is_empty()); + + for delta in level0_deltas { + let delta = layer_map.get_from_desc(&delta); + // Ensure we are dumping a delta layer here + let delta = delta.downcast_delta_layer().unwrap(); + + delta.dump(false, &ctx).await.unwrap(); + delta.dump(true, &ctx).await.unwrap(); + } + + Ok(()) + } + #[tokio::test] async fn corrupt_metadata() -> anyhow::Result<()> { const TEST_NAME: &str = "corrupt_metadata"; diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index bff42a1ec2..6094a7db52 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -51,7 +51,6 @@ use std::ops::Range; use std::os::unix::fs::FileExt; use std::path::{Path, PathBuf}; use std::sync::Arc; -use tokio::runtime::Handle; use tokio::sync::OnceCell; use tracing::*; @@ -177,10 +176,6 @@ impl DeltaKey { Lsn(u64::from_be_bytes(self.0[KEY_SIZE..].try_into().unwrap())) } - fn extract_key_from_buf(buf: &[u8]) -> Key { - Key::from_slice(&buf[..KEY_SIZE]) - } - fn extract_lsn_from_buf(buf: &[u8]) -> Lsn { let mut lsn_buf = [0u8; 8]; lsn_buf.copy_from_slice(&buf[KEY_SIZE..]); @@ -277,48 +272,42 @@ impl Layer for DeltaLayer { tree_reader.dump().await?; - let cursor = file.block_cursor(); + let keys = DeltaLayerInner::load_keys(&Ref(&**inner)).await?; // A subroutine to dump a single blob - let dump_blob = |blob_ref: BlobRef| -> anyhow::Result { - // TODO this is not ideal, but on the other hand we are in dumping code... 
- let buf = Handle::current().block_on(cursor.read_blob(blob_ref.pos()))?; - let val = Value::des(&buf)?; - let desc = match val { - Value::Image(img) => { - format!(" img {} bytes", img.len()) - } - Value::WalRecord(rec) => { - let wal_desc = walrecord::describe_wal_record(&rec)?; - format!( - " rec {} bytes will_init: {} {}", - buf.len(), - rec.will_init(), - wal_desc - ) - } - }; - Ok(desc) + let dump_blob = |val: ValueRef<_>| -> _ { + async move { + let buf = val.reader.read_blob(val.blob_ref.pos()).await?; + let val = Value::des(&buf)?; + let desc = match val { + Value::Image(img) => { + format!(" img {} bytes", img.len()) + } + Value::WalRecord(rec) => { + let wal_desc = walrecord::describe_wal_record(&rec)?; + format!( + " rec {} bytes will_init: {} {}", + buf.len(), + rec.will_init(), + wal_desc + ) + } + }; + Ok(desc) + } }; - tree_reader - .visit( - &[0u8; DELTA_KEY_SIZE], - VisitDirection::Forwards, - |delta_key, val| { - let blob_ref = BlobRef(val); - let key = DeltaKey::extract_key_from_buf(delta_key); - let lsn = DeltaKey::extract_lsn_from_buf(delta_key); - - let desc = match dump_blob(blob_ref) { - Ok(desc) => desc, - Err(err) => format!("ERROR: {}", err), - }; - println!(" key {} at {}: {}", key, lsn, desc); - true - }, - ) - .await?; + for entry in keys { + let DeltaEntry { key, lsn, val, .. } = entry; + let desc = match dump_blob(val).await { + Ok(desc) => desc, + Err(err) => { + let err: anyhow::Error = err; + format!("ERROR: {err}") + } + }; + println!(" key {key} at {lsn}: {desc}"); + } Ok(()) } From 9140a950f4ccd58bd4699fd3674fffebb3a9eaf9 Mon Sep 17 00:00:00 2001 From: Dmitry Rodionov Date: Sun, 20 Aug 2023 12:28:50 +0300 Subject: [PATCH 35/40] Resume tenant deletion on attach (#5039) I'm still a bit nervous about attach -> crash case. But it should work. (unlike case with timeline). Ideally would be cool to cover this with test. This continues tradition of adding bool flags for Tenant::set_stopping. Probably lifecycle project will help with fixing it. --- pageserver/src/tenant.rs | 84 +++++++++++++++++++---- pageserver/src/tenant/delete.rs | 34 +++++++-- pageserver/src/tenant/mgr.rs | 11 +-- test_runner/fixtures/pageserver/utils.py | 2 +- test_runner/regress/test_tenant_delete.py | 16 ++--- 5 files changed, 112 insertions(+), 35 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 8ca2c4e01a..2ec5fe90ad 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -29,6 +29,7 @@ use std::collections::hash_map::Entry; use std::collections::BTreeSet; use std::collections::HashMap; use std::fmt::Debug; +use std::fmt::Display; use std::fs; use std::fs::File; use std::fs::OpenOptions; @@ -499,6 +500,7 @@ impl Tenant { conf: &'static PageServerConf, tenant_id: TenantId, broker_client: storage_broker::BrokerClientChannel, + tenants: &'static tokio::sync::RwLock, remote_storage: GenericRemoteStorage, ctx: &RequestContext, ) -> anyhow::Result> { @@ -513,7 +515,7 @@ impl Tenant { tenant_conf, wal_redo_manager, tenant_id, - Some(remote_storage), + Some(remote_storage.clone()), )); // Do all the hard work in the background @@ -528,17 +530,61 @@ impl Tenant { "attach tenant", false, async move { + // Ideally we should use Tenant::set_broken_no_wait, but it is not supposed to be used when tenant is in loading state. 
+ let make_broken = |t: &Tenant, err: anyhow::Error| { + error!("attach failed, setting tenant state to Broken: {err:?}"); + t.state.send_modify(|state| { + assert_eq!( + *state, + TenantState::Attaching, + "the attach task owns the tenant state until activation is complete" + ); + *state = TenantState::broken_from_reason(err.to_string()); + }); + }; + + let pending_deletion = { + match DeleteTenantFlow::should_resume_deletion( + conf, + Some(&remote_storage), + &tenant_clone, + ) + .await + { + Ok(should_resume_deletion) => should_resume_deletion, + Err(err) => { + make_broken(&tenant_clone, anyhow::anyhow!(err)); + return Ok(()); + } + } + }; + + info!("pending_deletion {}", pending_deletion.is_some()); + + if let Some(deletion) = pending_deletion { + match DeleteTenantFlow::resume_from_attach( + deletion, + &tenant_clone, + tenants, + &ctx, + ) + .await + { + Err(err) => { + make_broken(&tenant_clone, anyhow::anyhow!(err)); + return Ok(()); + } + Ok(()) => return Ok(()), + } + } + match tenant_clone.attach(&ctx).await { Ok(()) => { info!("attach finished, activating"); tenant_clone.activate(broker_client, None, &ctx); } Err(e) => { - error!("attach failed, setting tenant state to Broken: {:?}", e); - tenant_clone.state.send_modify(|state| { - assert_eq!(*state, TenantState::Attaching, "the attach task owns the tenant state until activation is complete"); - *state = TenantState::broken_from_reason(e.to_string()); - }); + make_broken(&tenant_clone, anyhow::anyhow!(e)); } } Ok(()) @@ -833,6 +879,7 @@ impl Tenant { "initial tenant load", false, async move { + // Ideally we should use Tenant::set_broken_no_wait, but it is not supposed to be used when tenant is in loading state. let make_broken = |t: &Tenant, err: anyhow::Error| { error!("load failed, setting tenant state to Broken: {err:?}"); t.state.send_modify(|state| { @@ -880,7 +927,7 @@ impl Tenant { .as_mut() .and_then(|x| x.initial_logical_size_attempt.take()); - match DeleteTenantFlow::resume( + match DeleteTenantFlow::resume_from_load( deletion, &tenant_clone, init_order.as_ref(), @@ -902,7 +949,7 @@ impl Tenant { match tenant_clone.load(init_order.as_ref(), &ctx).await { Ok(()) => { - debug!("load finished",); + debug!("load finished"); tenant_clone.activate(broker_client, background_jobs_can_start, &ctx); } @@ -1795,7 +1842,7 @@ impl Tenant { // It's mesed up. // we just ignore the failure to stop - match self.set_stopping(shutdown_progress, false).await { + match self.set_stopping(shutdown_progress, false, false).await { Ok(()) => {} Err(SetStoppingError::Broken) => { // assume that this is acceptable @@ -1837,15 +1884,18 @@ impl Tenant { /// This function is not cancel-safe! /// /// `allow_transition_from_loading` is needed for the special case of loading task deleting the tenant. + /// `allow_transition_from_attaching` is needed for the special case of attaching deleted tenant. 
async fn set_stopping( &self, progress: completion::Barrier, allow_transition_from_loading: bool, + allow_transition_from_attaching: bool, ) -> Result<(), SetStoppingError> { let mut rx = self.state.subscribe(); // cannot stop before we're done activating, so wait out until we're done activating rx.wait_for(|state| match state { + TenantState::Attaching if allow_transition_from_attaching => true, TenantState::Activating(_) | TenantState::Attaching => { info!( "waiting for {} to turn Active|Broken|Stopping", @@ -1862,12 +1912,19 @@ impl Tenant { // we now know we're done activating, let's see whether this task is the winner to transition into Stopping let mut err = None; let stopping = self.state.send_if_modified(|current_state| match current_state { - TenantState::Activating(_) | TenantState::Attaching => { - unreachable!("we ensured above that we're done with activation, and, there is no re-activation") + TenantState::Activating(_) => { + unreachable!("1we ensured above that we're done with activation, and, there is no re-activation") + } + TenantState::Attaching => { + if !allow_transition_from_attaching { + unreachable!("2we ensured above that we're done with activation, and, there is no re-activation") + }; + *current_state = TenantState::Stopping { progress }; + true } TenantState::Loading => { if !allow_transition_from_loading { - unreachable!("we ensured above that we're done with activation, and, there is no re-activation") + unreachable!("3we ensured above that we're done with activation, and, there is no re-activation") }; *current_state = TenantState::Stopping { progress }; true @@ -1943,7 +2000,8 @@ impl Tenant { self.set_broken_no_wait(reason) } - pub(crate) fn set_broken_no_wait(&self, reason: String) { + pub(crate) fn set_broken_no_wait(&self, reason: impl Display) { + let reason = reason.to_string(); self.state.send_modify(|current_state| { match *current_state { TenantState::Activating(_) | TenantState::Loading | TenantState::Attaching => { diff --git a/pageserver/src/tenant/delete.rs b/pageserver/src/tenant/delete.rs index 1f03ed495a..de509cd3de 100644 --- a/pageserver/src/tenant/delete.rs +++ b/pageserver/src/tenant/delete.rs @@ -275,8 +275,9 @@ pub(crate) async fn remote_delete_mark_exists( /// It is resumable from any step in case a crash/restart occurs. /// There are three entrypoints to the process: /// 1. [`DeleteTenantFlow::run`] this is the main one called by a management api handler. -/// 2. [`DeleteTenantFlow::resume`] is called during restarts when local or remote deletion marks are still there. -/// Note the only other place that messes around timeline delete mark is the `Tenant::spawn_load` function. +/// 2. [`DeleteTenantFlow::resume_from_load`] is called during restarts when local or remote deletion marks are still there. +/// 3. [`DeleteTenantFlow::resume_from_attach`] is called when deletion is resumed tenant is found to be deleted during attach process. +/// Note the only other place that messes around timeline delete mark is the `Tenant::spawn_load` function. 
#[derive(Default)] pub enum DeleteTenantFlow { #[default] @@ -403,7 +404,7 @@ impl DeleteTenantFlow { } } - pub(crate) async fn resume( + pub(crate) async fn resume_from_load( guard: DeletionGuard, tenant: &Arc, init_order: Option<&InitializationOrder>, @@ -413,7 +414,7 @@ impl DeleteTenantFlow { let (_, progress) = completion::channel(); tenant - .set_stopping(progress, true) + .set_stopping(progress, true, false) .await .expect("cant be stopping or broken"); @@ -441,6 +442,31 @@ impl DeleteTenantFlow { .await } + pub(crate) async fn resume_from_attach( + guard: DeletionGuard, + tenant: &Arc, + tenants: &'static tokio::sync::RwLock, + ctx: &RequestContext, + ) -> Result<(), DeleteTenantError> { + let (_, progress) = completion::channel(); + + tenant + .set_stopping(progress, false, true) + .await + .expect("cant be stopping or broken"); + + tenant.attach(ctx).await.context("attach")?; + + Self::background( + guard, + tenant.conf, + tenant.remote_storage.clone(), + tenants, + tenant, + ) + .await + } + async fn prepare( tenants: &tokio::sync::RwLock, tenant_id: TenantId, diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index bb8a0d7089..57237e8b88 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -27,7 +27,7 @@ use crate::{InitializationOrder, IGNORED_TENANT_FILE_NAME}; use utils::fs_ext::PathExt; use utils::id::{TenantId, TimelineId}; -use super::delete::{remote_delete_mark_exists, DeleteTenantError}; +use super::delete::DeleteTenantError; use super::timeline::delete::DeleteTimelineFlow; /// The tenants known to the pageserver. @@ -201,7 +201,8 @@ pub(crate) fn schedule_local_tenant_processing( let tenant = if conf.tenant_attaching_mark_file_path(&tenant_id).exists() { info!("tenant {tenant_id} has attaching mark file, resuming its attach operation"); if let Some(remote_storage) = remote_storage { - match Tenant::spawn_attach(conf, tenant_id, broker_client, remote_storage, ctx) { + match Tenant::spawn_attach(conf, tenant_id, broker_client, tenants, remote_storage, ctx) + { Ok(tenant) => tenant, Err(e) => { error!("Failed to spawn_attach tenant {tenant_id}, reason: {e:#}"); @@ -591,12 +592,6 @@ pub async fn attach_tenant( remote_storage: GenericRemoteStorage, ctx: &RequestContext, ) -> Result<(), TenantMapInsertError> { - // Temporary solution, proper one would be to resume deletion, but that needs more plumbing around Tenant::load/Tenant::attach - // Corresponding issue https://github.com/neondatabase/neon/issues/5006 - if remote_delete_mark_exists(conf, &tenant_id, &remote_storage).await? { - return Err(anyhow::anyhow!("Tenant is marked as deleted on remote storage").into()); - } - tenant_map_insert(tenant_id, || { let tenant_dir = create_tenant_files(conf, tenant_conf, &tenant_id, CreateTenantFilesMode::Attach)?; // TODO: tenant directory remains on disk if we bail out from here on. 
diff --git a/test_runner/fixtures/pageserver/utils.py b/test_runner/fixtures/pageserver/utils.py index 5acd6be9fa..9767d5b547 100644 --- a/test_runner/fixtures/pageserver/utils.py +++ b/test_runner/fixtures/pageserver/utils.py @@ -315,4 +315,4 @@ MANY_SMALL_LAYERS_TENANT_CONFIG = { def poll_for_remote_storage_iterations(remote_storage_kind: RemoteStorageKind) -> int: - return 40 if remote_storage_kind is RemoteStorageKind.REAL_S3 else 10 + return 40 if remote_storage_kind is RemoteStorageKind.REAL_S3 else 15 diff --git a/test_runner/regress/test_tenant_delete.py b/test_runner/regress/test_tenant_delete.py index 5ffb713bba..448dcfaff7 100644 --- a/test_runner/regress/test_tenant_delete.py +++ b/test_runner/regress/test_tenant_delete.py @@ -292,9 +292,8 @@ def test_delete_tenant_exercise_crash_safety_failpoints( ) -# TODO resume deletion (https://github.com/neondatabase/neon/issues/5006) @pytest.mark.parametrize("remote_storage_kind", available_remote_storages()) -def test_deleted_tenant_ignored_on_attach( +def test_tenant_delete_is_resumed_on_attach( neon_env_builder: NeonEnvBuilder, remote_storage_kind: RemoteStorageKind, pg_bin: PgBin, @@ -336,6 +335,8 @@ def test_deleted_tenant_ignored_on_attach( ( # allow errors caused by failpoints f".*failpoint: {failpoint}", + # From deletion polling + f".*NotFound: tenant {env.initial_tenant}.*", # It appears when we stopped flush loop during deletion (attempt) and then pageserver is stopped ".*freeze_and_flush_on_shutdown.*failed to freeze and flush: cannot flush frozen layers when flush_loop is not running, state is Exited", # error from http response is also logged @@ -381,20 +382,17 @@ def test_deleted_tenant_ignored_on_attach( env.pageserver.start() # now we call attach - with pytest.raises( - PageserverApiException, match="Tenant is marked as deleted on remote storage" - ): - ps_http.tenant_attach(tenant_id=tenant_id) + ps_http.tenant_attach(tenant_id=tenant_id) - # delete should be resumed (not yet) - # wait_tenant_status_404(ps_http, tenant_id, iterations) + # delete should be resumed + wait_tenant_status_404(ps_http, tenant_id, iterations) # we shouldn've created tenant dir on disk tenant_path = env.tenant_dir(tenant_id=tenant_id) assert not tenant_path.exists() if remote_storage_kind in available_s3_storages(): - assert_prefix_not_empty( + assert_prefix_empty( neon_env_builder, prefix="/".join( ( From 130ccb4b679c1fe8af789de702956d2f276de55a Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Sun, 20 Aug 2023 12:33:19 +0300 Subject: [PATCH 36/40] Remove initial timeline id troubles (#5044) I made a mistake when I adding `env.initial_timeline: Optional[TimelineId]` in the #3839, should had just generated it and used it to create a specific timeline. This PR fixes those mistakes, and some extra calling into psql which must be slower than python field access. 
--- test_runner/fixtures/neon_fixtures.py | 9 ++- .../regress/test_disk_usage_eviction.py | 17 ++--- test_runner/regress/test_gc_aggressive.py | 13 ++-- test_runner/regress/test_large_schema.py | 6 +- test_runner/regress/test_layer_eviction.py | 6 +- test_runner/regress/test_metric_collection.py | 9 +-- test_runner/regress/test_ondemand_download.py | 14 ++-- test_runner/regress/test_read_trace.py | 17 ++--- test_runner/regress/test_remote_storage.py | 15 ++--- test_runner/regress/test_tenant_detach.py | 22 +++---- .../test_tenants_with_remote_storage.py | 8 +-- .../regress/test_threshold_based_eviction.py | 2 - test_runner/regress/test_timeline_delete.py | 17 ++--- test_runner/regress/test_wal_acceptor.py | 64 ++++++------------- 14 files changed, 81 insertions(+), 138 deletions(-) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 61cd169fa3..b2cd0fe968 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -427,6 +427,7 @@ class NeonEnvBuilder: default_branch_name: str = DEFAULT_BRANCH_NAME, preserve_database_files: bool = False, initial_tenant: Optional[TenantId] = None, + initial_timeline: Optional[TimelineId] = None, ): self.repo_dir = repo_dir self.rust_log_override = rust_log_override @@ -452,6 +453,7 @@ class NeonEnvBuilder: self.pg_version = pg_version self.preserve_database_files = preserve_database_files self.initial_tenant = initial_tenant or TenantId.generate() + self.initial_timeline = initial_timeline or TimelineId.generate() def init_configs(self) -> NeonEnv: # Cannot create more than one environment from one builder @@ -473,9 +475,10 @@ class NeonEnvBuilder: f"Services started, creating initial tenant {env.initial_tenant} and its initial timeline" ) initial_tenant, initial_timeline = env.neon_cli.create_tenant( - tenant_id=env.initial_tenant, conf=initial_tenant_conf + tenant_id=env.initial_tenant, conf=initial_tenant_conf, timeline_id=env.initial_timeline ) - env.initial_timeline = initial_timeline + assert env.initial_tenant == initial_tenant + assert env.initial_timeline == initial_timeline log.info(f"Initial timeline {initial_tenant}/{initial_timeline} created successfully") return env @@ -784,7 +787,7 @@ class NeonEnv: # generate initial tenant ID here instead of letting 'neon init' generate it, # so that we don't need to dig it out of the config file afterwards. 
self.initial_tenant = config.initial_tenant - self.initial_timeline: Optional[TimelineId] = None + self.initial_timeline = config.initial_timeline # Create a config file corresponding to the options toml = textwrap.dedent( diff --git a/test_runner/regress/test_disk_usage_eviction.py b/test_runner/regress/test_disk_usage_eviction.py index 1e9b130b1c..182069315e 100644 --- a/test_runner/regress/test_disk_usage_eviction.py +++ b/test_runner/regress/test_disk_usage_eviction.py @@ -1,4 +1,3 @@ -import shutil import time from dataclasses import dataclass from typing import Dict, Tuple @@ -14,7 +13,7 @@ from fixtures.neon_fixtures import ( ) from fixtures.pageserver.http import PageserverHttpClient from fixtures.pageserver.utils import wait_for_upload_queue_empty -from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind +from fixtures.remote_storage import RemoteStorageKind from fixtures.types import Lsn, TenantId, TimelineId from fixtures.utils import wait_until @@ -138,22 +137,14 @@ def eviction_env(request, neon_env_builder: NeonEnvBuilder, pg_bin: PgBin) -> Ev neon_env_builder.enable_remote_storage(RemoteStorageKind.LOCAL_FS, f"{request.node.name}") - env = neon_env_builder.init_start() + # initial tenant will not be present on this pageserver + env = neon_env_builder.init_configs() + env.start() pageserver_http = env.pageserver.http_client() # allow because we are invoking this manually; we always warn on executing disk based eviction env.pageserver.allowed_errors.append(r".* running disk usage based eviction due to pressure.*") - # remove the initial tenant - assert env.initial_timeline - pageserver_http.tenant_detach(env.initial_tenant) - assert isinstance(env.remote_storage, LocalFsStorage) - tenant_remote_storage = env.remote_storage.root / "tenants" / str(env.initial_tenant) - assert tenant_remote_storage.is_dir() - shutil.rmtree(tenant_remote_storage) - env.initial_tenant = TenantId("0" * 32) - env.initial_timeline = None - # Choose small layer_size so that we can use low pgbench_scales and still get a large count of layers. # Large count of layers and small layer size is good for testing because it makes evictions predictable. 
# Predictable in the sense that many layer evictions will be required to reach the eviction target, because diff --git a/test_runner/regress/test_gc_aggressive.py b/test_runner/regress/test_gc_aggressive.py index 53fa70903f..be817521cd 100644 --- a/test_runner/regress/test_gc_aggressive.py +++ b/test_runner/regress/test_gc_aggressive.py @@ -11,8 +11,7 @@ from fixtures.neon_fixtures import ( wait_for_last_flush_lsn, ) from fixtures.remote_storage import RemoteStorageKind -from fixtures.types import TenantId, TimelineId -from fixtures.utils import query_scalar +from fixtures.types import TimelineId # Test configuration # @@ -71,13 +70,11 @@ def test_gc_aggressive(neon_env_builder: NeonEnvBuilder): # Disable pitr, because here we want to test branch creation after GC neon_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}" env = neon_env_builder.init_start() - env.neon_cli.create_branch("test_gc_aggressive", "main") + timeline = env.neon_cli.create_branch("test_gc_aggressive", "main") endpoint = env.endpoints.create_start("test_gc_aggressive") log.info("postgres is running on test_gc_aggressive branch") with endpoint.cursor() as cur: - timeline = TimelineId(query_scalar(cur, "SHOW neon.timeline_id")) - # Create table, and insert the first 100 rows cur.execute("CREATE TABLE foo (id int, counter int, t text)") cur.execute( @@ -109,7 +106,8 @@ def test_gc_index_upload(neon_env_builder: NeonEnvBuilder, remote_storage_kind: ) env = neon_env_builder.init_start() - env.neon_cli.create_branch("test_gc_index_upload", "main") + tenant_id = env.initial_tenant + timeline_id = env.neon_cli.create_branch("test_gc_index_upload", "main") endpoint = env.endpoints.create_start("test_gc_index_upload") pageserver_http = env.pageserver.http_client() @@ -117,9 +115,6 @@ def test_gc_index_upload(neon_env_builder: NeonEnvBuilder, remote_storage_kind: pg_conn = endpoint.connect() cur = pg_conn.cursor() - tenant_id = TenantId(query_scalar(cur, "SHOW neon.tenant_id")) - timeline_id = TimelineId(query_scalar(cur, "SHOW neon.timeline_id")) - cur.execute("CREATE TABLE foo (id int, counter int, t text)") cur.execute( """ diff --git a/test_runner/regress/test_large_schema.py b/test_runner/regress/test_large_schema.py index ac83131ba2..72bf32fcd3 100644 --- a/test_runner/regress/test_large_schema.py +++ b/test_runner/regress/test_large_schema.py @@ -74,9 +74,9 @@ def test_large_schema(neon_env_builder: NeonEnvBuilder): cur.execute("select * from pg_depend order by refclassid, refobjid, refobjsubid") # Check layer file sizes - tenant_id = endpoint.safe_psql("show neon.tenant_id")[0][0] - timeline_id = endpoint.safe_psql("show neon.timeline_id")[0][0] - timeline_path = "{}/tenants/{}/timelines/{}/".format(env.repo_dir, tenant_id, timeline_id) + timeline_path = "{}/tenants/{}/timelines/{}/".format( + env.repo_dir, env.initial_tenant, env.initial_timeline + ) for filename in os.listdir(timeline_path): if filename.startswith("00000"): log.info(f"layer {filename} size is {os.path.getsize(timeline_path + filename)}") diff --git a/test_runner/regress/test_layer_eviction.py b/test_runner/regress/test_layer_eviction.py index 1269210d0d..8f627defb5 100644 --- a/test_runner/regress/test_layer_eviction.py +++ b/test_runner/regress/test_layer_eviction.py @@ -8,7 +8,7 @@ from fixtures.neon_fixtures import ( ) from fixtures.pageserver.utils import wait_for_last_record_lsn, wait_for_upload from fixtures.remote_storage import RemoteStorageKind -from fixtures.types import Lsn, TenantId, TimelineId +from 
fixtures.types import Lsn from fixtures.utils import query_scalar @@ -34,8 +34,8 @@ def test_basic_eviction( client = env.pageserver.http_client() endpoint = env.endpoints.create_start("main") - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline # Create a number of layers in the tenant with endpoint.cursor() as cur: diff --git a/test_runner/regress/test_metric_collection.py b/test_runner/regress/test_metric_collection.py index 80ffe5126d..3f4b42707a 100644 --- a/test_runner/regress/test_metric_collection.py +++ b/test_runner/regress/test_metric_collection.py @@ -18,8 +18,7 @@ from fixtures.neon_fixtures import ( ) from fixtures.port_distributor import PortDistributor from fixtures.remote_storage import RemoteStorageKind -from fixtures.types import TenantId, TimelineId -from fixtures.utils import query_scalar +from fixtures.types import TenantId from pytest_httpserver import HTTPServer from werkzeug.wrappers.request import Request from werkzeug.wrappers.response import Response @@ -115,15 +114,13 @@ def test_metric_collection( # Order of fixtures shutdown is not specified, and if http server gets down # before pageserver, pageserver log might contain such errors in the end. env.pageserver.allowed_errors.append(".*metrics endpoint refused the sent metrics*") - env.neon_cli.create_branch("test_metric_collection") + tenant_id = env.initial_tenant + timeline_id = env.neon_cli.create_branch("test_metric_collection") endpoint = env.endpoints.create_start("test_metric_collection") pg_conn = endpoint.connect() cur = pg_conn.cursor() - tenant_id = TenantId(query_scalar(cur, "SHOW neon.tenant_id")) - timeline_id = TimelineId(query_scalar(cur, "SHOW neon.timeline_id")) - cur.execute("CREATE TABLE foo (id int, counter int, t text)") cur.execute( """ diff --git a/test_runner/regress/test_ondemand_download.py b/test_runner/regress/test_ondemand_download.py index 17a63535cf..0640e65e57 100644 --- a/test_runner/regress/test_ondemand_download.py +++ b/test_runner/regress/test_ondemand_download.py @@ -78,8 +78,8 @@ def test_ondemand_download_large_rel( client = env.pageserver.http_client() - tenant_id = endpoint.safe_psql("show neon.tenant_id")[0][0] - timeline_id = endpoint.safe_psql("show neon.timeline_id")[0][0] + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline # We want to make sure that the data is large enough that the keyspace is partitioned. 
num_rows = 1000000 @@ -183,8 +183,8 @@ def test_ondemand_download_timetravel( client = env.pageserver.http_client() - tenant_id = endpoint.safe_psql("show neon.tenant_id")[0][0] - timeline_id = endpoint.safe_psql("show neon.timeline_id")[0][0] + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline lsns = [] @@ -342,8 +342,8 @@ def test_download_remote_layers_api( client = env.pageserver.http_client() - tenant_id = endpoint.safe_psql("show neon.tenant_id")[0][0] - timeline_id = endpoint.safe_psql("show neon.timeline_id")[0][0] + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline table_len = 10000 with endpoint.cursor() as cur: @@ -516,7 +516,6 @@ def test_compaction_downloads_on_demand_without_image_creation( tenant_id = env.initial_tenant timeline_id = env.initial_timeline - assert timeline_id is not None with env.endpoints.create_start("main") as endpoint: # no particular reason to create the layers like this, but we are sure @@ -590,7 +589,6 @@ def test_compaction_downloads_on_demand_with_image_creation( env = neon_env_builder.init_start(initial_tenant_conf=stringify(conf)) tenant_id = env.initial_tenant timeline_id = env.initial_timeline - assert timeline_id is not None pageserver_http = env.pageserver.http_client() diff --git a/test_runner/regress/test_read_trace.py b/test_runner/regress/test_read_trace.py index 9ebe53fc17..cae8ca3919 100644 --- a/test_runner/regress/test_read_trace.py +++ b/test_runner/regress/test_read_trace.py @@ -2,7 +2,7 @@ from contextlib import closing from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.pageserver.utils import wait_for_last_record_lsn -from fixtures.types import Lsn, TenantId, TimelineId +from fixtures.types import Lsn from fixtures.utils import query_scalar @@ -12,24 +12,21 @@ from fixtures.utils import query_scalar # Additionally, tests that pageserver is able to create tenants with custom configs. 
def test_read_request_tracing(neon_env_builder: NeonEnvBuilder): neon_env_builder.num_safekeepers = 1 - env = neon_env_builder.init_start() - - tenant, _ = env.neon_cli.create_tenant( - conf={ + env = neon_env_builder.init_start( + initial_tenant_conf={ "trace_read_requests": "true", } ) - timeline = env.neon_cli.create_timeline("test_trace_replay", tenant_id=tenant) - endpoint = env.endpoints.create_start("test_trace_replay", "main", tenant) + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline + endpoint = env.endpoints.create_start("main") with closing(endpoint.connect()) as conn: with conn.cursor() as cur: cur.execute("create table t (i integer);") cur.execute(f"insert into t values (generate_series(1,{10000}));") cur.execute("select count(*) from t;") - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) current_lsn = Lsn(query_scalar(cur, "SELECT pg_current_wal_flush_lsn()")) # wait until pageserver receives that data pageserver_http = env.pageserver.http_client() @@ -38,5 +35,5 @@ def test_read_request_tracing(neon_env_builder: NeonEnvBuilder): # Stop postgres so we drop the connection and flush the traces endpoint.stop() - trace_path = env.repo_dir / "traces" / str(tenant) / str(timeline) + trace_path = env.repo_dir / "traces" / str(tenant_id) / str(timeline_id) assert trace_path.exists() diff --git a/test_runner/regress/test_remote_storage.py b/test_runner/regress/test_remote_storage.py index 4f5b193ce2..b865e3ce24 100644 --- a/test_runner/regress/test_remote_storage.py +++ b/test_runner/regress/test_remote_storage.py @@ -95,12 +95,12 @@ def test_remote_storage_backup_and_restore( client = env.pageserver.http_client() - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline # Thats because of UnreliableWrapper's injected failures env.pageserver.allowed_errors.append( - f".*failed to fetch tenant deletion mark at tenants/({tenant_id}|{env.initial_tenant})/deleted attempt 1.*" + f".*failed to fetch tenant deletion mark at tenants/{tenant_id}/deleted attempt 1.*" ) checkpoint_numbers = range(1, 3) @@ -403,8 +403,7 @@ def test_remote_timeline_client_calls_started_metric( ) tenant_id = env.initial_tenant - assert env.initial_timeline is not None - timeline_id: TimelineId = env.initial_timeline + timeline_id = env.initial_timeline client = env.pageserver.http_client() @@ -542,8 +541,7 @@ def test_timeline_deletion_with_files_stuck_in_upload_queue( } ) tenant_id = env.initial_tenant - assert env.initial_timeline is not None - timeline_id: TimelineId = env.initial_timeline + timeline_id = env.initial_timeline timeline_path = env.timeline_dir(tenant_id, timeline_id) @@ -808,8 +806,7 @@ def test_compaction_delete_before_upload( ) tenant_id = env.initial_tenant - assert env.initial_timeline is not None - timeline_id: TimelineId = env.initial_timeline + timeline_id = env.initial_timeline client = env.pageserver.http_client() diff --git a/test_runner/regress/test_tenant_detach.py b/test_runner/regress/test_tenant_detach.py index b189510a9e..07b751bcca 100644 --- a/test_runner/regress/test_tenant_detach.py +++ b/test_runner/regress/test_tenant_detach.py @@ -463,8 +463,8 @@ def test_detach_while_attaching( client = env.pageserver.http_client() - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - 
timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline # Attempts to connect from compute to pageserver while the tenant is # temporarily detached produces these errors in the pageserver log. @@ -615,8 +615,8 @@ def test_ignored_tenant_download_missing_layers( pageserver_http = env.pageserver.http_client() endpoint = env.endpoints.create_start("main") - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline # Attempts to connect from compute to pageserver while the tenant is # temporarily detached produces these errors in the pageserver log. @@ -679,10 +679,10 @@ def test_ignored_tenant_stays_broken_without_metadata( ) env = neon_env_builder.init_start() pageserver_http = env.pageserver.http_client() - endpoint = env.endpoints.create_start("main") + env.endpoints.create_start("main") - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline # Attempts to connect from compute to pageserver while the tenant is # temporarily detached produces these errors in the pageserver log. @@ -723,9 +723,9 @@ def test_load_attach_negatives( ) env = neon_env_builder.init_start() pageserver_http = env.pageserver.http_client() - endpoint = env.endpoints.create_start("main") + env.endpoints.create_start("main") - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) + tenant_id = env.initial_tenant # Attempts to connect from compute to pageserver while the tenant is # temporarily detached produces these errors in the pageserver log. @@ -773,8 +773,8 @@ def test_ignore_while_attaching( pageserver_http = env.pageserver.http_client() - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline # Attempts to connect from compute to pageserver while the tenant is # temporarily detached produces these errors in the pageserver log. 
diff --git a/test_runner/regress/test_tenants_with_remote_storage.py b/test_runner/regress/test_tenants_with_remote_storage.py index 397a2ea534..2925f8c2da 100644 --- a/test_runner/regress/test_tenants_with_remote_storage.py +++ b/test_runner/regress/test_tenants_with_remote_storage.py @@ -142,8 +142,8 @@ def test_tenants_attached_after_download( client = env.pageserver.http_client() - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline # Thats because of UnreliableWrapper's injected failures env.pageserver.allowed_errors.append( @@ -252,8 +252,8 @@ def test_tenant_redownloads_truncated_file_on_startup( pageserver_http = env.pageserver.http_client() endpoint = env.endpoints.create_start("main") - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline with endpoint.cursor() as cur: cur.execute("CREATE TABLE t1 AS VALUES (123, 'foobar');") diff --git a/test_runner/regress/test_threshold_based_eviction.py b/test_runner/regress/test_threshold_based_eviction.py index b1bc9623ce..a0e423e7ff 100644 --- a/test_runner/regress/test_threshold_based_eviction.py +++ b/test_runner/regress/test_threshold_based_eviction.py @@ -10,7 +10,6 @@ from fixtures.neon_fixtures import ( ) from fixtures.pageserver.http import LayerMapInfo from fixtures.remote_storage import RemoteStorageKind -from fixtures.types import TimelineId from pytest_httpserver import HTTPServer # NB: basic config change tests are in test_tenant_conf.py @@ -45,7 +44,6 @@ def test_threshold_based_eviction( ) tenant_id, timeline_id = env.initial_tenant, env.initial_timeline - assert isinstance(timeline_id, TimelineId) ps_http = env.pageserver.http_client() assert ps_http.tenant_config(tenant_id).effective_config["eviction_policy"] == { diff --git a/test_runner/regress/test_timeline_delete.py b/test_runner/regress/test_timeline_delete.py index e641065c43..916c0111f7 100644 --- a/test_runner/regress/test_timeline_delete.py +++ b/test_runner/regress/test_timeline_delete.py @@ -359,8 +359,8 @@ def test_timeline_resurrection_on_attach( ps_http = env.pageserver.http_client() pg = env.endpoints.create_start("main") - tenant_id = TenantId(pg.safe_psql("show neon.tenant_id")[0][0]) - main_timeline_id = TimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = env.initial_tenant + main_timeline_id = env.initial_timeline with pg.cursor() as cur: cur.execute("CREATE TABLE f (i integer);") @@ -512,8 +512,6 @@ def test_timeline_delete_fail_before_local_delete(neon_env_builder: NeonEnvBuild ), ) - assert env.initial_timeline is not None - for timeline_id in (intermediate_timeline_id, env.initial_timeline): timeline_delete_wait_completed( ps_http, tenant_id=env.initial_tenant, timeline_id=timeline_id @@ -716,13 +714,9 @@ def test_timeline_delete_works_for_remote_smoke( ps_http = env.pageserver.http_client() pg = env.endpoints.create_start("main") - tenant_id = TenantId(pg.safe_psql("show neon.tenant_id")[0][0]) - main_timeline_id = TimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline - assert tenant_id == env.initial_tenant - assert main_timeline_id == env.initial_timeline - - assert env.initial_timeline is not None timeline_ids = [env.initial_timeline] 
for i in range(2): branch_timeline_id = env.neon_cli.create_branch(f"new{i}", "main") @@ -743,9 +737,8 @@ def test_timeline_delete_works_for_remote_smoke( log.info("waiting for checkpoint upload") wait_for_upload(ps_http, tenant_id, branch_timeline_id, current_lsn) log.info("upload of checkpoint is done") - timeline_id = TimelineId(pg.safe_psql("show neon.timeline_id")[0][0]) - timeline_ids.append(timeline_id) + timeline_ids.append(branch_timeline_id) for timeline_id in timeline_ids: assert_prefix_not_empty( diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index c471b18db7..8ca93845b2 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -270,7 +270,8 @@ def test_broker(neon_env_builder: NeonEnvBuilder): neon_env_builder.enable_local_fs_remote_storage() env = neon_env_builder.init_start() - env.neon_cli.create_branch("test_broker", "main") + tenant_id = env.initial_tenant + timeline_id = env.neon_cli.create_branch("test_broker", "main") # FIXME: Is this expected? env.pageserver.allowed_errors.append( @@ -280,10 +281,6 @@ def test_broker(neon_env_builder: NeonEnvBuilder): endpoint = env.endpoints.create_start("test_broker") endpoint.safe_psql("CREATE TABLE t(key int primary key, value text)") - # learn neon timeline from compute - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) - # wait until remote_consistent_lsn gets advanced on all safekeepers clients = [sk.http_client() for sk in env.safekeepers] stat_before = [cli.timeline_status(tenant_id, timeline_id) for cli in clients] @@ -325,7 +322,8 @@ def test_wal_removal(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): ".*init_tenant_mgr: marking .* as locally complete, while it doesnt exist in remote index.*" ) - env.neon_cli.create_branch("test_safekeepers_wal_removal") + tenant_id = env.initial_tenant + timeline_id = env.neon_cli.create_branch("test_safekeepers_wal_removal") endpoint = env.endpoints.create_start("test_safekeepers_wal_removal") # Note: it is important to insert at least two segments, as currently @@ -338,9 +336,6 @@ def test_wal_removal(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): ] ) - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) - # force checkpoint to advance remote_consistent_lsn pageserver_conn_options = {} if auth_enabled: @@ -451,13 +446,10 @@ def test_wal_backup(neon_env_builder: NeonEnvBuilder, remote_storage_kind: Remot env = neon_env_builder.init_start() - env.neon_cli.create_branch("test_safekeepers_wal_backup") + tenant_id = env.initial_tenant + timeline_id = env.neon_cli.create_branch("test_safekeepers_wal_backup") endpoint = env.endpoints.create_start("test_safekeepers_wal_backup") - # learn neon timeline from compute - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) - pg_conn = endpoint.connect() cur = pg_conn.cursor() cur.execute("create table t(key int, value text)") @@ -505,14 +497,11 @@ def test_s3_wal_replay(neon_env_builder: NeonEnvBuilder, remote_storage_kind: Re neon_env_builder.remote_storage_users = RemoteStorageUsers.SAFEKEEPER env = neon_env_builder.init_start() - env.neon_cli.create_branch("test_s3_wal_replay") + tenant_id = env.initial_tenant + timeline_id = 
env.neon_cli.create_branch("test_s3_wal_replay") endpoint = env.endpoints.create_start("test_s3_wal_replay") - # learn neon timeline from compute - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) - expected_sum = 0 with closing(endpoint.connect()) as conn: @@ -796,15 +785,12 @@ def test_timeline_status(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): neon_env_builder.auth_enabled = auth_enabled env = neon_env_builder.init_start() - env.neon_cli.create_branch("test_timeline_status") + tenant_id = env.initial_tenant + timeline_id = env.neon_cli.create_branch("test_timeline_status") endpoint = env.endpoints.create_start("test_timeline_status") wa = env.safekeepers[0] - # learn neon timeline from compute - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) - if not auth_enabled: wa_http_cli = wa.http_client() wa_http_cli.check_status() @@ -887,15 +873,12 @@ def test_start_replication_term(neon_env_builder: NeonEnvBuilder): env = neon_env_builder.init_start() - env.neon_cli.create_branch("test_start_replication_term") + tenant_id = env.initial_tenant + timeline_id = env.neon_cli.create_branch("test_start_replication_term") endpoint = env.endpoints.create_start("test_start_replication_term") endpoint.safe_psql("CREATE TABLE t(key int primary key, value text)") - # learn neon timeline from compute - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) - sk = env.safekeepers[0] sk_http_cli = sk.http_client() tli_status = sk_http_cli.timeline_status(tenant_id, timeline_id) @@ -922,15 +905,12 @@ def test_sk_auth(neon_env_builder: NeonEnvBuilder): neon_env_builder.auth_enabled = True env = neon_env_builder.init_start() - env.neon_cli.create_branch("test_sk_auth") - endpoint = env.endpoints.create_start("test_sk_auth") + tenant_id = env.initial_tenant + timeline_id = env.neon_cli.create_branch("test_sk_auth") + env.endpoints.create_start("test_sk_auth") sk = env.safekeepers[0] - # learn neon timeline from compute - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) - tenant_token = env.auth_keys.generate_tenant_token(tenant_id) full_token = env.auth_keys.generate_safekeeper_token() @@ -1185,7 +1165,8 @@ def test_replace_safekeeper(neon_env_builder: NeonEnvBuilder): neon_env_builder.num_safekeepers = 4 env = neon_env_builder.init_start() - env.neon_cli.create_branch("test_replace_safekeeper") + tenant_id = env.initial_tenant + timeline_id = env.neon_cli.create_branch("test_replace_safekeeper") log.info("Use only first 3 safekeepers") env.safekeepers[3].stop() @@ -1193,10 +1174,6 @@ def test_replace_safekeeper(neon_env_builder: NeonEnvBuilder): endpoint.active_safekeepers = [1, 2, 3] endpoint.start() - # learn neon timeline from compute - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) - execute_payload(endpoint) show_statuses(env.safekeepers, tenant_id, timeline_id) @@ -1448,7 +1425,8 @@ def test_pull_timeline(neon_env_builder: NeonEnvBuilder): neon_env_builder.num_safekeepers = 4 env = neon_env_builder.init_start() - env.neon_cli.create_branch("test_pull_timeline") + tenant_id = env.initial_tenant + timeline_id = 
env.neon_cli.create_branch("test_pull_timeline") log.info("Use only first 3 safekeepers") env.safekeepers[3].stop() @@ -1456,10 +1434,6 @@ def test_pull_timeline(neon_env_builder: NeonEnvBuilder): endpoint.active_safekeepers = [1, 2, 3] endpoint.start() - # learn neon timeline from compute - tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0]) - timeline_id = TimelineId(endpoint.safe_psql("show neon.timeline_id")[0][0]) - execute_payload(endpoint) show_statuses(env.safekeepers, tenant_id, timeline_id) From b95addddd54dc1b25850b0784206941ebaea6af4 Mon Sep 17 00:00:00 2001 From: John Spray Date: Mon, 21 Aug 2023 12:29:36 +0100 Subject: [PATCH 37/40] pageserver: do not read redundant `timeline_layers` from IndexPart, so that we can remove it later (#4972) ## Problem IndexPart contains two redundant lists of layer names: a set of the names, and then a map of name to metadata. We already required that all the layers in `timeline_layers` are also in `layers_metadata`, in `initialize_with_current_remote_index_part`, so if there were any index_part.json files in the field that relied on these sets being different, they would already be broken. ## Summary of changes `timeline_layers` is made private and no longer read at runtime. It is still serialized, but not deserialized. `disk_consistent_lsn` is also made private, as this field only exists for convenience of humans reading the serialized JSON. This prepares us to entirely remove `timeline_layers` in a future release, once this change is fully deployed, and therefore no pageservers are trying to read the field. --- .../src/tenant/remote_timeline_client.rs | 6 ++++- .../tenant/remote_timeline_client/index.rs | 24 +++++++++++-------- pageserver/src/tenant/timeline.rs | 4 ++-- pageserver/src/tenant/upload_queue.rs | 23 +++++------------- 4 files changed, 27 insertions(+), 30 deletions(-) diff --git a/pageserver/src/tenant/remote_timeline_client.rs b/pageserver/src/tenant/remote_timeline_client.rs index 8a50b0d268..3193a7eb57 100644 --- a/pageserver/src/tenant/remote_timeline_client.rs +++ b/pageserver/src/tenant/remote_timeline_client.rs @@ -1578,7 +1578,11 @@ mod tests { }; assert_file_list( - &index_part.timeline_layers, + &index_part + .layer_metadata + .keys() + .map(|f| f.to_owned()) + .collect(), &[ &layer_file_name_1.file_name(), &layer_file_name_2.file_name(), diff --git a/pageserver/src/tenant/remote_timeline_client/index.rs b/pageserver/src/tenant/remote_timeline_client/index.rs index fdbf26e6ae..8985ab0865 100644 --- a/pageserver/src/tenant/remote_timeline_client/index.rs +++ b/pageserver/src/tenant/remote_timeline_client/index.rs @@ -62,10 +62,9 @@ pub struct IndexPart { #[serde(skip_serializing_if = "Option::is_none")] pub deleted_at: Option, - /// Layer names, which are stored on the remote storage. - /// - /// Additional metadata can might exist in `layer_metadata`. - pub timeline_layers: HashSet, + /// Legacy field: equal to the keys of `layer_metadata`, only written out for forward compat + #[serde(default, skip_deserializing)] + timeline_layers: HashSet, /// Per layer file name metadata, which can be present for a present or missing layer file. /// @@ -74,9 +73,10 @@ pub struct IndexPart { pub layer_metadata: HashMap, // 'disk_consistent_lsn' is a copy of the 'disk_consistent_lsn' in the metadata. - // It's duplicated here for convenience. + // It's duplicated for convenience when reading the serialized structure, but is + // private because internally we would read from metadata instead. 
#[serde_as(as = "DisplayFromStr")] - pub disk_consistent_lsn: Lsn, + disk_consistent_lsn: Lsn, metadata_bytes: Vec, } @@ -85,7 +85,11 @@ impl IndexPart { /// used to understand later versions. /// /// Version is currently informative only. - const LATEST_VERSION: usize = 2; + /// Version history + /// - 2: added `deleted_at` + /// - 3: no longer deserialize `timeline_layers` (serialized format is the same, but timeline_layers + /// is always generated from the keys of `layer_metadata`) + const LATEST_VERSION: usize = 3; pub const FILE_NAME: &'static str = "index_part.json"; pub fn new( @@ -166,7 +170,7 @@ mod tests { let expected = IndexPart { // note this is not verified, could be anything, but exists for humans debugging.. could be the git version instead? version: 1, - timeline_layers: HashSet::from(["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap()]), + timeline_layers: HashSet::new(), layer_metadata: HashMap::from([ ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata { file_size: 25600000, @@ -203,7 +207,7 @@ mod tests { let expected = IndexPart { // note this is not verified, could be anything, but exists for humans debugging.. could be the git version instead? version: 1, - timeline_layers: HashSet::from(["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap()]), + timeline_layers: HashSet::new(), layer_metadata: HashMap::from([ ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata { file_size: 25600000, @@ -241,7 +245,7 @@ mod tests { let expected = IndexPart { // note this is not verified, could be anything, but exists for humans debugging.. could be the git version instead? 
            version: 2,
-            timeline_layers: HashSet::from(["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap()]),
+            timeline_layers: HashSet::new(),
             layer_metadata: HashMap::from([
                 ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata {
                     file_size: 25600000,
diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs
index e21d594cb9..db565e2975 100644
--- a/pageserver/src/tenant/timeline.rs
+++ b/pageserver/src/tenant/timeline.rs
@@ -1730,7 +1730,7 @@ impl Timeline {
         let mut corrupted_local_layers = Vec::new();
         let mut added_remote_layers = Vec::new();
 
-        for remote_layer_name in &index_part.timeline_layers {
+        for remote_layer_name in index_part.layer_metadata.keys() {
             let local_layer = local_only_layers.remove(remote_layer_name);
 
             let remote_layer_metadata = index_part
@@ -1890,7 +1890,7 @@ impl Timeline {
             Some(index_part) => {
                 info!(
                     "initializing upload queue from remote index with {} layer files",
-                    index_part.timeline_layers.len()
+                    index_part.layer_metadata.len()
                 );
                 remote_client.init_upload_queue(index_part)?;
                 self.create_remote_layers(index_part, local_layers, disk_consistent_lsn)
diff --git a/pageserver/src/tenant/upload_queue.rs b/pageserver/src/tenant/upload_queue.rs
index a62cc99adf..e2a24fa48f 100644
--- a/pageserver/src/tenant/upload_queue.rs
+++ b/pageserver/src/tenant/upload_queue.rs
@@ -140,23 +140,12 @@ impl UploadQueue {
             }
         }
 
-        let mut files = HashMap::with_capacity(index_part.timeline_layers.len());
-        for layer_name in &index_part.timeline_layers {
-            match index_part
-                .layer_metadata
-                .get(layer_name)
-                .map(LayerFileMetadata::from)
-            {
-                Some(layer_metadata) => {
-                    files.insert(layer_name.to_owned(), layer_metadata);
-                }
-                None => {
-                    anyhow::bail!(
-                        "No remote layer metadata found for layer {}",
-                        layer_name.file_name()
-                    );
-                }
-            }
+        let mut files = HashMap::with_capacity(index_part.layer_metadata.len());
+        for (layer_name, layer_metadata) in &index_part.layer_metadata {
+            files.insert(
+                layer_name.to_owned(),
+                LayerFileMetadata::from(layer_metadata),
+            );
         }
 
         let index_part_metadata = index_part.parse_metadata()?;

From 615a490239c14bf243cf5c3abfedb57bb790f811 Mon Sep 17 00:00:00 2001
From: John Spray
Date: Mon, 21 Aug 2023 17:30:28 +0100
Subject: [PATCH 38/40] pageserver: refactor Tenant/Timeline args into structs (#5053)

## Problem

There are some common types that we pass into tenants and timelines as
we construct them, such as remote storage and the broker client.
Currently the list is small, but this is likely to grow -- the deletion
queue PR (#4960) pushed some methods to the point of clippy complaining
they had too many args, because of the extra deletion queue client
being passed around.

There are some shared objects that currently aren't passed around
explicitly because they use a static `once_cell` (e.g.
CONCURRENT_COMPACTIONS), but as we add more resource management and
concurrency control over time, it will be more readable & testable to
pass a type around in the respective Resources object, rather than to
coordinate via static objects.

The `Resources` structures in this PR will make it easier to add
references to central coordination functions, without having to rely on
statics.

## Summary of changes

- For `Tenant`, the `broker_client` and `remote_storage` are bundled
into `TenantSharedResources`
- For `Timeline`, the `remote_client` is wrapped into
`TimelineResources`.
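
In distilled form, the two structs look roughly like this (a simplified
sketch with the generic parameters written out; the diff below has the
authoritative definitions):

    /// References to shared objects that are passed into each tenant, such
    /// as the shared remote storage client and process initialization state.
    #[derive(Clone)]
    pub struct TenantSharedResources {
        pub broker_client: storage_broker::BrokerClientChannel,
        pub remote_storage: Option<GenericRemoteStorage>,
    }

    /// The outward-facing resources required to build a Timeline
    pub struct TimelineResources {
        pub remote_client: Option<RemoteTimelineClient>,
    }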
Both of these structures will get an additional deletion queue member in #4960. --- pageserver/src/bin/pageserver.rs | 7 +- pageserver/src/tenant.rs | 100 ++++++++++++++--------- pageserver/src/tenant/mgr.rs | 50 +++++++----- pageserver/src/tenant/timeline.rs | 10 ++- pageserver/src/tenant/timeline/delete.rs | 4 +- 5 files changed, 105 insertions(+), 66 deletions(-) diff --git a/pageserver/src/bin/pageserver.rs b/pageserver/src/bin/pageserver.rs index 635e12e8fe..71e3a0ff3f 100644 --- a/pageserver/src/bin/pageserver.rs +++ b/pageserver/src/bin/pageserver.rs @@ -11,6 +11,7 @@ use metrics::launch_timestamp::{set_launch_timestamp_metric, LaunchTimestamp}; use pageserver::disk_usage_eviction_task::{self, launch_disk_usage_global_eviction_task}; use pageserver::metrics::{STARTUP_DURATION, STARTUP_IS_LOADING}; use pageserver::task_mgr::WALRECEIVER_RUNTIME; +use pageserver::tenant::TenantSharedResources; use remote_storage::GenericRemoteStorage; use tokio::time::Instant; use tracing::*; @@ -382,8 +383,10 @@ fn start_pageserver( BACKGROUND_RUNTIME.block_on(mgr::init_tenant_mgr( conf, - broker_client.clone(), - remote_storage.clone(), + TenantSharedResources { + broker_client: broker_client.clone(), + remote_storage: remote_storage.clone(), + }, order, ))?; diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 2ec5fe90ad..9f7e7ff451 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -56,6 +56,7 @@ use self::remote_timeline_client::RemoteTimelineClient; use self::timeline::uninit::TimelineUninitMark; use self::timeline::uninit::UninitializedTimeline; use self::timeline::EvictionTaskTenantState; +use self::timeline::TimelineResources; use crate::config::PageServerConf; use crate::context::{DownloadBehavior, RequestContext}; use crate::import_datadir; @@ -150,6 +151,14 @@ pub const TENANT_ATTACHING_MARKER_FILENAME: &str = "attaching"; pub const TENANT_DELETED_MARKER_FILE_NAME: &str = "deleted"; +/// References to shared objects that are passed into each tenant, such +/// as the shared remote storage client and process initialization state. +#[derive(Clone)] +pub struct TenantSharedResources { + pub broker_client: storage_broker::BrokerClientChannel, + pub remote_storage: Option, +} + /// /// Tenant consists of multiple timelines. Keep them in a hash table. 
/// @@ -389,7 +398,7 @@ impl Tenant { async fn timeline_init_and_sync( &self, timeline_id: TimelineId, - remote_client: Option, + resources: TimelineResources, remote_startup_data: Option, local_metadata: Option, ancestor: Option>, @@ -410,7 +419,7 @@ impl Tenant { timeline_id, up_to_date_metadata, ancestor.clone(), - remote_client, + resources, init_order, CreateTimelineCause::Load, )?; @@ -701,14 +710,22 @@ impl Tenant { .expect("just put it in above"); // TODO again handle early failure - self.load_remote_timeline(timeline_id, index_part, remote_metadata, remote_client, ctx) - .await - .with_context(|| { - format!( - "failed to load remote timeline {} for tenant {}", - timeline_id, self.tenant_id - ) - })?; + self.load_remote_timeline( + timeline_id, + index_part, + remote_metadata, + TimelineResources { + remote_client: Some(remote_client), + }, + ctx, + ) + .await + .with_context(|| { + format!( + "failed to load remote timeline {} for tenant {}", + timeline_id, self.tenant_id + ) + })?; } // Walk through deleted timelines, resume deletion @@ -763,7 +780,7 @@ impl Tenant { timeline_id: TimelineId, index_part: IndexPart, remote_metadata: TimelineMetadata, - remote_client: RemoteTimelineClient, + resources: TimelineResources, ctx: &RequestContext, ) -> anyhow::Result<()> { span::debug_assert_current_span_has_tenant_id(); @@ -793,7 +810,7 @@ impl Tenant { self.timeline_init_and_sync( timeline_id, - Some(remote_client), + resources, Some(RemoteStartupData { index_part, remote_metadata, @@ -840,8 +857,7 @@ impl Tenant { pub(crate) fn spawn_load( conf: &'static PageServerConf, tenant_id: TenantId, - broker_client: storage_broker::BrokerClientChannel, - remote_storage: Option, + resources: TenantSharedResources, init_order: Option, tenants: &'static tokio::sync::RwLock, ctx: &RequestContext, @@ -856,6 +872,9 @@ impl Tenant { } }; + let broker_client = resources.broker_client; + let remote_storage = resources.remote_storage; + let wal_redo_manager = Arc::new(PostgresRedoManager::new(conf, tenant_id)); let tenant = Tenant::new( TenantState::Loading, @@ -1241,16 +1260,9 @@ impl Tenant { ) -> Result<(), LoadLocalTimelineError> { span::debug_assert_current_span_has_tenant_id(); - let remote_client = self.remote_storage.as_ref().map(|remote_storage| { - RemoteTimelineClient::new( - remote_storage.clone(), - self.conf, - self.tenant_id, - timeline_id, - ) - }); + let mut resources = self.build_timeline_resources(timeline_id); - let (remote_startup_data, remote_client) = match remote_client { + let (remote_startup_data, remote_client) = match resources.remote_client { Some(remote_client) => match remote_client.download_index_file().await { Ok(index_part) => { let index_part = match index_part { @@ -1338,9 +1350,10 @@ impl Tenant { return Ok(()); } - (None, remote_client) + (None, resources.remote_client) } }; + resources.remote_client = remote_client; let ancestor = if let Some(ancestor_timeline_id) = local_metadata.ancestor_timeline() { let ancestor_timeline = self.get_timeline(ancestor_timeline_id, false) @@ -1353,7 +1366,7 @@ impl Tenant { self.timeline_init_and_sync( timeline_id, - remote_client, + resources, remote_startup_data, Some(local_metadata), ancestor, @@ -2225,7 +2238,7 @@ impl Tenant { new_timeline_id: TimelineId, new_metadata: &TimelineMetadata, ancestor: Option>, - remote_client: Option, + resources: TimelineResources, init_order: Option<&InitializationOrder>, cause: CreateTimelineCause, ) -> anyhow::Result> { @@ -2254,7 +2267,7 @@ impl Tenant { new_timeline_id, self.tenant_id, 
Arc::clone(&self.walredo_mgr), - remote_client, + resources, pg_version, initial_logical_size_can_start.cloned(), initial_logical_size_attempt.cloned().flatten(), @@ -2902,6 +2915,23 @@ impl Tenant { Ok(timeline) } + /// Call this before constructing a timeline, to build its required structures + fn build_timeline_resources(&self, timeline_id: TimelineId) -> TimelineResources { + let remote_client = if let Some(remote_storage) = self.remote_storage.as_ref() { + let remote_client = RemoteTimelineClient::new( + remote_storage.clone(), + self.conf, + self.tenant_id, + timeline_id, + ); + Some(remote_client) + } else { + None + }; + + TimelineResources { remote_client } + } + /// Creates intermediate timeline structure and its files. /// /// An empty layer map is initialized, and new data and WAL can be imported starting @@ -2918,25 +2948,17 @@ impl Tenant { ) -> anyhow::Result { let tenant_id = self.tenant_id; - let remote_client = if let Some(remote_storage) = self.remote_storage.as_ref() { - let remote_client = RemoteTimelineClient::new( - remote_storage.clone(), - self.conf, - tenant_id, - new_timeline_id, - ); + let resources = self.build_timeline_resources(new_timeline_id); + if let Some(remote_client) = &resources.remote_client { remote_client.init_upload_queue_for_empty_remote(new_metadata)?; - Some(remote_client) - } else { - None - }; + } let timeline_struct = self .create_timeline_struct( new_timeline_id, new_metadata, ancestor, - remote_client, + resources, None, CreateTimelineCause::Load, ) diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index 57237e8b88..a558c7d0ba 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -29,6 +29,7 @@ use utils::id::{TenantId, TimelineId}; use super::delete::DeleteTenantError; use super::timeline::delete::DeleteTimelineFlow; +use super::TenantSharedResources; /// The tenants known to the pageserver. /// The enum variants are used to distinguish the different states that the pageserver can be in. 
@@ -66,8 +67,7 @@ static TENANTS: Lazy> = Lazy::new(|| RwLock::new(TenantsMap:: #[instrument(skip_all)] pub async fn init_tenant_mgr( conf: &'static PageServerConf, - broker_client: storage_broker::BrokerClientChannel, - remote_storage: Option, + resources: TenantSharedResources, init_order: InitializationOrder, ) -> anyhow::Result<()> { // Scan local filesystem for attached tenants @@ -125,8 +125,7 @@ pub async fn init_tenant_mgr( match schedule_local_tenant_processing( conf, &tenant_dir_path, - broker_client.clone(), - remote_storage.clone(), + resources.clone(), Some(init_order.clone()), &TENANTS, &ctx, @@ -162,8 +161,7 @@ pub async fn init_tenant_mgr( pub(crate) fn schedule_local_tenant_processing( conf: &'static PageServerConf, tenant_path: &Path, - broker_client: storage_broker::BrokerClientChannel, - remote_storage: Option, + resources: TenantSharedResources, init_order: Option, tenants: &'static tokio::sync::RwLock, ctx: &RequestContext, @@ -200,9 +198,15 @@ pub(crate) fn schedule_local_tenant_processing( let tenant = if conf.tenant_attaching_mark_file_path(&tenant_id).exists() { info!("tenant {tenant_id} has attaching mark file, resuming its attach operation"); - if let Some(remote_storage) = remote_storage { - match Tenant::spawn_attach(conf, tenant_id, broker_client, tenants, remote_storage, ctx) - { + if let Some(remote_storage) = resources.remote_storage { + match Tenant::spawn_attach( + conf, + tenant_id, + resources.broker_client, + tenants, + remote_storage, + ctx, + ) { Ok(tenant) => tenant, Err(e) => { error!("Failed to spawn_attach tenant {tenant_id}, reason: {e:#}"); @@ -220,15 +224,7 @@ pub(crate) fn schedule_local_tenant_processing( } else { info!("tenant {tenant_id} is assumed to be loadable, starting load operation"); // Start loading the tenant into memory. It will initially be in Loading state. - Tenant::spawn_load( - conf, - tenant_id, - broker_client, - remote_storage, - init_order, - tenants, - ctx, - ) + Tenant::spawn_load(conf, tenant_id, resources, init_order, tenants, ctx) }; Ok(tenant) } @@ -363,8 +359,12 @@ pub async fn create_tenant( // TODO: tenant directory remains on disk if we bail out from here on. // See https://github.com/neondatabase/neon/issues/4233 + let tenant_resources = TenantSharedResources { + broker_client, + remote_storage, + }; let created_tenant = - schedule_local_tenant_processing(conf, &tenant_directory, broker_client, remote_storage, None, &TENANTS, ctx)?; + schedule_local_tenant_processing(conf, &tenant_directory, tenant_resources, None, &TENANTS, ctx)?; // TODO: tenant object & its background loops remain, untracked in tenant map, if we fail here. 
// See https://github.com/neondatabase/neon/issues/4233 @@ -523,7 +523,11 @@ pub async fn load_tenant( .with_context(|| format!("Failed to remove tenant ignore mark {tenant_ignore_mark:?} during tenant loading"))?; } - let new_tenant = schedule_local_tenant_processing(conf, &tenant_path, broker_client, remote_storage, None, &TENANTS, ctx) + let resources = TenantSharedResources { + broker_client, + remote_storage, + }; + let new_tenant = schedule_local_tenant_processing(conf, &tenant_path, resources, None, &TENANTS, ctx) .with_context(|| { format!("Failed to schedule tenant processing in path {tenant_path:?}") })?; @@ -604,7 +608,11 @@ pub async fn attach_tenant( .context("check for attach marker file existence")?; anyhow::ensure!(marker_file_exists, "create_tenant_files should have created the attach marker file"); - let attached_tenant = schedule_local_tenant_processing(conf, &tenant_dir, broker_client, Some(remote_storage), None, &TENANTS, ctx)?; + let resources = TenantSharedResources { + broker_client, + remote_storage: Some(remote_storage), + }; + let attached_tenant = schedule_local_tenant_processing(conf, &tenant_dir, resources, None, &TENANTS, ctx)?; // TODO: tenant object & its background loops remain, untracked in tenant map, if we fail here. // See https://github.com/neondatabase/neon/issues/4233 diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index db565e2975..5913686bfe 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -140,6 +140,12 @@ fn drop_rlock(rlock: tokio::sync::OwnedRwLockReadGuard) { fn drop_wlock(rlock: tokio::sync::RwLockWriteGuard<'_, T>) { drop(rlock) } + +/// The outward-facing resources required to build a Timeline +pub struct TimelineResources { + pub remote_client: Option, +} + pub struct Timeline { conf: &'static PageServerConf, tenant_conf: Arc>, @@ -1374,7 +1380,7 @@ impl Timeline { timeline_id: TimelineId, tenant_id: TenantId, walredo_mgr: Arc, - remote_client: Option, + resources: TimelineResources, pg_version: u32, initial_logical_size_can_start: Option, initial_logical_size_attempt: Option, @@ -1409,7 +1415,7 @@ impl Timeline { walredo_mgr, walreceiver: Mutex::new(None), - remote_client: remote_client.map(Arc::new), + remote_client: resources.remote_client.map(Arc::new), // initialize in-memory 'last_record_lsn' from 'disk_consistent_lsn'. last_record_lsn: SeqWait::new(RecordLsn { diff --git a/pageserver/src/tenant/timeline/delete.rs b/pageserver/src/tenant/timeline/delete.rs index d3d9c8a082..18588cf0fd 100644 --- a/pageserver/src/tenant/timeline/delete.rs +++ b/pageserver/src/tenant/timeline/delete.rs @@ -25,7 +25,7 @@ use crate::{ InitializationOrder, }; -use super::Timeline; +use super::{Timeline, TimelineResources}; /// Now that the Timeline is in Stopping state, request all the related tasks to shut down. async fn stop_tasks(timeline: &Timeline) -> Result<(), DeleteTimelineError> { @@ -416,7 +416,7 @@ impl DeleteTimelineFlow { timeline_id, local_metadata, None, // Ancestor is not needed for deletion. - remote_client, + TimelineResources { remote_client }, init_order, // Important. We dont pass ancestor above because it can be missing. // Thus we need to skip the validation here. 
From 4a8bd866f68c314c0d6f4a045758288c8516d901 Mon Sep 17 00:00:00 2001 From: Felix Prasanna <91577249+fprasx@users.noreply.github.com> Date: Mon, 21 Aug 2023 13:29:16 -0400 Subject: [PATCH 39/40] bump vm-builder version to v0.16.3 (#5055) This change to autoscaling allows agents to connect directly to the monitor, completely removing the informant. --- .github/workflows/build_and_test.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index f014f51fa9..f44e0e6c05 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -780,7 +780,7 @@ jobs: run: shell: sh -eu {0} env: - VM_BUILDER_VERSION: v0.16.2 + VM_BUILDER_VERSION: v0.16.3 steps: - name: Checkout @@ -804,7 +804,6 @@ jobs: ./vm-builder \ -enable-file-cache \ -enable-monitor \ - -enable-informant \ -src=369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} \ -dst=369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} From 0b001a00013df8684d65f2b32afd42ce7fd01ae9 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Mon, 21 Aug 2023 19:20:58 +0100 Subject: [PATCH 40/40] proxy: remove connections on shutdown (#5051) ## Problem On shutdown, proxy connections are staying open. ## Summary of changes Remove the connections on shutdown --- proxy/src/http/conn_pool.rs | 42 +++++++++++++++++++++++++++++---- proxy/src/http/sql_over_http.rs | 12 ++++------ proxy/src/http/websocket.rs | 12 ++++++++++ 3 files changed, 54 insertions(+), 12 deletions(-) diff --git a/proxy/src/http/conn_pool.rs b/proxy/src/http/conn_pool.rs index 180d10940c..c02ec61945 100644 --- a/proxy/src/http/conn_pool.rs +++ b/proxy/src/http/conn_pool.rs @@ -64,13 +64,13 @@ pub struct EndpointConnPool { total_conns: usize, } -/// This is cheap and not hugely secure. -/// But probably good enough for in memory only hashes. +/// 4096 is the number of rounds that SCRAM-SHA-256 recommends. +/// It's not the 600,000 that OWASP recommends... but our passwords are high entropy anyway. /// -/// Still takes 3.5ms to hash on my hardware. +/// Still takes 1.4ms to hash on my hardware. /// We don't want to ruin the latency improvements of using the pool by making password verification take too long const PARAMS: Params = Params { - rounds: 10_000, + rounds: 4096, output_length: 32, }; @@ -99,6 +99,10 @@ pub struct GlobalConnPool { max_conns_per_endpoint: usize, proxy_config: &'static crate::config::ProxyConfig, + + // Using a lock to remove any race conditions. + // Eg cleaning up connections while a new connection is returned + closed: RwLock, } impl GlobalConnPool { @@ -108,9 +112,24 @@ impl GlobalConnPool { global_pool_size: AtomicUsize::new(0), max_conns_per_endpoint: MAX_CONNS_PER_ENDPOINT, proxy_config: config, + closed: RwLock::new(false), }) } + pub fn shutdown(&self) { + *self.closed.write() = true; + + self.global_pool.retain(|_, endpoint_pool| { + let mut pool = endpoint_pool.write(); + // by clearing this hashmap, we remove the slots that a connection can be returned to. 
+ // when returning, it drops the connection if the slot doesn't exist + pool.pools.clear(); + pool.total_conns = 0; + + false + }); + } + pub async fn get( &self, conn_info: &ConnInfo, @@ -208,7 +227,20 @@ impl GlobalConnPool { new_client } - pub async fn put(&self, conn_info: &ConnInfo, client: Client) -> anyhow::Result<()> { + pub fn put(&self, conn_info: &ConnInfo, client: Client) -> anyhow::Result<()> { + // We want to hold this open while we return. This ensures that the pool can't close + // while we are in the middle of returning the connection. + let closed = self.closed.read(); + if *closed { + info!("pool: throwing away connection '{conn_info}' because pool is closed"); + return Ok(()); + } + + if client.inner.is_closed() { + info!("pool: throwing away connection '{conn_info}' because connection is closed"); + return Ok(()); + } + let pool = self.get_or_create_endpoint_pool(&conn_info.hostname); // return connection to the pool diff --git a/proxy/src/http/sql_over_http.rs b/proxy/src/http/sql_over_http.rs index 4470996c04..fe57096105 100644 --- a/proxy/src/http/sql_over_http.rs +++ b/proxy/src/http/sql_over_http.rs @@ -16,7 +16,6 @@ use tokio_postgres::types::Type; use tokio_postgres::GenericClient; use tokio_postgres::IsolationLevel; use tokio_postgres::Row; -use tracing::Instrument; use url::Url; use super::conn_pool::ConnInfo; @@ -286,13 +285,12 @@ pub async fn handle( }; if allow_pool { + let current_span = tracing::Span::current(); // return connection to the pool - tokio::task::spawn( - async move { - let _ = conn_pool.put(&conn_info, client).await; - } - .in_current_span(), - ); + tokio::task::spawn_blocking(move || { + let _span = current_span.enter(); + let _ = conn_pool.put(&conn_info, client); + }); } result diff --git a/proxy/src/http/websocket.rs b/proxy/src/http/websocket.rs index ba158dfca3..c85450a074 100644 --- a/proxy/src/http/websocket.rs +++ b/proxy/src/http/websocket.rs @@ -269,6 +269,18 @@ pub async fn task_main( let conn_pool: Arc = GlobalConnPool::new(config); + // shutdown the connection pool + tokio::spawn({ + let cancellation_token = cancellation_token.clone(); + let conn_pool = conn_pool.clone(); + async move { + cancellation_token.cancelled().await; + tokio::task::spawn_blocking(move || conn_pool.shutdown()) + .await + .unwrap(); + } + }); + let tls_config = config.tls_config.as_ref().map(|cfg| cfg.to_server_config()); let tls_acceptor: tokio_rustls::TlsAcceptor = match tls_config { Some(config) => config.into(),